diff options
Diffstat (limited to 'cloudinit')
48 files changed, 2762 insertions, 236 deletions
| diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 221f341c..618b0160 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -14,9 +14,9 @@ except ImportError:  KNOWN_CLOUD_NAMES = [      'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', -    'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', -    'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', -    'VMware', 'Other'] +    'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', +    'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', +    'Scaleway', 'SmartOS', 'VMware', 'Other']  # Potentially clear text collected logs  CLOUDINIT_LOG = '/var/log/cloud-init.log' diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index ba616781..6d12c437 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -78,8 +78,9 @@ class Cloud(object):      def get_locale(self):          return self.datasource.get_locale() -    def get_hostname(self, fqdn=False): -        return self.datasource.get_hostname(fqdn=fqdn) +    def get_hostname(self, fqdn=False, metadata_only=False): +        return self.datasource.get_hostname( +            fqdn=fqdn, metadata_only=metadata_only)      def device_name_to_device(self, name):          return self.datasource.device_name_to_device(name) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index d2f1b778..3f2dbb93 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -40,6 +40,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,  from cloudinit import atomic_helper +from cloudinit.config import cc_set_hostname  from cloudinit.dhclient_hook import LogDhclient @@ -215,12 +216,10 @@ def main_init(name, args):      if args.local:          deps = [sources.DEP_FILESYSTEM] -    early_logs = [] -    early_logs.append( -        attempt_cmdline_url( -            path=os.path.join("%s.d" % CLOUD_CONFIG, -                              "91_kernel_cmdline_url.cfg"), -    
        network=not args.local)) +    early_logs = [attempt_cmdline_url( +        path=os.path.join("%s.d" % CLOUD_CONFIG, +                          "91_kernel_cmdline_url.cfg"), +        network=not args.local)]      # Cloud-init 'init' stage is broken up into the following sub-stages      # 1. Ensure that the init object fetches its config without errors @@ -354,6 +353,11 @@ def main_init(name, args):      LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",                mode, name, iid, init.is_new_instance()) +    if mode == sources.DSMODE_LOCAL: +        # Before network comes up, set any configured hostname to allow +        # dhcp clients to advertize this hostname to any DDNS services +        # LP: #1746455. +        _maybe_set_hostname(init, stage='local', retry_stage='network')      init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))      if mode == sources.DSMODE_LOCAL: @@ -370,6 +374,7 @@ def main_init(name, args):      init.setup_datasource()      # update fully realizes user-data (pulling in #include if necessary)      init.update() +    _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config')      # Stage 7      try:          # Attempt to consume the data per instance. @@ -683,6 +688,24 @@ def status_wrapper(name, args, data_d=None, link_d=None):      return len(v1[mode]['errors']) +def _maybe_set_hostname(init, stage, retry_stage): +    """Call set-hostname if metadata, vendordata or userdata provides it. + +    @param stage: String representing current stage in which we are running. +    @param retry_stage: String represented logs upon error setting hostname. 
+    """ +    cloud = init.cloudify() +    (hostname, _fqdn) = util.get_hostname_fqdn( +        init.cfg, cloud, metadata_only=True) +    if hostname:  # meta-data or user-data hostname content +        try: +            cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) +        except cc_set_hostname.SetHostnameError as e: +            LOG.debug( +                'Failed setting hostname in %s stage. Will' +                ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + +  def main_features(name, args):      sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 6713af4f..5a3ec3bf 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -165,10 +165,11 @@ class TestClean(CiTestCase):              wrap_and_call(                  'cloudinit.cmd.clean',                  {'Init': {'side_effect': self.init_class}, +                 'sys.exit': {'side_effect': self.sys_exit},                   'sys.argv': {'new': ['clean', '--logs']}},                  clean.main) -        self.assertRaisesCodeEqual(0, context_manager.exception.code) +        self.assertEqual(0, context_manager.exception.code)          self.assertFalse(              os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py new file mode 100644 index 00000000..dbe421c0 --- /dev/null +++ b/cloudinit/cmd/tests/test_main.py @@ -0,0 +1,161 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from collections import namedtuple +import copy +import os +from six import StringIO + +from cloudinit.cmd import main +from cloudinit.util import ( +    ensure_dir, load_file, write_file, yaml_dumps) +from cloudinit.tests.helpers import ( +    FilesystemMockingTestCase, wrap_and_call) + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') + + +class TestMain(FilesystemMockingTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestMain, self).setUp() +        self.new_root = self.tmp_dir() +        self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) +        os.makedirs(self.cloud_dir) +        self.replicateTestRoot('simple_ubuntu', self.new_root) +        self.cfg = { +            'datasource_list': ['None'], +            'runcmd': ['ls /etc'],  # test ALL_DISTROS +            'system_info': {'paths': {'cloud_dir': self.cloud_dir, +                                      'run_dir': self.new_root}}, +            'write_files': [ +                { +                    'path': '/etc/blah.ini', +                    'content': 'blah', +                    'permissions': 0o755, +                }, +            ], +            'cloud_init_modules': ['write-files', 'runcmd'], +        } +        cloud_cfg = yaml_dumps(self.cfg) +        ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) +        self.cloud_cfg_file = os.path.join( +            self.new_root, 'etc', 'cloud', 'cloud.cfg') +        write_file(self.cloud_cfg_file, cloud_cfg) +        self.patchOS(self.new_root) +        self.patchUtils(self.new_root) +        self.stderr = StringIO() +        self.patchStdoutAndStderr(stderr=self.stderr) + +    def test_main_init_run_net_stops_on_file_no_net(self): +        """When no-net file is present, main_init does not process modules.""" +        stop_file = os.path.join(self.cloud_dir, 'data', 'no-net')  # stop file +        write_file(stop_file, '') +     
   cmdargs = myargs( +            debug=False, files=None, force=False, local=False, reporter=None, +            subcommand='init') +        (item1, item2) = wrap_and_call( +            'cloudinit.cmd.main', +            {'util.close_stdin': True, +             'netinfo.debug_info': 'my net debug info', +             'util.fixup_output': ('outfmt', 'errfmt')}, +            main.main_init, 'init', cmdargs) +        # We should not run write_files module +        self.assertFalse( +            os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), +            'Unexpected run of write_files module produced blah.ini') +        self.assertEqual([], item2) +        # Instancify is called +        instance_id_path = 'var/lib/cloud/data/instance-id' +        self.assertFalse( +            os.path.exists(os.path.join(self.new_root, instance_id_path)), +            'Unexpected call to datasource.instancify produced instance-id') +        expected_logs = [ +            "Exiting. stop file ['{stop_file}'] existed\n".format( +                stop_file=stop_file), +            'my net debug info'  # netinfo.debug_info +        ] +        for log in expected_logs: +            self.assertIn(log, self.stderr.getvalue()) + +    def test_main_init_run_net_runs_modules(self): +        """Modules like write_files are run in 'net' mode.""" +        cmdargs = myargs( +            debug=False, files=None, force=False, local=False, reporter=None, +            subcommand='init') +        (item1, item2) = wrap_and_call( +            'cloudinit.cmd.main', +            {'util.close_stdin': True, +             'netinfo.debug_info': 'my net debug info', +             'util.fixup_output': ('outfmt', 'errfmt')}, +            main.main_init, 'init', cmdargs) +        self.assertEqual([], item2) +        # Instancify is called +        instance_id_path = 'var/lib/cloud/data/instance-id' +        self.assertEqual( +            'iid-datasource-none\n', +            os.path.join(load_file( +    
            os.path.join(self.new_root, instance_id_path)))) +        # modules are run (including write_files) +        self.assertEqual( +            'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) +        expected_logs = [ +            'network config is disabled by fallback',  # apply_network_config +            'my net debug info',  # netinfo.debug_info +            'no previous run detected' +        ] +        for log in expected_logs: +            self.assertIn(log, self.stderr.getvalue()) + +    def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): +        """When local-hostname metadata is present, call cc_set_hostname.""" +        self.cfg['datasource'] = { +            'None': {'metadata': {'local-hostname': 'md-hostname'}}} +        cloud_cfg = yaml_dumps(self.cfg) +        write_file(self.cloud_cfg_file, cloud_cfg) +        cmdargs = myargs( +            debug=False, files=None, force=False, local=False, reporter=None, +            subcommand='init') + +        def set_hostname(name, cfg, cloud, log, args): +            self.assertEqual('set-hostname', name) +            updated_cfg = copy.deepcopy(self.cfg) +            updated_cfg.update( +                {'def_log_file': '/var/log/cloud-init.log', +                 'log_cfgs': [], +                 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], +                 'vendor_data': {'enabled': True, 'prefix': []}}) +            updated_cfg.pop('system_info') + +            self.assertEqual(updated_cfg, cfg) +            self.assertEqual(main.LOG, log) +            self.assertIsNone(args) + +        (item1, item2) = wrap_and_call( +            'cloudinit.cmd.main', +            {'util.close_stdin': True, +             'netinfo.debug_info': 'my net debug info', +             'cc_set_hostname.handle': {'side_effect': set_hostname}, +             'util.fixup_output': ('outfmt', 'errfmt')}, +            main.main_init, 'init', cmdargs) +        
self.assertEqual([], item2) +        # Instancify is called +        instance_id_path = 'var/lib/cloud/data/instance-id' +        self.assertEqual( +            'iid-datasource-none\n', +            os.path.join(load_file( +                os.path.join(self.new_root, instance_id_path)))) +        # modules are run (including write_files) +        self.assertEqual( +            'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) +        expected_logs = [ +            'network config is disabled by fallback',  # apply_network_config +            'my net debug info',  # netinfo.debug_info +            'no previous run detected' +        ] +        for log in expected_logs: +            self.assertIn(log, self.stderr.getvalue()) + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 4a5a8c06..37a89936 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -380,10 +380,11 @@ class TestStatus(CiTestCase):                  wrap_and_call(                      'cloudinit.cmd.status',                      {'sys.argv': {'new': ['status']}, +                     'sys.exit': {'side_effect': self.sys_exit},                       '_is_cloudinit_disabled': (False, ''),                       'Init': {'side_effect': self.init_class}},                      status.main) -        self.assertRaisesCodeEqual(0, context_manager.exception.code) +        self.assertEqual(0, context_manager.exception.code)          self.assertEqual('status: running\n', m_stdout.getvalue())  # vi: ts=4 expandtab syntax=python diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index efedd4ae..aff4010e 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -63,9 +63,7 @@ def handle(name, cfg, cloud, log, _args):                                               ["ssh-dss"])      try: -        cmd = [helper_path] -        
cmd.append(','.join(fp_blacklist)) -        cmd.append(','.join(key_blacklist)) +        cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]          (stdout, _stderr) = util.subp(cmd)          util.multi_log("%s\n" % (stdout.strip()),                         stderr=False, console=True) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 28b1d568..4190a20b 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -21,6 +21,13 @@ under ``version``, and defaults to ``none``, which selects the latest version  in the repos. If the ``puppet`` config key exists in the config archive, this  module will attempt to start puppet even if no installation was performed. +The module also provides keys for configuring the new puppet 4 paths and +installing the puppet package from the puppetlabs repositories: +https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html +The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their +values will default to ones that work with puppet 3.x and with distributions +that ship modified puppet 4.x that uses the old paths. +  Puppet configuration can be specified under the ``conf`` key. The  configuration is specified as a dictionary containing high-level ``<section>``  keys and lists of ``<key>=<value>`` pairs within each section. Each section @@ -44,6 +51,9 @@ in pem format as a multi-line string (using the ``|`` yaml notation).      
puppet:          install: <true/false>          version: <version> +        conf_file: '/etc/puppet/puppet.conf' +        ssl_dir: '/var/lib/puppet/ssl' +        package_name: 'puppet'          conf:              agent:                  server: "puppetmaster.example.org" @@ -63,9 +73,17 @@ from cloudinit import helpers  from cloudinit import util  PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' -PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'  PUPPET_SSL_DIR = '/var/lib/puppet/ssl' -PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' +PUPPET_PACKAGE_NAME = 'puppet' + + +class PuppetConstants(object): + +    def __init__(self, puppet_conf_file, puppet_ssl_dir, log): +        self.conf_path = puppet_conf_file +        self.ssl_dir = puppet_ssl_dir +        self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") +        self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem")  def _autostart_puppet(log): @@ -92,22 +110,29 @@ def handle(name, cfg, cloud, log, _args):          return      puppet_cfg = cfg['puppet'] -      # Start by installing the puppet package if necessary...      install = util.get_cfg_option_bool(puppet_cfg, 'install', True)      version = util.get_cfg_option_str(puppet_cfg, 'version', None) +    package_name = util.get_cfg_option_str( +        puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) +    conf_file = util.get_cfg_option_str( +        puppet_cfg, 'conf_file', PUPPET_CONF_PATH) +    ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) + +    p_constants = PuppetConstants(conf_file, ssl_dir, log)      if not install and version:          log.warn(("Puppet install set false but version supplied,"                    " doing nothing."))      elif install:          log.debug(("Attempting to install puppet %s,"),                    version if version else 'latest') -        cloud.distro.install_packages(('puppet', version)) + +        cloud.distro.install_packages((package_name, version))      # ... 
and then update the puppet configuration      if 'conf' in puppet_cfg:          # Add all sections from the conf object to puppet.conf -        contents = util.load_file(PUPPET_CONF_PATH) +        contents = util.load_file(p_constants.conf_path)          # Create object for reading puppet.conf values          puppet_config = helpers.DefaultingConfigParser()          # Read puppet.conf values from original file in order to be able to @@ -115,20 +140,23 @@ def handle(name, cfg, cloud, log, _args):          # (TODO(harlowja) is this really needed??)          cleaned_lines = [i.lstrip() for i in contents.splitlines()]          cleaned_contents = '\n'.join(cleaned_lines) -        puppet_config.readfp(StringIO(cleaned_contents), -                             filename=PUPPET_CONF_PATH) +        # Move to puppet_config.read_file when dropping py2.7 +        puppet_config.readfp(   # pylint: disable=W1505 +            StringIO(cleaned_contents), +            filename=p_constants.conf_path)          for (cfg_name, cfg) in puppet_cfg['conf'].items():              # Cert configuration is a special case              # Dump the puppet master ca certificate in the correct place              if cfg_name == 'ca_cert':                  # Puppet ssl sub-directory isn't created yet                  # Create it with the proper permissions and ownership -                util.ensure_dir(PUPPET_SSL_DIR, 0o771) -                util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') -                util.ensure_dir(PUPPET_SSL_CERT_DIR) -                util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') -                util.write_file(PUPPET_SSL_CERT_PATH, cfg) -                util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') +                util.ensure_dir(p_constants.ssl_dir, 0o771) +                util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') +                util.ensure_dir(p_constants.ssl_cert_dir) + +                util.chownbyname(p_constants.ssl_cert_dir, 
'puppet', 'root') +                util.write_file(p_constants.ssl_cert_path, cfg) +                util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')              else:                  # Iterate through the config items, we'll use ConfigParser.set                  # to overwrite or create new items as needed @@ -144,8 +172,9 @@ def handle(name, cfg, cloud, log, _args):                      puppet_config.set(cfg_name, o, v)              # We got all our config as wanted we'll rename              # the previous puppet.conf and create our new one -            util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) -            util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) +            util.rename(p_constants.conf_path, "%s.old" +                        % (p_constants.conf_path)) +            util.write_file(p_constants.conf_path, puppet_config.stringify())      # Set it up so it autostarts      _autostart_puppet(log) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cec22bb7..c8e1752f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -84,6 +84,10 @@ def _resize_ufs(mount_point, devpth):      return ('growfs', devpth) +def _resize_zfs(mount_point, devpth): +    return ('zpool', 'online', '-e', mount_point, devpth) + +  def _get_dumpfs_output(mount_point):      dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])      return dumpfs_res @@ -148,6 +152,7 @@ RESIZE_FS_PREFIXES_CMDS = [      ('ext', _resize_ext),      ('xfs', _resize_xfs),      ('ufs', _resize_ufs), +    ('zfs', _resize_zfs),  ]  RESIZE_FS_PRECHECK_CMDS = { @@ -188,6 +193,13 @@ def maybe_get_writable_device_path(devpath, info, log):          log.debug("Not attempting to resize devpath '%s': %s", devpath, info)          return None +    # FreeBSD zpool can also just use gpt/<label> +    # with that in mind we can not do an os.stat on "gpt/whatever" +    # therefore return the devpath already here. 
+    if devpath.startswith('gpt/'): +        log.debug('We have a gpt label - just go ahead') +        return devpath +      try:          statret = os.stat(devpath)      except OSError as exc: @@ -231,6 +243,16 @@ def handle(name, cfg, _cloud, log, args):      (devpth, fs_type, mount_point) = result +    # if we have a zfs then our device path at this point +    # is the zfs label. For example: vmzroot/ROOT/freebsd +    # we will have to get the zpool name out of this +    # and set the resize_what variable to the zpool +    # so the _resize_zfs function gets the right attribute. +    if fs_type == 'zfs': +        zpool = devpth.split('/')[0] +        devpth = util.get_device_info_from_zpool(zpool) +        resize_what = zpool +      info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)      log.debug("resize_info: %s" % info) diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 449872f0..539cbd5d 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -39,8 +39,10 @@ schema = {          using ``sh``.          .. note:: -        all commands must be proper yaml, so you have to quote any characters -        yaml would eat (':' can be problematic)"""), + +          all commands must be proper yaml, so you have to quote any characters +          yaml would eat (':' can be problematic) +    """),      'distros': distros,      'examples': [dedent("""\          runcmd: diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 2b388372..d6a21d72 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -12,7 +12,9 @@ key is present in the config parts, then salt minion will be installed and  started. Configuration for salt minion can be specified in the ``conf`` key  under ``salt_minion``. Any conf values present there will be assigned in  ``/etc/salt/minion``. 
The public and private keys to use for salt minion can be -specified with ``public_key`` and ``private_key`` respectively. +specified with ``public_key`` and ``private_key`` respectively. Optionally if +you have a custom package name, service name or config directory you can +specify them with ``pkg_name``, ``service_name`` and ``config_dir``.  **Internal name:** ``cc_salt_minion`` @@ -23,8 +25,14 @@ specified with ``public_key`` and ``private_key`` respectively.  **Config keys**::      salt_minion: +        pkg_name: 'salt-minion' +        service_name: 'salt-minion' +        config_dir: '/etc/salt'          conf:              master: salt.example.com +        grains: +            role: +                - web          public_key: |              ------BEGIN PUBLIC KEY-------              <key data> @@ -39,7 +47,34 @@ import os  from cloudinit import util -# Note: see http://saltstack.org/topics/installation/ +# Note: see https://docs.saltstack.com/en/latest/topics/installation/ +# Note: see https://docs.saltstack.com/en/latest/ref/configuration/ + + +class SaltConstants(object): +    """ +    defines default distribution specific salt variables +    """ +    def __init__(self, cfg): + +        # constants tailored for FreeBSD +        if util.is_FreeBSD(): +            self.pkg_name = 'py27-salt' +            self.srv_name = 'salt_minion' +            self.conf_dir = '/usr/local/etc/salt' +        # constants for any other OS +        else: +            self.pkg_name = 'salt-minion' +            self.srv_name = 'salt-minion' +            self.conf_dir = '/etc/salt' + +        # if there are constants given in cloud config use those +        self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name', +                                                self.pkg_name) +        self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir', +                                                self.conf_dir) +        self.srv_name = util.get_cfg_option_str(cfg, 'service_name', +       
                                         self.srv_name)  def handle(name, cfg, cloud, log, _args): @@ -49,39 +84,49 @@ def handle(name, cfg, cloud, log, _args):                     " no 'salt_minion' key in configuration"), name)          return -    salt_cfg = cfg['salt_minion'] +    s_cfg = cfg['salt_minion'] +    const = SaltConstants(cfg=s_cfg)      # Start by installing the salt package ... -    cloud.distro.install_packages(('salt-minion',)) +    cloud.distro.install_packages(const.pkg_name)      # Ensure we can configure files at the right dir -    config_dir = salt_cfg.get("config_dir", '/etc/salt') -    util.ensure_dir(config_dir) +    util.ensure_dir(const.conf_dir)      # ... and then update the salt configuration -    if 'conf' in salt_cfg: -        # Add all sections from the conf object to /etc/salt/minion -        minion_config = os.path.join(config_dir, 'minion') -        minion_data = util.yaml_dumps(salt_cfg.get('conf')) +    if 'conf' in s_cfg: +        # Add all sections from the conf object to minion config file +        minion_config = os.path.join(const.conf_dir, 'minion') +        minion_data = util.yaml_dumps(s_cfg.get('conf'))          util.write_file(minion_config, minion_data) +    if 'grains' in s_cfg: +        # add grains to /etc/salt/grains +        grains_config = os.path.join(const.conf_dir, 'grains') +        grains_data = util.yaml_dumps(s_cfg.get('grains')) +        util.write_file(grains_config, grains_data) +      # ... 
copy the key pair if specified -    if 'public_key' in salt_cfg and 'private_key' in salt_cfg: -        if os.path.isdir("/etc/salt/pki/minion"): -            pki_dir_default = "/etc/salt/pki/minion" -        else: -            pki_dir_default = "/etc/salt/pki" +    if 'public_key' in s_cfg and 'private_key' in s_cfg: +        pki_dir_default = os.path.join(const.conf_dir, "pki/minion") +        if not os.path.isdir(pki_dir_default): +            pki_dir_default = os.path.join(const.conf_dir, "pki") -        pki_dir = salt_cfg.get('pki_dir', pki_dir_default) +        pki_dir = s_cfg.get('pki_dir', pki_dir_default)          with util.umask(0o77):              util.ensure_dir(pki_dir)              pub_name = os.path.join(pki_dir, 'minion.pub')              pem_name = os.path.join(pki_dir, 'minion.pem') -            util.write_file(pub_name, salt_cfg['public_key']) -            util.write_file(pem_name, salt_cfg['private_key']) +            util.write_file(pub_name, s_cfg['public_key']) +            util.write_file(pem_name, s_cfg['private_key']) + +    # we need to have the salt minion service enabled in rc in order to be +    # able to start the service. this does only apply on FreeBSD servers. +    if cloud.distro.osfamily == 'freebsd': +        cloud.distro.updatercconf('salt_minion_enable', 'YES') -    # restart salt-minion.  'service' will start even if not started.  if it +    # restart salt-minion. 'service' will start even if not started. if it      # was started, it needs to be restarted for config change. -    util.subp(['service', 'salt-minion', 'restart'], capture=False) +    util.subp(['service', const.srv_name, 'restart'], capture=False)  # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index aa3dfe5f..3d2b2da3 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -32,22 +32,51 @@ will be used.      
hostname: <fqdn/hostname>  """ +import os + + +from cloudinit.atomic_helper import write_json  from cloudinit import util +class SetHostnameError(Exception): +    """Raised when the distro runs into an exception when setting hostname. + +    This may happen if we attempt to set the hostname early in cloud-init's +    init-local timeframe as certain services may not be running yet. +    """ +    pass + +  def handle(name, cfg, cloud, log, _args):      if util.get_cfg_option_bool(cfg, "preserve_hostname", False):          log.debug(("Configuration option 'preserve_hostname' is set,"                     " not setting the hostname in module %s"), name)          return -      (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) +    # Check for previous successful invocation of set-hostname + +    # set-hostname artifact file accounts for both hostname and fqdn +    # deltas. As such, it's format is different than cc_update_hostname's +    # previous-hostname file which only contains the base hostname. +    # TODO consolidate previous-hostname and set-hostname artifact files and +    # distro._read_hostname implementation so we only validate  one artifact. +    prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname") +    prev_hostname = {} +    if os.path.exists(prev_fn): +        prev_hostname = util.load_json(util.load_file(prev_fn)) +    hostname_changed = (hostname != prev_hostname.get('hostname') or +                        fqdn != prev_hostname.get('fqdn')) +    if not hostname_changed: +        log.debug('No hostname changes. 
Skipping set-hostname') +        return +    log.debug("Setting the hostname to %s (%s)", fqdn, hostname)      try: -        log.debug("Setting the hostname to %s (%s)", fqdn, hostname)          cloud.distro.set_hostname(hostname, fqdn) -    except Exception: -        util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, -                    hostname) -        raise +    except Exception as e: +        msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) +        util.logexc(log, msg) +        raise SetHostnameError("%s: %s" % (msg, e)) +    write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})  # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py new file mode 100644 index 00000000..34a53fd4 --- /dev/null +++ b/cloudinit/config/cc_snap.py @@ -0,0 +1,230 @@ +# Copyright (C) 2018 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Snap: Install, configure and manage snapd and snap packages.""" + +import sys +from textwrap import dedent + +from cloudinit import log as logging +from cloudinit.config.schema import ( +    get_schema_doc, validate_cloudconfig_schema) +from cloudinit.settings import PER_INSTANCE +from cloudinit.subp import prepend_base_command +from cloudinit import util + + +distros = ['ubuntu'] +frequency = PER_INSTANCE + +LOG = logging.getLogger(__name__) + +schema = { +    'id': 'cc_snap', +    'name': 'Snap', +    'title': 'Install, configure and manage snapd and snap packages', +    'description': dedent("""\ +        This module provides a simple configuration namespace in cloud-init to +        both setup snapd and install snaps. + +        .. note:: +            Both ``assertions`` and ``commands`` values can be either a +            dictionary or a list. 
If these configs are provided as a +            dictionary, the keys are only used to order the execution of the +            assertions or commands and the dictionary is merged with any +            vendor-data snap configuration provided. If a list is provided by +            the user instead of a dict, any vendor-data snap configuration is +            ignored. + +        The ``assertions`` configuration option is a dictionary or list of +        properly-signed snap assertions which will run before any snap +        ``commands``. They will be added to snapd's assertion database by +        invoking ``snap ack <aggregate_assertion_file>``. + +        Snap ``commands`` is a dictionary or list of individual snap +        commands to run on the target system. These commands can be used to +        create snap users, install snaps and provide snap configuration. + +        .. note:: +            If 'side-loading' private/unpublished snaps on an instance, it is +            best to create a snap seed directory and seed.yaml manifest in +            **/var/lib/snapd/seed/** which snapd automatically installs on +            startup. + +        **Development only**: The ``squashfuse_in_container`` boolean can be +        set true to install squashfuse package when in a container to enable +        snap installs. Default is false. 
+        """), +    'distros': distros, +    'examples': [dedent("""\ +        snap: +            assertions: +              00: | +              signed_assertion_blob_here +              02: | +              signed_assertion_blob_here +            commands: +              00: snap create-user --sudoer --known <snap-user>@mydomain.com +              01: snap install canonical-livepatch +              02: canonical-livepatch enable <AUTH_TOKEN> +    """), dedent("""\ +        # LXC-based containers require squashfuse before snaps can be installed +        snap: +            commands: +                00: apt-get install squashfuse -y +                11: snap install emoj + +    """), dedent("""\ +        # Convenience: the snap command can be omitted when specifying commands +        # as a list and 'snap' will automatically be prepended. +        # The following commands are equivalent: +        snap: +            commands: +                00: ['install', 'vlc'] +                01: ['snap', 'install', 'vlc'] +                02: snap install vlc +                03: 'snap install vlc' +    """)], +    'frequency': PER_INSTANCE, +    'type': 'object', +    'properties': { +        'snap': { +            'type': 'object', +            'properties': { +                'assertions': { +                    'type': ['object', 'array'],  # Array of strings or dict +                    'items': {'type': 'string'}, +                    'additionalItems': False,  # Reject items non-string +                    'minItems': 1, +                    'minProperties': 1, +                    'uniqueItems': True +                }, +                'commands': { +                    'type': ['object', 'array'],  # Array of strings or dict +                    'items': { +                        'oneOf': [ +                            {'type': 'array', 'items': {'type': 'string'}}, +                            {'type': 'string'}] +                    }, +                    
'additionalItems': False,  # Reject non-string & non-list +                    'minItems': 1, +                    'minProperties': 1, +                    'uniqueItems': True +                }, +                'squashfuse_in_container': { +                    'type': 'boolean' +                } +            }, +            'additionalProperties': False,  # Reject keys not in schema +            'required': [], +            'minProperties': 1 +        } +    } +} + +# TODO schema for 'assertions' and 'commands' are too permissive at the moment. +# Once python-jsonschema supports schema draft 6 add support for arbitrary +# object keys with 'patternProperties' constraint to validate string values. + +__doc__ = get_schema_doc(schema)  # Supplement python help() + +SNAP_CMD = "snap" +ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" + + +def add_assertions(assertions): +    """Import list of assertions. + +    Import assertions by concatenating each assertion into a +    string separated by a '\n'.  Write this string to a instance file and +    then invoke `snap ack /path/to/file` and check for errors. +    If snap exits 0, then all assertions are imported. +    """ +    if not assertions: +        return +    LOG.debug('Importing user-provided snap assertions') +    if isinstance(assertions, dict): +        assertions = assertions.values() +    elif not isinstance(assertions, list): +        raise TypeError( +            'assertion parameter was not a list or dict: {assertions}'.format( +                assertions=assertions)) + +    snap_cmd = [SNAP_CMD, 'ack'] +    combined = "\n".join(assertions) + +    for asrt in assertions: +        LOG.debug('Snap acking: %s', asrt.split('\n')[0:2]) + +    util.write_file(ASSERTIONS_FILE, combined.encode('utf-8')) +    util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True) + + +def run_commands(commands): +    """Run the provided commands provided in snap:commands configuration. 
+ +     Commands are run individually. Any errors are collected and reported +     after attempting all commands. + +     @param commands: A list or dict containing commands to run. Keys of a +         dict will be used to order the commands provided as dict values. +     """ +    if not commands: +        return +    LOG.debug('Running user-provided snap commands') +    if isinstance(commands, dict): +        # Sort commands based on dictionary key +        commands = [v for _, v in sorted(commands.items())] +    elif not isinstance(commands, list): +        raise TypeError( +            'commands parameter was not a list or dict: {commands}'.format( +                commands=commands)) + +    fixed_snap_commands = prepend_base_command('snap', commands) + +    cmd_failures = [] +    for command in fixed_snap_commands: +        shell = isinstance(command, str) +        try: +            util.subp(command, shell=shell, status_cb=sys.stderr.write) +        except util.ProcessExecutionError as e: +            cmd_failures.append(str(e)) +    if cmd_failures: +        msg = 'Failures running snap commands:\n{cmd_failures}'.format( +            cmd_failures=cmd_failures) +        util.logexc(LOG, msg) +        raise RuntimeError(msg) + + +# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function. 
+def maybe_install_squashfuse(cloud): +    """Install squashfuse if we are in a container.""" +    if not util.is_container(): +        return +    try: +        cloud.distro.update_package_sources() +    except Exception as e: +        util.logexc(LOG, "Package update failed") +        raise +    try: +        cloud.distro.install_packages(['squashfuse']) +    except Exception as e: +        util.logexc(LOG, "Failed to install squashfuse") +        raise + + +def handle(name, cfg, cloud, log, args): +    cfgin = cfg.get('snap', {}) +    if not cfgin: +        LOG.debug(("Skipping module named %s," +                   " no 'snap' key in configuration"), name) +        return + +    validate_cloudconfig_schema(cfg, schema) +    if util.is_true(cfgin.get('squashfuse_in_container', False)): +        maybe_install_squashfuse(cloud) +    add_assertions(cfgin.get('assertions', [])) +    run_commands(cfgin.get('commands', [])) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py index e82c0811..afe297ee 100644 --- a/cloudinit/config/cc_snap_config.py +++ b/cloudinit/config/cc_snap_config.py @@ -4,11 +4,15 @@  #  # This file is part of cloud-init. See LICENSE file for license information. +# RELEASE_BLOCKER: Remove this deprecated module in 18.3  """  Snap Config  -----------  **Summary:** snap_config modules allows configuration of snapd. +**Deprecated**: Use :ref:`snap` module instead. This module will not exist +in cloud-init 18.3. +  This module uses the same ``snappy`` namespace for configuration but  acts only only a subset of the configuration. @@ -154,6 +158,9 @@ def handle(name, cfg, cloud, log, args):          LOG.debug('No snappy config provided, skipping')          return +    log.warning( +        'DEPRECATION: snap_config module will be dropped in 18.3 release.' 
+        ' Use snap module instead')      if not(util.system_is_snappy()):          LOG.debug("%s: system not snappy", name)          return diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index eecb8178..bab80bbe 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -1,10 +1,14 @@  # This file is part of cloud-init. See LICENSE file for license information. +# RELEASE_BLOCKER: Remove this deprecated module in 18.3  """  Snappy  ------  **Summary:** snappy modules allows configuration of snappy. +**Deprecated**: Use :ref:`snap` module instead. This module will not exist +in cloud-init 18.3. +  The below example config config would install ``etcd``, and then install  ``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has  ``config-blob`` inside it. If ``pkgname`` is installed already, then @@ -271,6 +275,10 @@ def handle(name, cfg, cloud, log, args):          LOG.debug("%s: 'auto' mode, and system not snappy", name)          return +    log.warning( +        'DEPRECATION: snappy module will be dropped in 18.3 release.' 
+        ' Use snap module instead') +      set_snappy_command()      pkg_ops = get_package_ops(packages=mycfg['packages'], diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 35d8c57f..98b0e665 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -77,11 +77,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',      tbl = SimpleTable(tbl_fields)      for entry in key_entries:          if _is_printable_key(entry): -            row = [] -            row.append(entry.keytype or '-') -            row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') -            row.append(entry.options or '-') -            row.append(entry.comment or '-') +            row = [entry.keytype or '-', +                   _gen_fingerprint(entry.base64, hash_meth) or '-', +                   entry.options or '-', +                   entry.comment or '-']              tbl.add_row(row)      authtbl_s = tbl.get_string()      authtbl_lines = authtbl_s.splitlines() diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py new file mode 100644 index 00000000..16b1868b --- /dev/null +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -0,0 +1,173 @@ +# Copyright (C) 2018 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" + +import sys +from textwrap import dedent + +from cloudinit import log as logging +from cloudinit.config.schema import ( +    get_schema_doc, validate_cloudconfig_schema) +from cloudinit.settings import PER_INSTANCE +from cloudinit.subp import prepend_base_command +from cloudinit import util + + +distros = ['ubuntu'] +frequency = PER_INSTANCE + +LOG = logging.getLogger(__name__) + +schema = { +    'id': 'cc_ubuntu_advantage', +    'name': 'Ubuntu Advantage', +    'title': 'Install, configure and manage ubuntu-advantage offerings', +    'description': dedent("""\ +        This module provides configuration options to setup ubuntu-advantage +        subscriptions. + +        .. note:: +            Both ``commands`` value can be either a dictionary or a list. If +            the configuration provided is a dictionary, the keys are only used +            to order the execution of the commands and the dictionary is +            merged with any vendor-data ubuntu-advantage configuration +            provided. If a ``commands`` is provided as a list, any vendor-data +            ubuntu-advantage ``commands`` are ignored. + +        Ubuntu-advantage ``commands`` is a dictionary or list of +        ubuntu-advantage commands to run on the deployed machine. +        These commands can be used to enable or disable subscriptions to +        various ubuntu-advantage products. See 'man ubuntu-advantage' for more +        information on supported subcommands. + +        .. note:: +           Each command item can be a string or list. If the item is a list, +           'ubuntu-advantage' can be omitted and it will automatically be +           inserted as part of the command. 
+        """), +    'distros': distros, +    'examples': [dedent("""\ +        # Enable Extended Security Maintenance using your service auth token +        ubuntu-advantage: +            commands: +              00: ubuntu-advantage enable-esm <token> +    """), dedent("""\ +        # Enable livepatch by providing your livepatch token +        ubuntu-advantage: +            commands: +                00: ubuntu-advantage enable-livepatch <livepatch-token> + +    """), dedent("""\ +        # Convenience: the ubuntu-advantage command can be omitted when +        # specifying commands as a list and 'ubuntu-advantage' will +        # automatically be prepended. +        # The following commands are equivalent +        ubuntu-advantage: +            commands: +                00: ['enable-livepatch', 'my-token'] +                01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] +                02: ubuntu-advantage enable-livepatch my-token +                03: 'ubuntu-advantage enable-livepatch my-token' +    """)], +    'frequency': PER_INSTANCE, +    'type': 'object', +    'properties': { +        'ubuntu-advantage': { +            'type': 'object', +            'properties': { +                'commands': { +                    'type': ['object', 'array'],  # Array of strings or dict +                    'items': { +                        'oneOf': [ +                            {'type': 'array', 'items': {'type': 'string'}}, +                            {'type': 'string'}] +                    }, +                    'additionalItems': False,  # Reject non-string & non-list +                    'minItems': 1, +                    'minProperties': 1, +                    'uniqueItems': True +                } +            }, +            'additionalProperties': False,  # Reject keys not in schema +            'required': ['commands'] +        } +    } +} + +# TODO schema for 'assertions' and 'commands' are too permissive at the moment. 
+# Once python-jsonschema supports schema draft 6 add support for arbitrary +# object keys with 'patternProperties' constraint to validate string values. + +__doc__ = get_schema_doc(schema)  # Supplement python help() + +UA_CMD = "ubuntu-advantage" + + +def run_commands(commands): +    """Run the commands provided in ubuntu-advantage:commands config. + +     Commands are run individually. Any errors are collected and reported +     after attempting all commands. + +     @param commands: A list or dict containing commands to run. Keys of a +         dict will be used to order the commands provided as dict values. +     """ +    if not commands: +        return +    LOG.debug('Running user-provided ubuntu-advantage commands') +    if isinstance(commands, dict): +        # Sort commands based on dictionary key +        commands = [v for _, v in sorted(commands.items())] +    elif not isinstance(commands, list): +        raise TypeError( +            'commands parameter was not a list or dict: {commands}'.format( +                commands=commands)) + +    fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) + +    cmd_failures = [] +    for command in fixed_ua_commands: +        shell = isinstance(command, str) +        try: +            util.subp(command, shell=shell, status_cb=sys.stderr.write) +        except util.ProcessExecutionError as e: +            cmd_failures.append(str(e)) +    if cmd_failures: +        msg = ( +            'Failures running ubuntu-advantage commands:\n' +            '{cmd_failures}'.format( +                cmd_failures=cmd_failures)) +        util.logexc(LOG, msg) +        raise RuntimeError(msg) + + +def maybe_install_ua_tools(cloud): +    """Install ubuntu-advantage-tools if not present.""" +    if util.which('ubuntu-advantage'): +        return +    try: +        cloud.distro.update_package_sources() +    except Exception as e: +        util.logexc(LOG, "Package update failed") +        raise +    try: +        
cloud.distro.install_packages(['ubuntu-advantage-tools']) +    except Exception as e: +        util.logexc(LOG, "Failed to install ubuntu-advantage-tools") +        raise + + +def handle(name, cfg, cloud, log, args): +    cfgin = cfg.get('ubuntu-advantage') +    if cfgin is None: +        LOG.debug(("Skipping module named %s," +                   " no 'ubuntu-advantage' key in configuration"), name) +        return + +    validate_cloudconfig_schema(cfg, schema) +    maybe_install_ua_tools(cloud) +    run_commands(cfgin.get('commands', [])) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py new file mode 100644 index 00000000..c5b4a9de --- /dev/null +++ b/cloudinit/config/tests/test_snap.py @@ -0,0 +1,490 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import re +from six import StringIO + +from cloudinit.config.cc_snap import ( +    ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, +    run_commands, schema) +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import util +from cloudinit.tests.helpers import ( +    CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) + + +SYSTEM_USER_ASSERTION = """\ +type: system-user +authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +email: foo@bar.com +password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt +series: +- 16 +since: 2016-09-10T16:34:00+03:00 +until: 2017-11-10T16:34:00+03:00 +username: baz +sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj + +AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP +Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI +zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF +s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj 
++to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP +Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS +d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q +BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H +f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V +v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""" + +ACCOUNT_ASSERTION = """\ +type: account-key +authority-id: canonical +revision: 2 +public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 +account-id: canonical +name: store +since: 2016-04-01T00:00:00.0Z +body-length: 717 +sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH + +AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j +qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 +vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ +UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK +Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG +o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl +VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 +2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an +Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc +vUvV7RjVzv17ut0AEQEAAQ== + +AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM +WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b +nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL +3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL +eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY +inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ +rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE +aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ +6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO +haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF +yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 +HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi +skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK +CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde +ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF +qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR +IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t +oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""" + + +class FakeCloud(object): +    def __init__(self, distro): +        self.distro = distro + + +class TestAddAssertions(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestAddAssertions, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('cloudinit.config.cc_snap.util.subp') +    def test_add_assertions_on_empty_list(self, m_subp): +        """When provided with an empty list, add_assertions does nothing.""" +        add_assertions([]) +        self.assertEqual('', self.logs.getvalue()) +        m_subp.assert_not_called() + +    def test_add_assertions_on_non_list_or_dict(self): +        """When provided an invalid type, add_assertions raises an error.""" +        with self.assertRaises(TypeError) as context_manager: +            add_assertions(assertions="I'm Not Valid") +        self.assertEqual( +            "assertion parameter was not a list or dict: I'm Not Valid", +            str(context_manager.exception)) + +    
@mock.patch('cloudinit.config.cc_snap.util.subp') +    def test_add_assertions_adds_assertions_as_list(self, m_subp): +        """When provided with a list, add_assertions adds all assertions.""" +        self.assertEqual( +            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') +        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) +        assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION] +        wrap_and_call( +            'cloudinit.config.cc_snap', +            {'ASSERTIONS_FILE': {'new': assert_file}}, +            add_assertions, assertions) +        self.assertIn( +            'Importing user-provided snap assertions', self.logs.getvalue()) +        self.assertIn( +            'sertions', self.logs.getvalue()) +        self.assertEqual( +            [mock.call(['snap', 'ack', assert_file], capture=True)], +            m_subp.call_args_list) +        compare_file = self.tmp_path('comparison', dir=self.tmp) +        util.write_file(compare_file, '\n'.join(assertions).encode('utf-8')) +        self.assertEqual( +            util.load_file(compare_file), util.load_file(assert_file)) + +    @mock.patch('cloudinit.config.cc_snap.util.subp') +    def test_add_assertions_adds_assertions_as_dict(self, m_subp): +        """When provided with a dict, add_assertions adds all assertions.""" +        self.assertEqual( +            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') +        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) +        assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION} +        wrap_and_call( +            'cloudinit.config.cc_snap', +            {'ASSERTIONS_FILE': {'new': assert_file}}, +            add_assertions, assertions) +        self.assertIn( +            'Importing user-provided snap assertions', self.logs.getvalue()) +        self.assertIn( +            "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv", +            self.logs.getvalue()) +   
     self.assertIn( +            "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic", +            self.logs.getvalue()) +        self.assertEqual( +            [mock.call(['snap', 'ack', assert_file], capture=True)], +            m_subp.call_args_list) +        compare_file = self.tmp_path('comparison', dir=self.tmp) +        combined = '\n'.join(assertions.values()) +        util.write_file(compare_file, combined.encode('utf-8')) +        self.assertEqual( +            util.load_file(compare_file), util.load_file(assert_file)) + + +class TestRunCommands(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestRunCommands, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('cloudinit.config.cc_snap.util.subp') +    def test_run_commands_on_empty_list(self, m_subp): +        """When provided with an empty list, run_commands does nothing.""" +        run_commands([]) +        self.assertEqual('', self.logs.getvalue()) +        m_subp.assert_not_called() + +    def test_run_commands_on_non_list_or_dict(self): +        """When provided an invalid type, run_commands raises an error.""" +        with self.assertRaises(TypeError) as context_manager: +            run_commands(commands="I'm Not Valid") +        self.assertEqual( +            "commands parameter was not a list or dict: I'm Not Valid", +            str(context_manager.exception)) + +    def test_run_command_logs_commands_and_exit_codes_to_stderr(self): +        """All exit codes are logged to stderr.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'bogus command' +        cmd3 = 'echo "MOM" >> %s' % outfile +        commands = [cmd1, cmd2, cmd3] + +        mock_path = 'cloudinit.config.cc_snap.sys.stderr' +        with mock.patch(mock_path, new_callable=StringIO) as m_stderr: +            with self.assertRaises(RuntimeError) as context_manager: +                
run_commands(commands=commands) + +        self.assertIsNotNone( +            re.search(r'bogus: (command )?not found', +                      str(context_manager.exception)), +            msg='Expected bogus command not found') +        expected_stderr_log = '\n'.join([ +            'Begin run command: {cmd}'.format(cmd=cmd1), +            'End run command: exit(0)', +            'Begin run command: {cmd}'.format(cmd=cmd2), +            'ERROR: End run command: exit(127)', +            'Begin run command: {cmd}'.format(cmd=cmd3), +            'End run command: exit(0)\n']) +        self.assertEqual(expected_stderr_log, m_stderr.getvalue()) + +    def test_run_command_as_lists(self): +        """When commands are specified as a list, run them in order.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'echo "MOM" >> %s' % outfile +        commands = [cmd1, cmd2] +        mock_path = 'cloudinit.config.cc_snap.sys.stderr' +        with mock.patch(mock_path, new_callable=StringIO): +            run_commands(commands=commands) + +        self.assertIn( +            'DEBUG: Running user-provided snap commands', +            self.logs.getvalue()) +        self.assertEqual('HI\nMOM\n', util.load_file(outfile)) +        self.assertIn( +            'WARNING: Non-snap commands in snap config:', self.logs.getvalue()) + +    def test_run_command_dict_sorted_as_command_script(self): +        """When commands are a dict, sort them and run.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'echo "MOM" >> %s' % outfile +        commands = {'02': cmd1, '01': cmd2} +        mock_path = 'cloudinit.config.cc_snap.sys.stderr' +        with mock.patch(mock_path, new_callable=StringIO): +            run_commands(commands=commands) + +        expected_messages = [ +            'DEBUG: Running user-provided snap commands'] +        for 
message in expected_messages: +            self.assertIn(message, self.logs.getvalue()) +        self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + + +@skipUnlessJsonSchema() +class TestSchema(CiTestCase): + +    with_logs = True + +    def test_schema_warns_on_snap_not_as_dict(self): +        """If the snap configuration is not a dict, emit a warning.""" +        validate_cloudconfig_schema({'snap': 'wrong type'}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap: 'wrong type' is not of type" +            " 'object'\n", +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    def test_schema_disallows_unknown_keys(self, _): +        """Unknown keys in the snap configuration emit warnings.""" +        validate_cloudconfig_schema( +            {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema) +        self.assertIn( +            'WARNING: Invalid config:\nsnap: Additional properties are not' +            " allowed ('invalid-key' was unexpected)", +            self.logs.getvalue()) + +    def test_warn_schema_requires_either_commands_or_assertions(self): +        """Warn when snap configuration lacks both commands and assertions.""" +        validate_cloudconfig_schema( +            {'snap': {}}, schema) +        self.assertIn( +            'WARNING: Invalid config:\nsnap: {} does not have enough' +            ' properties', +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    def test_warn_schema_commands_is_not_list_or_dict(self, _): +        """Warn when snap:commands config is not a list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'commands': 'broken'}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type" +            " 'object', 'array'\n", +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    
def test_warn_schema_when_commands_is_empty(self, _): +        """Emit warnings when snap:commands is an empty list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'commands': []}}, schema) +        validate_cloudconfig_schema( +            {'snap': {'commands': {}}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap.commands: [] is too short\n" +            "WARNING: Invalid config:\nsnap.commands: {} does not have enough" +            " properties\n", +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    def test_schema_when_commands_are_list_or_dict(self, _): +        """No warnings when snap:commands are either a list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'commands': ['valid']}}, schema) +        validate_cloudconfig_schema( +            {'snap': {'commands': {'01': 'also valid'}}}, schema) +        self.assertEqual('', self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.add_assertions') +    def test_warn_schema_assertions_is_not_list_or_dict(self, _): +        """Warn when snap:assertions config is not a list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'assertions': 'broken'}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of" +            " type 'object', 'array'\n", +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.add_assertions') +    def test_warn_schema_when_assertions_is_empty(self, _): +        """Emit warnings when snap:assertions is an empty list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'assertions': []}}, schema) +        validate_cloudconfig_schema( +            {'snap': {'assertions': {}}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap.assertions: [] is too short\n" +            "WARNING: Invalid 
config:\nsnap.assertions: {} does not have" +            " enough properties\n", +            self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.add_assertions') +    def test_schema_when_assertions_are_list_or_dict(self, _): +        """No warnings when snap:assertions are a list or dict.""" +        validate_cloudconfig_schema( +            {'snap': {'assertions': ['valid']}}, schema) +        validate_cloudconfig_schema( +            {'snap': {'assertions': {'01': 'also valid'}}}, schema) +        self.assertEqual('', self.logs.getvalue()) + + +class TestHandle(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestHandle, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    @mock.patch('cloudinit.config.cc_snap.add_assertions') +    @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema') +    def test_handle_no_config(self, m_schema, m_add, m_run): +        """When no snap-related configuration is provided, nothing happens.""" +        cfg = {} +        handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) +        self.assertIn( +            "DEBUG: Skipping module named snap, no 'snap' key in config", +            self.logs.getvalue()) +        m_schema.assert_not_called() +        m_add.assert_not_called() +        m_run.assert_not_called() + +    @mock.patch('cloudinit.config.cc_snap.run_commands') +    @mock.patch('cloudinit.config.cc_snap.add_assertions') +    @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') +    def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add, +                                                       m_run): +        """When squashfuse_in_container is unset, don't attempt to install.""" +        handle( +            'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None) +        handle( +            'snap', cfg={'snap': {'squashfuse_in_container': None}}, +           
 cloud=None, log=self.logger, args=None) +        handle( +            'snap', cfg={'snap': {'squashfuse_in_container': False}}, +            cloud=None, log=self.logger, args=None) +        self.assertEqual([], m_squash.call_args_list)  # No calls +        # snap configuration missing assertions and commands will default to [] +        self.assertIn(mock.call([]), m_add.call_args_list) +        self.assertIn(mock.call([]), m_run.call_args_list) + +    @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') +    def test_handle_tries_to_install_squashfuse(self, m_squash): +        """If squashfuse_in_container is True, try installing squashfuse.""" +        cfg = {'snap': {'squashfuse_in_container': True}} +        mycloud = FakeCloud(None) +        handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None) +        self.assertEqual( +            [mock.call(mycloud)], m_squash.call_args_list) + +    def test_handle_runs_commands_provided(self): +        """If commands are specified as a list, run them.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cfg = { +            'snap': {'commands': ['echo "HI" >> %s' % outfile, +                                  'echo "MOM" >> %s' % outfile]}} +        mock_path = 'cloudinit.config.cc_snap.sys.stderr' +        with mock.patch(mock_path, new_callable=StringIO): +            handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) +        self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + +    @mock.patch('cloudinit.config.cc_snap.util.subp') +    def test_handle_adds_assertions(self, m_subp): +        """Any configured snap assertions are provided to add_assertions.""" +        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) +        compare_file = self.tmp_path('comparison', dir=self.tmp) +        cfg = { +            'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}} +        wrap_and_call( +            'cloudinit.config.cc_snap', +   
         {'ASSERTIONS_FILE': {'new': assert_file}}, +            handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) +        content = '\n'.join(cfg['snap']['assertions']) +        util.write_file(compare_file, content.encode('utf-8')) +        self.assertEqual( +            util.load_file(compare_file), util.load_file(assert_file)) + +    @mock.patch('cloudinit.config.cc_snap.util.subp') +    @skipUnlessJsonSchema() +    def test_handle_validates_schema(self, m_subp): +        """Any provided configuration is runs validate_cloudconfig_schema.""" +        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) +        cfg = {'snap': {'invalid': ''}}  # Generates schema warning +        wrap_and_call( +            'cloudinit.config.cc_snap', +            {'ASSERTIONS_FILE': {'new': assert_file}}, +            handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None) +        self.assertEqual( +            "WARNING: Invalid config:\nsnap: Additional properties are not" +            " allowed ('invalid' was unexpected)\n", +            self.logs.getvalue()) + + +class TestMaybeInstallSquashFuse(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestMaybeInstallSquashFuse, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('cloudinit.config.cc_snap.util.is_container') +    def test_maybe_install_squashfuse_skips_non_containers(self, m_container): +        """maybe_install_squashfuse does nothing when not on a container.""" +        m_container.return_value = False +        maybe_install_squashfuse(cloud=FakeCloud(None)) +        self.assertEqual([mock.call()], m_container.call_args_list) +        self.assertEqual('', self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.util.is_container') +    def test_maybe_install_squashfuse_raises_install_errors(self, m_container): +        """maybe_install_squashfuse logs and raises package install errors.""" +        m_container.return_value = True 
+        distro = mock.MagicMock() +        distro.update_package_sources.side_effect = RuntimeError( +            'Some apt error') +        with self.assertRaises(RuntimeError) as context_manager: +            maybe_install_squashfuse(cloud=FakeCloud(distro)) +        self.assertEqual('Some apt error', str(context_manager.exception)) +        self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.util.is_container') +    def test_maybe_install_squashfuse_raises_update_errors(self, m_container): +        """maybe_install_squashfuse logs and raises package update errors.""" +        m_container.return_value = True +        distro = mock.MagicMock() +        distro.update_package_sources.side_effect = RuntimeError( +            'Some apt error') +        with self.assertRaises(RuntimeError) as context_manager: +            maybe_install_squashfuse(cloud=FakeCloud(distro)) +        self.assertEqual('Some apt error', str(context_manager.exception)) +        self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + +    @mock.patch('cloudinit.config.cc_snap.util.is_container') +    def test_maybe_install_squashfuse_happy_path(self, m_container): +        """maybe_install_squashfuse logs and raises package install errors.""" +        m_container.return_value = True +        distro = mock.MagicMock()  # No errors raised +        maybe_install_squashfuse(cloud=FakeCloud(distro)) +        self.assertEqual( +            [mock.call()], distro.update_package_sources.call_args_list) +        self.assertEqual( +            [mock.call(['squashfuse'])], +            distro.install_packages.call_args_list) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py new file mode 100644 index 00000000..f2a59faf --- /dev/null +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -0,0 +1,269 @@ +# This file is part of cloud-init. 
See LICENSE file for license information. + +import re +from six import StringIO + +from cloudinit.config.cc_ubuntu_advantage import ( +    handle, maybe_install_ua_tools, run_commands, schema) +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema + + +# Module path used in mocks +MPATH = 'cloudinit.config.cc_ubuntu_advantage' + + +class FakeCloud(object): +    def __init__(self, distro): +        self.distro = distro + + +class TestRunCommands(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestRunCommands, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('%s.util.subp' % MPATH) +    def test_run_commands_on_empty_list(self, m_subp): +        """When provided with an empty list, run_commands does nothing.""" +        run_commands([]) +        self.assertEqual('', self.logs.getvalue()) +        m_subp.assert_not_called() + +    def test_run_commands_on_non_list_or_dict(self): +        """When provided an invalid type, run_commands raises an error.""" +        with self.assertRaises(TypeError) as context_manager: +            run_commands(commands="I'm Not Valid") +        self.assertEqual( +            "commands parameter was not a list or dict: I'm Not Valid", +            str(context_manager.exception)) + +    def test_run_command_logs_commands_and_exit_codes_to_stderr(self): +        """All exit codes are logged to stderr.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'bogus command' +        cmd3 = 'echo "MOM" >> %s' % outfile +        commands = [cmd1, cmd2, cmd3] + +        mock_path = '%s.sys.stderr' % MPATH +        with mock.patch(mock_path, new_callable=StringIO) as m_stderr: +            with self.assertRaises(RuntimeError) as context_manager: +                run_commands(commands=commands) + +        
self.assertIsNotNone( +            re.search(r'bogus: (command )?not found', +                      str(context_manager.exception)), +            msg='Expected bogus command not found') +        expected_stderr_log = '\n'.join([ +            'Begin run command: {cmd}'.format(cmd=cmd1), +            'End run command: exit(0)', +            'Begin run command: {cmd}'.format(cmd=cmd2), +            'ERROR: End run command: exit(127)', +            'Begin run command: {cmd}'.format(cmd=cmd3), +            'End run command: exit(0)\n']) +        self.assertEqual(expected_stderr_log, m_stderr.getvalue()) + +    def test_run_command_as_lists(self): +        """When commands are specified as a list, run them in order.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'echo "MOM" >> %s' % outfile +        commands = [cmd1, cmd2] +        with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): +            run_commands(commands=commands) + +        self.assertIn( +            'DEBUG: Running user-provided ubuntu-advantage commands', +            self.logs.getvalue()) +        self.assertEqual('HI\nMOM\n', util.load_file(outfile)) +        self.assertIn( +            'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' +            ' config:', +            self.logs.getvalue()) + +    def test_run_command_dict_sorted_as_command_script(self): +        """When commands are a dict, sort them and run.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) +        cmd1 = 'echo "HI" >> %s' % outfile +        cmd2 = 'echo "MOM" >> %s' % outfile +        commands = {'02': cmd1, '01': cmd2} +        with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): +            run_commands(commands=commands) + +        expected_messages = [ +            'DEBUG: Running user-provided ubuntu-advantage commands'] +        for message in expected_messages: +            
self.assertIn(message, self.logs.getvalue()) +        self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + + +@skipUnlessJsonSchema() +class TestSchema(CiTestCase): + +    with_logs = True + +    def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): +        """If ubuntu-advantage configuration is not a dict, emit a warning.""" +        validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" +            " of type 'object'\n", +            self.logs.getvalue()) + +    @mock.patch('%s.run_commands' % MPATH) +    def test_schema_disallows_unknown_keys(self, _): +        """Unknown keys in ubuntu-advantage configuration emit warnings.""" +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, +            schema) +        self.assertIn( +            'WARNING: Invalid config:\nubuntu-advantage: Additional properties' +            " are not allowed ('invalid-key' was unexpected)", +            self.logs.getvalue()) + +    def test_warn_schema_requires_commands(self): +        """Warn when ubuntu-advantage configuration lacks commands.""" +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" +            " required property\n", +            self.logs.getvalue()) + +    @mock.patch('%s.run_commands' % MPATH) +    def test_warn_schema_commands_is_not_list_or_dict(self, _): +        """Warn when ubuntu-advantage:commands config is not a list or dict.""" +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': 'broken'}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" +            " not of type 'object', 'array'\n", +            self.logs.getvalue()) + +    
@mock.patch('%s.run_commands' % MPATH) +    def test_warn_schema_when_commands_is_empty(self, _): +        """Emit warnings when ubuntu-advantage:commands is empty.""" +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': []}}, schema) +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': {}}}, schema) +        self.assertEqual( +            "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" +            " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" +            " does not have enough properties\n", +            self.logs.getvalue()) + +    @mock.patch('%s.run_commands' % MPATH) +    def test_schema_when_commands_are_list_or_dict(self, _): +        """No warnings when ubuntu-advantage:commands are a list or dict.""" +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': ['valid']}}, schema) +        validate_cloudconfig_schema( +            {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) +        self.assertEqual('', self.logs.getvalue()) + + +class TestHandle(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestHandle, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('%s.run_commands' % MPATH) +    @mock.patch('%s.validate_cloudconfig_schema' % MPATH) +    def test_handle_no_config(self, m_schema, m_run): +        """When no ua-related configuration is provided, nothing happens.""" +        cfg = {} +        handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) +        self.assertIn( +            "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" +            " in config", +            self.logs.getvalue()) +        m_schema.assert_not_called() +        m_run.assert_not_called() + +    @mock.patch('%s.maybe_install_ua_tools' % MPATH) +    def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): +        """If ubuntu_advantage is 
provided, try installing ua-tools package.""" +        cfg = {'ubuntu-advantage': {}} +        mycloud = FakeCloud(None) +        handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) +        m_install.assert_called_once_with(mycloud) + +    @mock.patch('%s.maybe_install_ua_tools' % MPATH) +    def test_handle_runs_commands_provided(self, m_install): +        """When commands are specified as a list, run them.""" +        outfile = self.tmp_path('output.log', dir=self.tmp) + +        cfg = { +            'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, +                                              'echo "MOM" >> %s' % outfile]}} +        mock_path = '%s.sys.stderr' % MPATH +        with mock.patch(mock_path, new_callable=StringIO): +            handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) +        self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + + +class TestMaybeInstallUATools(CiTestCase): + +    with_logs = True + +    def setUp(self): +        super(TestMaybeInstallUATools, self).setUp() +        self.tmp = self.tmp_dir() + +    @mock.patch('%s.util.which' % MPATH) +    def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): +        """Do nothing if ubuntu-advantage-tools already exists.""" +        m_which.return_value = '/usr/bin/ubuntu-advantage'  # already installed +        distro = mock.MagicMock() +        distro.update_package_sources.side_effect = RuntimeError( +            'Some apt error') +        maybe_install_ua_tools(cloud=FakeCloud(distro))  # No RuntimeError + +    @mock.patch('%s.util.which' % MPATH) +    def test_maybe_install_ua_tools_raises_update_errors(self, m_which): +        """maybe_install_ua_tools logs and raises apt update errors.""" +        m_which.return_value = None +        distro = mock.MagicMock() +        distro.update_package_sources.side_effect = RuntimeError( +            'Some apt error') +        with self.assertRaises(RuntimeError) as 
context_manager: +            maybe_install_ua_tools(cloud=FakeCloud(distro)) +        self.assertEqual('Some apt error', str(context_manager.exception)) +        self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + +    @mock.patch('%s.util.which' % MPATH) +    def test_maybe_install_ua_raises_install_errors(self, m_which): +        """maybe_install_ua_tools logs and raises package install errors.""" +        m_which.return_value = None +        distro = mock.MagicMock() +        distro.update_package_sources.return_value = None +        distro.install_packages.side_effect = RuntimeError( +            'Some install error') +        with self.assertRaises(RuntimeError) as context_manager: +            maybe_install_ua_tools(cloud=FakeCloud(distro)) +        self.assertEqual('Some install error', str(context_manager.exception)) +        self.assertIn( +            'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) + +    @mock.patch('%s.util.which' % MPATH) +    def test_maybe_install_ua_tools_happy_path(self, m_which): +        """maybe_install_ua_tools installs ubuntu-advantage-tools.""" +        m_which.return_value = None +        distro = mock.MagicMock()  # No errors raised +        maybe_install_ua_tools(cloud=FakeCloud(distro)) +        distro.update_package_sources.assert_called_once_with() +        distro.install_packages.assert_called_once_with( +            ['ubuntu-advantage-tools']) + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index f87a3432..b814c8ba 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -129,11 +129,8 @@ class Distro(distros.Distro):          if pkgs is None:              pkgs = [] -        cmd = ['pacman'] +        cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"]          # Redirect output -        cmd.append("-Sy") -        cmd.append("--quiet") -        cmd.append("--noconfirm")          if args and isinstance(args, str):        
      cmd.append(args) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index aa468bca..754d3df6 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -132,6 +132,12 @@ class Distro(distros.Distro):          LOG.debug("Using network interface %s", bsddev)          return bsddev +    def _select_hostname(self, hostname, fqdn): +        # Should be FQDN if available. See rc.conf(5) in FreeBSD +        if fqdn: +            return fqdn +        return hostname +      def _read_system_hostname(self):          sys_hostname = self._read_hostname(filename=None)          return ('rc.conf', sys_hostname) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index a219e9fb..162dfa05 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -67,11 +67,10 @@ class Distro(distros.Distro):          if pkgs is None:              pkgs = [] -        cmd = ['zypper']          # No user interaction possible, enable non-interactive mode -        cmd.append('--non-interactive') +        cmd = ['zypper', '--non-interactive'] -        # Comand is the operation, such as install +        # Command is the operation, such as install          if command == 'upgrade':              command = 'update'          cmd.append(command) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index d6c61e4c..dc3f0fc3 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -135,10 +135,8 @@ class MetadataMaterializer(object):  def _skip_retry_on_codes(status_codes, _request_args, cause): -    """Returns if a request should retry based on a given set of codes that -    case retrying to be stopped/skipped. 
-    """ -    return cause.code in status_codes +    """Returns False if cause.code is in status_codes.""" +    return cause.code not in status_codes  def get_instance_userdata(api_version='latest', diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 7b2cc9db..9e9fe0fe 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -9,12 +9,15 @@ import base64  import glob  import gzip  import io +import os  from . import get_devicelist  from . import read_sys_net_safe  from cloudinit import util +_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +  def _klibc_to_config_entry(content, mac_addrs=None):      """Convert a klibc written shell content file to a 'config' entry @@ -103,9 +106,13 @@ def _klibc_to_config_entry(content, mac_addrs=None):      return name, iface +def _get_klibc_net_cfg_files(): +    return glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') + +  def config_from_klibc_net_cfg(files=None, mac_addrs=None):      if files is None: -        files = glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf') +        files = _get_klibc_net_cfg_files()      entries = []      names = {} @@ -160,10 +167,23 @@ def _b64dgz(b64str, gzipped="try"):      return _decomp_gzip(blob, strict=gzipped != "try") +def _is_initramfs_netconfig(files, cmdline): +    if files: +        if 'ip=' in cmdline or 'ip6=' in cmdline: +            return True +        if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): +            # iBft can configure networking without ip= +            return True +    return False + +  def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):      if cmdline is None:          cmdline = util.get_cmdline() +    if files is None: +        files = _get_klibc_net_cfg_files() +      if 'network-config=' in cmdline:          data64 = None          for tok in cmdline.split(): @@ -172,7 +192,7 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):          if 
data64:              return util.load_yaml(_b64dgz(data64)) -    if 'ip=' not in cmdline and 'ip6=' not in cmdline: +    if not _is_initramfs_netconfig(files, cmdline):          return None      if mac_addrs is None: diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index d3788af8..63443484 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -311,12 +311,12 @@ class Renderer(renderer.Renderer):                      if newname is None:                          continue                      br_config.update({newname: value}) -                    if newname == 'path-cost': -                        # <interface> <cost> -> <interface>: int(<cost>) +                    if newname in ['path-cost', 'port-priority']: +                        # <interface> <value> -> <interface>: int(<value>)                          newvalue = {} -                        for costval in value: -                            (port, cost) = costval.split() -                            newvalue[port] = int(cost) +                        for val in value: +                            (port, portval) = val.split() +                            newvalue[port] = int(portval)                          br_config.update({newname: newvalue})                  if len(br_config) > 0: @@ -336,22 +336,15 @@ class Renderer(renderer.Renderer):                  _extract_addresses(ifcfg, vlan)                  vlans.update({ifname: vlan}) -        # inject global nameserver values under each physical interface -        if nameservers: -            for _eth, cfg in ethernets.items(): -                nscfg = cfg.get('nameservers', {}) -                addresses = nscfg.get('addresses', []) -                addresses += nameservers -                nscfg.update({'addresses': addresses}) -                cfg.update({'nameservers': nscfg}) - -        if searchdomains: -            for _eth, cfg in ethernets.items(): -                nscfg = cfg.get('nameservers', {}) -                
search = nscfg.get('search', []) -                search += searchdomains -                nscfg.update({'search': search}) -                cfg.update({'nameservers': nscfg}) +        # inject global nameserver values under each all interface which +        # has addresses and do not already have a DNS configuration +        if nameservers or searchdomains: +            nscfg = {'addresses': nameservers, 'search': searchdomains} +            for section in [ethernets, wifis, bonds, bridges, vlans]: +                for _name, cfg in section.items(): +                    if 'nameservers' in cfg or 'addresses' not in cfg: +                        continue +                    cfg.update({'nameservers': nscfg})          # workaround yaml dictionary key sorting when dumping          def _render_section(name, section): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index fe667d88..6d63e5c5 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -47,7 +47,7 @@ NET_CONFIG_TO_V2 = {                 'bridge_maxage': 'max-age',                 'bridge_maxwait': None,                 'bridge_pathcost': 'path-cost', -               'bridge_portprio': None, +               'bridge_portprio': 'port-priority',                 'bridge_stp': 'stp',                 'bridge_waitport': None}} @@ -708,6 +708,7 @@ class NetworkStateInterpreter(object):          gateway4 = None          gateway6 = None +        nameservers = {}          for address in cfg.get('addresses', []):              subnet = {                  'type': 'static', @@ -723,6 +724,15 @@ class NetworkStateInterpreter(object):                      gateway4 = cfg.get('gateway4')                      subnet.update({'gateway': gateway4}) +            if 'nameservers' in cfg and not nameservers: +                addresses = cfg.get('nameservers').get('addresses') +                if addresses: +                    nameservers['dns_nameservers'] = addresses +          
      search = cfg.get('nameservers').get('search') +                if search: +                    nameservers['dns_search'] = search +                subnet.update(nameservers) +              subnets.append(subnet)          routes = [] diff --git a/cloudinit/settings.py b/cloudinit/settings.py index c120498f..dde5749d 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -36,6 +36,8 @@ CFG_BUILTIN = {          'SmartOS',          'Bigstep',          'Scaleway', +        'Hetzner', +        'IBMCloud',          # At the end to act as a 'catch' when none of the above work...          'None',      ], diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 7ac8288d..22279d09 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,7 +22,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):          super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)          self.seed_dir = os.path.join(paths.seed_dir, "AliYun") -    def get_hostname(self, fqdn=False, _resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          return self.metadata.get('hostname', 'localhost.localdomain')      def get_public_ssh_keys(self): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4bcbf3a4..0ee622e2 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -20,7 +20,7 @@ from cloudinit import net  from cloudinit.net.dhcp import EphemeralDHCPv4  from cloudinit import sources  from cloudinit.sources.helpers.azure import get_metadata_from_fabric -from cloudinit.url_helper import readurl, wait_for_url, UrlError +from cloudinit.url_helper import readurl, UrlError  from cloudinit import util  LOG = logging.getLogger(__name__) @@ -49,7 +49,6 @@ DEFAULT_FS = 'ext4'  AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'  REPROVISION_MARKER_FILE = 
"/var/lib/cloud/data/poll_imds"  IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" -IMDS_RETRIES = 5  def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -223,6 +222,8 @@ DEF_PASSWD_REDACTION = 'REDACTED'  def get_hostname(hostname_command='hostname'): +    if not isinstance(hostname_command, (list, tuple)): +        hostname_command = (hostname_command,)      return util.subp(hostname_command, capture=True)[0].strip() @@ -449,36 +450,24 @@ class DataSourceAzure(sources.DataSource):          headers = {"Metadata": "true"}          LOG.debug("Start polling IMDS") -        def sleep_cb(response, loop_n): -            return 1 - -        def exception_cb(msg, exception): +        def exc_cb(msg, exception):              if isinstance(exception, UrlError) and exception.code == 404: -                return -            LOG.warning("Exception during polling. Will try DHCP.", -                        exc_info=True) - +                return True              # If we get an exception while trying to call IMDS, we              # call DHCP and setup the ephemeral network to acquire the new IP. 
-            raise exception +            return False          need_report = report_ready -        for i in range(IMDS_RETRIES): +        while True:              try:                  with EphemeralDHCPv4() as lease:                      if need_report:                          self._report_ready(lease=lease)                          need_report = False -                    wait_for_url([url], max_wait=None, timeout=60, -                                 status_cb=LOG.info, -                                 headers_cb=lambda url: headers, sleep_time=1, -                                 exception_cb=exception_cb, -                                 sleep_time_cb=sleep_cb) -                    return str(readurl(url, headers=headers)) -            except Exception: -                LOG.debug("Exception during polling-retrying dhcp" + -                          " %d more time(s).", (IMDS_RETRIES - i), -                          exc_info=True) +                    return readurl(url, timeout=1, headers=headers, +                                   exception_cb=exc_cb, infinite=True).contents +            except UrlError: +                pass      def _report_ready(self, lease):          """Tells the fabric provisioning has completed diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 4eaad475..c816f349 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -84,7 +84,7 @@ class DataSourceCloudSigma(sources.DataSource):          return True -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          """          Cleans up and uses the server's name if the latter is set. Otherwise          the first part from uuid is being used. 
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8db6267..c7b5fe5f 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -14,6 +14,7 @@ from cloudinit import util  from cloudinit.net import eni +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform  from cloudinit.sources.helpers import openstack  LOG = logging.getLogger(__name__) @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True):      # an unpartitioned block device (ex sda, not sda1)      devices = [d for d in candidates                 if d in by_label or not util.is_partition(d)] + +    if devices: +        # IBMCloud uses config-2 label, but limited to a single UUID. +        ibm_platform, ibm_path = get_ibm_platform() +        if ibm_path in devices: +            devices.remove(ibm_path) +            LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", +                      ibm_path, ibm_platform) +      return devices diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 2da34a99..d8162623 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -90,7 +90,7 @@ class DataSourceGCE(sources.DataSource):          public_keys_data = self.metadata['public-keys-data']          return _parse_public_keys(public_keys_data, self.default_user) -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          # GCE has long FDQN's and has asked for short hostnames.          
return self.metadata['local-hostname'].split('.')[0] @@ -213,16 +213,15 @@ def read_md(address=None, platform_check=True):      if md['availability-zone']:          md['availability-zone'] = md['availability-zone'].split('/')[-1] -    encoding = instance_data.get('user-data-encoding') -    if encoding: +    if 'user-data' in instance_data: +        # instance_data was json, so values are all utf-8 strings. +        ud = instance_data['user-data'].encode("utf-8") +        encoding = instance_data.get('user-data-encoding')          if encoding == 'base64': -            md['user-data'] = b64decode(instance_data.get('user-data')) -        else: +            ud = b64decode(ud) +        elif encoding:              LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) - -    if 'user-data' in md: -        ret['user-data'] = md['user-data'] -        del md['user-data'] +        ret['user-data'] = ud      ret['meta-data'] = md      ret['success'] = True diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py new file mode 100644 index 00000000..5c75b65b --- /dev/null +++ b/cloudinit/sources/DataSourceHetzner.py @@ -0,0 +1,106 @@ +# Author: Jonas Keidel <jonas.keidel@hetzner.com> +# Author: Markus Schade <markus.schade@hetzner.com> +# +# This file is part of cloud-init. See LICENSE file for license information. +# +"""Hetzner Cloud API Documentation. 
+   https://docs.hetzner.cloud/""" + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.hetzner as hc_helper + +LOG = logging.getLogger(__name__) + +BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' + +BUILTIN_DS_CONFIG = { +    'metadata_url': BASE_URL_V1 + '/metadata', +    'userdata_url': BASE_URL_V1 + '/userdata', +} + +MD_RETRIES = 60 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 + + +class DataSourceHetzner(sources.DataSource): +    def __init__(self, sys_cfg, distro, paths): +        sources.DataSource.__init__(self, sys_cfg, distro, paths) +        self.distro = distro +        self.metadata = dict() +        self.ds_cfg = util.mergemanydict([ +            util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), +            BUILTIN_DS_CONFIG]) +        self.metadata_address = self.ds_cfg['metadata_url'] +        self.userdata_address = self.ds_cfg['userdata_url'] +        self.retries = self.ds_cfg.get('retries', MD_RETRIES) +        self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) +        self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) +        self._network_config = None +        self.dsmode = sources.DSMODE_NETWORK + +    def get_data(self): +        if not on_hetzner(): +            return False +        nic = cloudnet.find_fallback_nic() +        with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, +                                           "169.254.255.255"): +            md = hc_helper.read_metadata( +                self.metadata_address, timeout=self.timeout, +                sec_between=self.wait_retry, retries=self.retries) +            ud = hc_helper.read_userdata( +                self.userdata_address, timeout=self.timeout, +                sec_between=self.wait_retry, retries=self.retries) + +        self.userdata_raw = ud +        self.metadata_full = md + +        """hostname is name provided by 
user at launch.  The API enforces +        it is a valid hostname, but it is not guaranteed to be resolvable +        in dns or fully qualified.""" +        self.metadata['instance-id'] = md['instance-id'] +        self.metadata['local-hostname'] = md['hostname'] +        self.metadata['network-config'] = md.get('network-config', None) +        self.metadata['public-keys'] = md.get('public-keys', None) +        self.vendordata_raw = md.get("vendor_data", None) + +        return True + +    @property +    def network_config(self): +        """Configure the networking. This needs to be done each boot, since +           the IP information may have changed due to snapshot and/or +           migration. +        """ + +        if self._network_config: +            return self._network_config + +        _net_config = self.metadata['network-config'] +        if not _net_config: +            raise Exception("Unable to get meta-data from server....") + +        self._network_config = _net_config + +        return self._network_config + + +def on_hetzner(): +    return util.read_dmi_data('system-manufacturer') == "Hetzner" + + +# Used to match classes to dependencies +datasources = [ +    (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py new file mode 100644 index 00000000..02b3d56f --- /dev/null +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -0,0 +1,325 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for IBMCloud. + +IBMCloud is also know as SoftLayer or BlueMix. +IBMCloud hypervisor is xen (2018-03-10). + +There are 2 different api exposed launch methods. + * template: This is the legacy method of launching instances. 
+   When booting from an image template, the system boots first into +   a "provisioning" mode.  There, host <-> guest mechanisms are utilized +   to execute code in the guest and provision it. + +   Cloud-init will disable itself when it detects that it is in the +   provisioning mode.  It detects this by the presence of +   a file '/root/provisioningConfiguration.cfg'. + +   When provided with user-data, the "first boot" will contain a +   ConfigDrive-like disk labeled with 'METADATA'.  If there is no user-data +   provided, then there is no data-source. + +   Cloud-init never does any network configuration in this mode. + + * os_code: Essentially "launch by OS Code" (Operating System Code). +   This is a more modern approach.  There is no specific "provisioning" boot. +   Instead, cloud-init does all the customization.  With or without +   user-data provided, an OpenStack ConfigDrive like disk is attached. + +   Only disks with label 'config-2' and UUID '9796-932E' are considered. +   This is to avoid this datasource claiming ConfigDrive.  This does +   mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be +   incorrectly identified as IBMCloud. + +TODO: + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? +   it seems it is not the same as data's uuid in the os_code case +   but is in the template case. + +""" +import base64 +import json +import os + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit.sources.helpers import openstack +from cloudinit import util + +LOG = logging.getLogger(__name__) + +IBM_CONFIG_UUID = "9796-932E" + + +class Platforms(object): +    TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" +    TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." 
+    TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" +    TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" +    OS_CODE = "OS-Code/Live" + + +PROVISIONING = ( +    Platforms.TEMPLATE_PROVISIONING_METADATA, +    Platforms.TEMPLATE_PROVISIONING_NODATA) + + +class DataSourceIBMCloud(sources.DataSource): + +    dsname = 'IBMCloud' +    system_uuid = None + +    def __init__(self, sys_cfg, distro, paths): +        super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) +        self.source = None +        self._network_config = None +        self.network_json = None +        self.platform = None + +    def __str__(self): +        root = super(DataSourceIBMCloud, self).__str__() +        mstr = "%s [%s %s]" % (root, self.platform, self.source) +        return mstr + +    def _get_data(self): +        results = read_md() +        if results is None: +            return False + +        self.source = results['source'] +        self.platform = results['platform'] +        self.metadata = results['metadata'] +        self.userdata_raw = results.get('userdata') +        self.network_json = results.get('networkdata') +        vd = results.get('vendordata') +        self.vendordata_pure = vd +        self.system_uuid = results['system-uuid'] +        try: +            self.vendordata_raw = sources.convert_vendordata(vd) +        except ValueError as e: +            LOG.warning("Invalid content in vendor-data: %s", e) +            self.vendordata_raw = None + +        return True + +    def check_instance_id(self, sys_cfg): +        """quickly (local check only) if self.instance_id is still valid + +        in Template mode, the system uuid (/sys/hypervisor/uuid) is the +        same as found in the METADATA disk.  But that is not true in OS_CODE +        mode.  
So we read the system_uuid and keep that for later compare.""" +        if self.system_uuid is None: +            return False +        return self.system_uuid == _read_system_uuid() + +    @property +    def network_config(self): +        if self.platform != Platforms.OS_CODE: +            # If deployed from template, an agent in the provisioning +            # environment handles networking configuration. Not cloud-init. +            return {'config': 'disabled', 'version': 1} +        if self._network_config is None: +            if self.network_json is not None: +                LOG.debug("network config provided via network_json") +                self._network_config = openstack.convert_net_json( +                    self.network_json, known_macs=None) +            else: +                LOG.debug("no network configuration available.") +        return self._network_config + + +def _read_system_uuid(): +    uuid_path = "/sys/hypervisor/uuid" +    if not os.path.isfile(uuid_path): +        return None +    return util.load_file(uuid_path).strip().lower() + + +def _is_xen(): +    return os.path.exists("/proc/xen") + + +def _is_ibm_provisioning(): +    return os.path.exists("/root/provisioningConfiguration.cfg") + + +def get_ibm_platform(): +    """Return a tuple (Platform, path) + +    If this is Not IBM cloud, then the return value is (None, None). +    An instance in provisioning mode is considered running on IBM cloud.""" +    label_mdata = "METADATA" +    label_cfg2 = "CONFIG-2" +    not_found = (None, None) + +    if not _is_xen(): +        return not_found + +    # fslabels contains only the first entry with a given label. 
+    fslabels = {} +    try: +        devs = util.blkid() +    except util.ProcessExecutionError as e: +        LOG.warning("Failed to run blkid: %s", e) +        return (None, None) + +    for dev in sorted(devs.keys()): +        data = devs[dev] +        label = data.get("LABEL", "").upper() +        uuid = data.get("UUID", "").upper() +        if label not in (label_mdata, label_cfg2): +            continue +        if label in fslabels: +            LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s", +                        label, fslabels[label], data) +            continue +        if label == label_cfg2 and uuid != IBM_CONFIG_UUID: +            LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", +                      dev, label, uuid, data) +            continue +        fslabels[label] = data + +    metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') +    cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') + +    if cfg2_path: +        return (Platforms.OS_CODE, cfg2_path) +    elif metadata_path: +        if _is_ibm_provisioning(): +            return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) +        else: +            return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) +    elif _is_ibm_provisioning(): +            return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) +    return not_found + + +def read_md(): +    """Read data from IBM Cloud. + +    @return: None if not running on IBM Cloud. +             dictionary with guaranteed fields: metadata, version +             and optional fields: userdata, vendordata, networkdata. 
+             Also includes the system uuid from /sys/hypervisor/uuid.""" +    platform, path = get_ibm_platform() +    if platform is None: +        LOG.debug("This is not an IBMCloud platform.") +        return None +    elif platform in PROVISIONING: +        LOG.debug("Cloud-init is disabled during provisioning: %s.", +                  platform) +        return None + +    ret = {'platform': platform, 'source': path, +           'system-uuid': _read_system_uuid()} + +    try: +        if os.path.isdir(path): +            results = metadata_from_dir(path) +        else: +            results = util.mount_cb(path, metadata_from_dir) +    except BrokenMetadata as e: +        raise RuntimeError( +            "Failed reading IBM config disk (platform=%s path=%s): %s" % +            (platform, path, e)) + +    ret.update(results) +    return ret + + +class BrokenMetadata(IOError): +    pass + + +def metadata_from_dir(source_dir): +    """Walk source_dir extracting standardized metadata. + +    Certain metadata keys are renamed to present a standardized set of metadata +    keys. + +    This function has a lot in common with ConfigDriveReader.read_v2 but +    there are a number of inconsistencies, such key renames and as only +    presenting a 'latest' version which make it an unlikely candidate to share +    code. + +    @return: Dict containing translated metadata, userdata, vendordata, +        networkdata as present. 
+    """ + +    def opath(fname): +        return os.path.join("openstack", "latest", fname) + +    def load_json_bytes(blob): +        return json.loads(blob.decode('utf-8')) + +    files = [ +        # tuples of (results_name, path, translator) +        ('metadata_raw', opath('meta_data.json'), load_json_bytes), +        ('userdata', opath('user_data'), None), +        ('vendordata', opath('vendor_data.json'), load_json_bytes), +        ('networkdata', opath('network_data.json'), load_json_bytes), +    ] + +    results = {} +    for (name, path, transl) in files: +        fpath = os.path.join(source_dir, path) +        raw = None +        try: +            raw = util.load_file(fpath, decode=False) +        except IOError as e: +            LOG.debug("Failed reading path '%s': %s", fpath, e) + +        if raw is None or transl is None: +            data = raw +        else: +            try: +                data = transl(raw) +            except Exception as e: +                raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + +        results[name] = data + +    if results.get('metadata_raw') is None: +        raise BrokenMetadata( +            "%s missing required file 'meta_data.json'" % source_dir) + +    results['metadata'] = {} + +    md_raw = results['metadata_raw'] +    md = results['metadata'] +    if 'random_seed' in md_raw: +        try: +            md['random_seed'] = base64.b64decode(md_raw['random_seed']) +        except (ValueError, TypeError) as e: +            raise BrokenMetadata( +                "Badly formatted metadata random_seed entry: %s" % e) + +    renames = ( +        ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), +        ('uuid', 'instance-id')) +    for mdname, newname in renames: +        if mdname in md_raw: +            md[newname] = md_raw[mdname] + +    return results + + +# Used to match classes to dependencies +datasources = [ +    (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return 
a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": +    import argparse + +    parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') +    args = parser.parse_args() +    data = read_md() +    print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6e62f984..dc914a72 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource):                            "VMware Customization support")              elif not util.get_cfg_option_bool(                      self.sys_cfg, "disable_vmware_customization", True): -                deployPkgPluginPath = search_file("/usr/lib/vmware-tools", -                                                  "libdeployPkgPlugin.so") -                if not deployPkgPluginPath: -                    deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", -                                                      "libdeployPkgPlugin.so") + +                search_paths = ( +                    "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", +                    "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") + +                plugin = "libdeployPkgPlugin.so" +                deployPkgPluginPath = None +                for path in search_paths: +                    deployPkgPluginPath = search_file(path, plugin) +                    if deployPkgPluginPath: +                        LOG.debug("Found the customization plugin at %s", +                                  deployPkgPluginPath) +                        break +                  if deployPkgPluginPath:                      # When the VM is powered on, the "VMware Tools" daemon                      # copies the customization specification file to @@ -111,6 
+120,8 @@ class DataSourceOVF(sources.DataSource):                          msg="waiting for configuration file",                          func=wait_for_imc_cfg_file,                          args=("cust.cfg", max_wait)) +                else: +                    LOG.debug("Did not find the customization plugin.")                  if vmwareImcConfigFilePath:                      LOG.debug("Found VMware Customization Config File at %s", diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ce47b6bd..d4a41116 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -20,7 +20,6 @@ import string  from cloudinit import log as logging  from cloudinit import net -from cloudinit.net import eni  from cloudinit import sources  from cloudinit import util @@ -91,19 +90,19 @@ class DataSourceOpenNebula(sources.DataSource):              return False          self.seed = seed -        self.network_eni = results.get('network-interfaces') +        self.network = results.get('network-interfaces')          self.metadata = md          self.userdata_raw = results.get('userdata')          return True      @property      def network_config(self): -        if self.network_eni is not None: -            return eni.convert_eni_data(self.network_eni) +        if self.network is not None: +            return self.network          else:              return None -    def get_hostname(self, fqdn=False, resolve_ip=None): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          if resolve_ip is None:              if self.dsmode == sources.DSMODE_NETWORK:                  resolve_ip = True @@ -143,18 +142,42 @@ class OpenNebulaNetwork(object):      def mac2network(self, mac):          return self.mac2ip(mac).rpartition(".")[0] + ".0" -    def get_dns(self, dev): -        return self.get_field(dev, "dns", "").split() +    def get_nameservers(self, dev): +        
nameservers = {} +        dns = self.get_field(dev, "dns", "").split() +        dns.extend(self.context.get('DNS', "").split()) +        if dns: +            nameservers['addresses'] = dns +        search_domain = self.get_field(dev, "search_domain", "").split() +        if search_domain: +            nameservers['search'] = search_domain +        return nameservers -    def get_domain(self, dev): -        return self.get_field(dev, "domain") +    def get_mtu(self, dev): +        return self.get_field(dev, "mtu")      def get_ip(self, dev, mac):          return self.get_field(dev, "ip", self.mac2ip(mac)) +    def get_ip6(self, dev): +        addresses6 = [] +        ip6 = self.get_field(dev, "ip6") +        if ip6: +            addresses6.append(ip6) +        ip6_ula = self.get_field(dev, "ip6_ula") +        if ip6_ula: +            addresses6.append(ip6_ula) +        return addresses6 + +    def get_ip6_prefix(self, dev): +        return self.get_field(dev, "ip6_prefix_length", "64") +      def get_gateway(self, dev):          return self.get_field(dev, "gateway") +    def get_gateway6(self, dev): +        return self.get_field(dev, "gateway6") +      def get_mask(self, dev):          return self.get_field(dev, "mask", "255.255.255.0") @@ -171,13 +194,11 @@ class OpenNebulaNetwork(object):          return default if val in (None, "") else val      def gen_conf(self): -        global_dns = self.context.get('DNS', "").split() - -        conf = [] -        conf.append('auto lo') -        conf.append('iface lo inet loopback') -        conf.append('') +        netconf = {} +        netconf['version'] = 2 +        netconf['ethernets'] = {} +        ethernets = {}          for mac, dev in self.ifaces.items():              mac = mac.lower() @@ -185,29 +206,49 @@ class OpenNebulaNetwork(object):              # dev stores the current system name.              
c_dev = self.context_devname.get(mac, dev) -            conf.append('auto ' + dev) -            conf.append('iface ' + dev + ' inet static') -            conf.append('  #hwaddress %s' % mac) -            conf.append('  address ' + self.get_ip(c_dev, mac)) -            conf.append('  network ' + self.get_network(c_dev, mac)) -            conf.append('  netmask ' + self.get_mask(c_dev)) +            devconf = {} + +            # Set MAC address +            devconf['match'] = {'macaddress': mac} +            # Set IPv4 address +            devconf['addresses'] = [] +            mask = self.get_mask(c_dev) +            prefix = str(net.mask_to_net_prefix(mask)) +            devconf['addresses'].append( +                self.get_ip(c_dev, mac) + '/' + prefix) + +            # Set IPv6 Global and ULA address +            addresses6 = self.get_ip6(c_dev) +            if addresses6: +                prefix6 = self.get_ip6_prefix(c_dev) +                devconf['addresses'].extend( +                    [i + '/' + prefix6 for i in addresses6]) + +            # Set IPv4 default gateway              gateway = self.get_gateway(c_dev)              if gateway: -                conf.append('  gateway ' + gateway) +                devconf['gateway4'] = gateway + +            # Set IPv6 default gateway +            gateway6 = self.get_gateway6(c_dev) +            if gateway: +                devconf['gateway6'] = gateway6 -            domain = self.get_domain(c_dev) -            if domain: -                conf.append('  dns-search ' + domain) +            # Set DNS servers and search domains +            nameservers = self.get_nameservers(c_dev) +            if nameservers: +                devconf['nameservers'] = nameservers -            # add global DNS servers to all interfaces -            dns = self.get_dns(c_dev) -            if global_dns or dns: -                conf.append('  dns-nameservers ' + ' '.join(global_dns + dns)) +            # Set MTU size +            mtu = 
self.get_mtu(c_dev) +            if mtu: +                devconf['mtu'] = mtu -            conf.append('') +            ethernets[dev] = devconf -        return "\n".join(conf) +        netconf['ethernets'] = ethernets +        return(netconf)  def find_candidate_devs(): @@ -393,10 +434,10 @@ def read_context_disk_dir(source_dir, asuser=None):              except TypeError:                  LOG.warning("Failed base64 decoding of userdata") -    # generate static /etc/network/interfaces +    # generate Network Configuration v2      # only if there are any required context variables -    # http://opennebula.org/documentation:rel3.8:cong#network_configuration -    ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] +    # http://docs.opennebula.org/5.4/operation/references/template.html#context-section +    ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]      if ipaddr_keys:          onet = OpenNebulaNetwork(context)          results['network-interfaces'] = onet.gen_conf() diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b0b19c93..e2502b02 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -113,9 +113,9 @@ def query_data_api_once(api_address, timeout, requests_session):              retries=0,              session=requests_session,              # If the error is a HTTP/404 or a ConnectionError, go into raise -            # block below. -            exception_cb=lambda _, exc: exc.code == 404 or ( -                isinstance(exc.cause, requests.exceptions.ConnectionError) +            # block below and don't bother retrying. 
+            exception_cb=lambda _, exc: exc.code != 404 and ( +                not isinstance(exc.cause, requests.exceptions.ConnectionError)              )          )          return util.decode_binary(resp.contents) @@ -215,7 +215,7 @@ class DataSourceScaleway(sources.DataSource):      def get_public_ssh_keys(self):          return [key['key'] for key in self.metadata['ssh_public_keys']] -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          return self.metadata['hostname']      @property diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a05ca2f6..df0b374a 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -276,21 +276,34 @@ class DataSource(object):              return "iid-datasource"          return str(self.metadata['instance-id']) -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): +        """Get hostname or fqdn from the datasource. Look it up if desired. + +        @param fqdn: Boolean, set True to return hostname with domain. +        @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 +            address provided in local-hostname meta-data. +        @param metadata_only: Boolean, set True to avoid looking up hostname +            if meta-data doesn't have local-hostname present. + +        @return: hostname or qualified hostname. Optionally return None when +            metadata_only is True and local-hostname data is not available. +        """          defdomain = "localdomain"          defhost = "localhost"          domain = defdomain          if not self.metadata or 'local-hostname' not in self.metadata: +            if metadata_only: +                return None              # this is somewhat questionable really.              
# the cloud datasource was asked for a hostname              # and didn't have one. raising error might be more appropriate              # but instead, basically look up the existing hostname              toks = []              hostname = util.get_hostname() -            fqdn = util.get_fqdn_from_hosts(hostname) -            if fqdn and fqdn.find(".") > 0: -                toks = str(fqdn).split(".") +            hosts_fqdn = util.get_fqdn_from_hosts(hostname) +            if hosts_fqdn and hosts_fqdn.find(".") > 0: +                toks = str(hosts_fqdn).split(".")              elif hostname and hostname.find(".") > 0:                  toks = str(hostname).split(".")              elif hostname: diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py new file mode 100644 index 00000000..2554530d --- /dev/null +++ b/cloudinit/sources/helpers/hetzner.py @@ -0,0 +1,26 @@ +# Author: Jonas Keidel <jonas.keidel@hetzner.com> +# Author: Markus Schade <markus.schade@hetzner.com> +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): +    response = url_helper.readurl(url, timeout=timeout, +                                  sec_between=sec_between, retries=retries) +    if not response.ok(): +        raise RuntimeError("unable to read metadata at %s" % url) +    return util.load_yaml(response.contents.decode()) + + +def read_userdata(url, timeout=2, sec_between=2, retries=30): +    response = url_helper.readurl(url, timeout=timeout, +                                  sec_between=sec_between, retries=retries) +    if not response.ok(): +        raise RuntimeError("unable to read userdata at %s" % url) +    return response.contents diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index af151154..e7fda22a 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -1,13 +1,15 @@  # This file is part of cloud-init. See LICENSE file for license information. 
+import inspect  import os  import six  import stat  from cloudinit.helpers import Paths +from cloudinit import importer  from cloudinit.sources import (      INSTANCE_JSON_FILE, DataSource) -from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.tests.helpers import CiTestCase, skipIf, mock  from cloudinit.user_data import UserDataProcessor  from cloudinit import util @@ -108,6 +110,74 @@ class TestDataSource(CiTestCase):          self.assertEqual('userdata_raw', datasource.userdata_raw)          self.assertEqual('vendordata_raw', datasource.vendordata_raw) +    def test_get_hostname_strips_local_hostname_without_domain(self): +        """Datasource.get_hostname strips metadata local-hostname of domain.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertTrue(datasource.get_data()) +        self.assertEqual( +            'test-subclass-hostname', datasource.metadata['local-hostname']) +        self.assertEqual('test-subclass-hostname', datasource.get_hostname()) +        datasource.metadata['local-hostname'] = 'hostname.my.domain.com' +        self.assertEqual('hostname', datasource.get_hostname()) + +    def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): +        """Datasource.get_hostname with fqdn set gets qualified hostname.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertTrue(datasource.get_data()) +        datasource.metadata['local-hostname'] = 'hostname.my.domain.com' +        self.assertEqual( +            'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) + +    def test_get_hostname_without_metadata_uses_system_hostname(self): +        """Datasource.gethostname runs util.get_hostname when no metadata.""" +        tmp = self.tmp_dir() +        datasource = 
DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                m_gethost.return_value = 'systemhostname.domain.com' +                m_fqdn.return_value = None  # No matching fqdn in /etc/hosts +                self.assertEqual('systemhostname', datasource.get_hostname()) +                self.assertEqual( +                    'systemhostname.domain.com', +                    datasource.get_hostname(fqdn=True)) + +    def test_get_hostname_without_metadata_returns_none(self): +        """Datasource.gethostname returns None when metadata_only and no MD.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                self.assertIsNone(datasource.get_hostname(metadata_only=True)) +                self.assertIsNone( +                    datasource.get_hostname(fqdn=True, metadata_only=True)) +        self.assertEqual([], m_gethost.call_args_list) +        self.assertEqual([], m_fqdn.call_args_list) + +    def test_get_hostname_without_metadata_prefers_etc_hosts(self): +        """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with 
mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                m_gethost.return_value = 'systemhostname.domain.com' +                m_fqdn.return_value = 'fqdnhostname.domain.com' +                self.assertEqual('fqdnhostname', datasource.get_hostname()) +                self.assertEqual('fqdnhostname.domain.com', +                                 datasource.get_hostname(fqdn=True)) +      def test_get_data_write_json_instance_data(self):          """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""          tmp = self.tmp_dir() @@ -200,3 +270,29 @@ class TestDataSource(CiTestCase):              "WARNING: Error persisting instance-data.json: 'utf8' codec can't"              " decode byte 0xaa in position 2: invalid start byte",              self.logs.getvalue()) + +    def test_get_hostname_subclass_support(self): +        """Validate get_hostname signature on all subclasses of DataSource.""" +        # Use inspect.getfullargspec when we drop py2.6 and py2.7 +        get_args = inspect.getargspec  # pylint: disable=W1505 +        base_args = get_args(DataSource.get_hostname)  # pylint: disable=W1505 +        # Import all DataSource subclasses so we can inspect them. 
+        modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) +        for loc, name in modules.items(): +            mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) +            if mod_locs: +                importer.import_module(mod_locs[0]) +        for child in DataSource.__subclasses__(): +            if 'Test' in child.dsname: +                continue +            self.assertEqual( +                base_args, +                get_args(child.get_hostname),  # pylint: disable=W1505 +                '%s does not implement DataSource.get_hostname params' +                % child) +            for grandchild in child.__subclasses__(): +                self.assertEqual( +                    base_args, +                    get_args(grandchild.get_hostname),  # pylint: disable=W1505 +                    '%s does not implement DataSource.get_hostname params' +                    % grandchild) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d0452688..bc4ebc85 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -132,8 +132,7 @@ class Init(object):          return initial_dirs      def purge_cache(self, rm_instance_lnk=False): -        rm_list = [] -        rm_list.append(self.paths.boot_finished) +        rm_list = [self.paths.boot_finished]          if rm_instance_lnk:              rm_list.append(self.paths.instance_link)          for f in rm_list: diff --git a/cloudinit/subp.py b/cloudinit/subp.py new file mode 100644 index 00000000..0ad09306 --- /dev/null +++ b/cloudinit/subp.py @@ -0,0 +1,57 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Common utility functions for interacting with subprocess.""" + +# TODO move subp shellify and runparts related functions out of util.py + +import logging + +LOG = logging.getLogger(__name__) + + +def prepend_base_command(base_command, commands): +    """Ensure user-provided commands start with base_command; warn otherwise. 
+ +    Each command is either a list or string. Perform the following: +       - If the command is a list, pop the first element if it is None +       - If the command is a list, insert base_command as the first element if +         not present. +       - When the command is a string not starting with 'base-command', warn. + +    Allow flexibility to provide non-base-command environment/config setup if +    needed. + +    @commands: List of commands. Each command element is a list or string. + +    @return: List of 'fixed up' commands. +    @raise: TypeError on invalid config item type. +    """ +    warnings = [] +    errors = [] +    fixed_commands = [] +    for command in commands: +        if isinstance(command, list): +            if command[0] is None:  # Avoid warnings by specifying None +                command = command[1:] +            elif command[0] != base_command:  # Automatically prepend +                command.insert(0, base_command) +        elif isinstance(command, str): +            if not command.startswith('%s ' % base_command): +                warnings.append(command) +        else: +            errors.append(str(command)) +            continue +        fixed_commands.append(command) + +    if warnings: +        LOG.warning( +            'Non-%s commands in %s config:\n%s', +            base_command, base_command, '\n'.join(warnings)) +    if errors: +        raise TypeError( +            'Invalid {name} config.' 
+            ' These commands are not a string or list:\n{errors}'.format( +                name=base_command, errors='\n'.join(errors))) +    return fixed_commands + + +# vi: ts=4 expandtab diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 0080c729..999b1d7c 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -173,17 +173,15 @@ class CiTestCase(TestCase):              dir = self.tmp_dir()          return os.path.normpath(os.path.abspath(os.path.join(dir, path))) -    def assertRaisesCodeEqual(self, expected, found): -        """Handle centos6 having different context manager for assertRaises. -            with assertRaises(Exception) as e: -                raise Exception("BOO") - -            centos6 will have e.exception as an integer. -            anything nwere will have it as something with a '.code'""" -        if isinstance(found, int): -            self.assertEqual(expected, found) -        else: -            self.assertEqual(expected, found.code) +    def sys_exit(self, code): +        """Provide a wrapper around sys.exit for python 2.6 + +        In 2.6, this code would produce 'cm.exception' with value int(2) +        rather than the SystemExit that was raised by sys.exit(2). 
+            with assertRaises(SystemExit) as cm: +                sys.exit(2) +        """ +        raise SystemExit(code)  class ResourceUsingTestCase(CiTestCase): @@ -285,10 +283,15 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):      def patchOS(self, new_root):          patch_funcs = {              os.path: [('isfile', 1), ('exists', 1), -                      ('islink', 1), ('isdir', 1)], +                      ('islink', 1), ('isdir', 1), ('lexists', 1)],              os: [('listdir', 1), ('mkdir', 1), -                 ('lstat', 1), ('symlink', 2)], +                 ('lstat', 1), ('symlink', 2)]          } + +        if hasattr(os, 'scandir'): +            # py27 does not have scandir +            patch_funcs[os].append(('scandir', 1)) +          for (mod, funcs) in patch_funcs.items():              for f, nargs in funcs:                  func = getattr(mod, f) @@ -411,6 +414,19 @@ except AttributeError:          return decorator +try: +    import jsonschema +    assert jsonschema  # avoid pyflakes error F401: import unused +    _missing_jsonschema_dep = False +except ImportError: +    _missing_jsonschema_dep = True + + +def skipUnlessJsonSchema(): +    return skipIf( +        _missing_jsonschema_dep, "No python-jsonschema dependency present.") + +  # older versions of mock do not have the useful 'assert_not_called'  if not hasattr(mock.Mock, 'assert_not_called'):      def __mock_assert_not_called(mmock): @@ -422,12 +438,12 @@ if not hasattr(mock.Mock, 'assert_not_called'):      mock.Mock.assert_not_called = __mock_assert_not_called -# older unittest2.TestCase (centos6) do not have assertRaisesRegex -# And setting assertRaisesRegex to assertRaisesRegexp causes -# https://github.com/PyCQA/pylint/issues/1653 . So the workaround. +# older unittest2.TestCase (centos6) have only the now-deprecated +# assertRaisesRegexp. Simple assignment makes pylint complain about +# users of assertRaisesRegex, so we use getattr to trick it. 
+# https://github.com/PyCQA/pylint/issues/1946  if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): -    def _tricky(*args, **kwargs): -        return unittest2.TestCase.assertRaisesRegexp -    unittest2.TestCase.assertRaisesRegex = _tricky +    unittest2.TestCase.assertRaisesRegex = ( +        getattr(unittest2.TestCase, 'assertRaisesRegexp'))  # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py new file mode 100644 index 00000000..448097d3 --- /dev/null +++ b/cloudinit/tests/test_subp.py @@ -0,0 +1,61 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for cloudinit.subp utility functions""" + +from cloudinit import subp +from cloudinit.tests.helpers import CiTestCase + + +class TestPrependBaseCommands(CiTestCase): + +    with_logs = True + +    def test_prepend_base_command_errors_on_neither_string_nor_list(self): +        """Raise an error for each command which is not a string or list.""" +        orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] +        with self.assertRaises(TypeError) as context_manager: +            subp.prepend_base_command( +                base_command='basecmd', commands=orig_commands) +        self.assertEqual( +            "Invalid basecmd config. 
These commands are not a string or" +            " list:\n1\n{'not': 'gonna work'}", +            str(context_manager.exception)) + +    def test_prepend_base_command_warns_on_non_base_string_commands(self): +        """Warn on each non-base for commands of type string.""" +        orig_commands = [ +            'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] +        fixed_commands = subp.prepend_base_command( +            base_command='basecmd', commands=orig_commands) +        self.assertEqual( +            'WARNING: Non-basecmd commands in basecmd config:\n' +            'ls\ntouch /blah\n', +            self.logs.getvalue()) +        self.assertEqual(orig_commands, fixed_commands) + +    def test_prepend_base_command_prepends_on_non_base_list_commands(self): +        """Prepend 'basecmd' for each non-basecmd command of type list.""" +        orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], +                         ['basecmd', 'install', 'x']] +        expected = [['basecmd', 'ls'], ['basecmd', 'list'], +                    ['basecmd', 'basecmda', '/blah'], +                    ['basecmd', 'install', 'x']] +        fixed_commands = subp.prepend_base_command( +            base_command='basecmd', commands=orig_commands) +        self.assertEqual('', self.logs.getvalue()) +        self.assertEqual(expected, fixed_commands) + +    def test_prepend_base_command_removes_first_item_when_none(self): +        """Remove the first element of a non-basecmd when it is None.""" +        orig_commands = [[None, 'ls'], ['basecmd', 'list'], +                         [None, 'touch', '/blah'], +                         ['basecmd', 'install', 'x']] +        expected = [['ls'], ['basecmd', 'list'], +                    ['touch', '/blah'], +                    ['basecmd', 'install', 'x']] +        fixed_commands = subp.prepend_base_command( +            base_command='basecmd', commands=orig_commands) +        self.assertEqual('', self.logs.getvalue()) 
+        self.assertEqual(expected, fixed_commands) + +# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index ba6bf699..3f37dbb6 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -3,6 +3,7 @@  """Tests for cloudinit.util"""  import logging +from textwrap import dedent  import cloudinit.util as util @@ -16,6 +17,25 @@ MOUNT_INFO = [  ] +class FakeCloud(object): + +    def __init__(self, hostname, fqdn): +        self.hostname = hostname +        self.fqdn = fqdn +        self.calls = [] + +    def get_hostname(self, fqdn=None, metadata_only=None): +        myargs = {} +        if fqdn is not None: +            myargs['fqdn'] = fqdn +        if metadata_only is not None: +            myargs['metadata_only'] = metadata_only +        self.calls.append(myargs) +        if fqdn: +            return self.fqdn +        return self.hostname + +  class TestUtil(CiTestCase):      def test_parse_mount_info_no_opts_no_arg(self): @@ -44,3 +64,152 @@ class TestUtil(CiTestCase):          m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')          is_rw = util.mount_is_read_write('/')          self.assertEqual(is_rw, False) + + +class TestShellify(CiTestCase): + +    def test_input_dict_raises_type_error(self): +        self.assertRaisesRegex( +            TypeError, 'Input.*was.*dict.*xpected', +            util.shellify, {'mykey': 'myval'}) + +    def test_input_str_raises_type_error(self): +        self.assertRaisesRegex( +            TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") + +    def test_value_with_int_raises_type_error(self): +        self.assertRaisesRegex( +            TypeError, 'shellify.*int', util.shellify, ["foo", 1]) + +    def test_supports_strings_and_lists(self): +        self.assertEqual( +            '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", +                       "'echo' 'hi' 'sis'", ""]), +            util.shellify(["echo hi 
mom", ["echo", "hi dad"], +                           ('echo', 'hi', 'sis')])) + + +class TestGetHostnameFqdn(CiTestCase): + +    def test_get_hostname_fqdn_from_only_cfg_fqdn(self): +        """When cfg only has the fqdn key, derive hostname and fqdn from it.""" +        hostname, fqdn = util.get_hostname_fqdn( +            cfg={'fqdn': 'myhost.domain.com'}, cloud=None) +        self.assertEqual('myhost', hostname) +        self.assertEqual('myhost.domain.com', fqdn) + +    def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): +        """When cfg has both fqdn and hostname keys, return them.""" +        hostname, fqdn = util.get_hostname_fqdn( +            cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) +        self.assertEqual('other', hostname) +        self.assertEqual('myhost.domain.com', fqdn) + +    def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): +        """When cfg has only hostname key which represents a fqdn, use that.""" +        hostname, fqdn = util.get_hostname_fqdn( +            cfg={'hostname': 'myhost.domain.com'}, cloud=None) +        self.assertEqual('myhost', hostname) +        self.assertEqual('myhost.domain.com', fqdn) + +    def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): +        """When cfg has a hostname without a '.' 
query cloud.get_hostname.""" +        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') +        hostname, fqdn = util.get_hostname_fqdn( +            cfg={'hostname': 'myhost'}, cloud=mycloud) +        self.assertEqual('myhost', hostname) +        self.assertEqual('cloudhost.mycloud.com', fqdn) +        self.assertEqual( +            [{'fqdn': True, 'metadata_only': False}], mycloud.calls) + +    def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): +        """When cfg has neither hostname nor fqdn cloud.get_hostname.""" +        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') +        hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) +        self.assertEqual('cloudhost', hostname) +        self.assertEqual('cloudhost.mycloud.com', fqdn) +        self.assertEqual( +            [{'fqdn': True, 'metadata_only': False}, +             {'metadata_only': False}], mycloud.calls) + +    def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): +        """Calls to cloud.get_hostname pass the metadata_only parameter.""" +        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') +        hostname, fqdn = util.get_hostname_fqdn( +            cfg={}, cloud=mycloud, metadata_only=True) +        self.assertEqual( +            [{'fqdn': True, 'metadata_only': True}, +             {'metadata_only': True}], mycloud.calls) + + +class TestBlkid(CiTestCase): +    ids = { +        "id01": "1111-1111", +        "id02": "22222222-2222", +        "id03": "33333333-3333", +        "id04": "44444444-4444", +        "id05": "55555555-5555-5555-5555-555555555555", +        "id06": "66666666-6666-6666-6666-666666666666", +        "id07": "52894610484658920398", +        "id08": "86753098675309867530", +        "id09": "99999999-9999-9999-9999-999999999999", +    } + +    blkid_out = dedent("""\ +        /dev/loop0: TYPE="squashfs" +        /dev/loop1: TYPE="squashfs" +        /dev/loop2: TYPE="squashfs" +        /dev/loop3: 
TYPE="squashfs" +        /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" +        /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" +        /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" +        /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ +                       """TYPE="zfs_member" PARTUUID="{id09}" +        /dev/loop4: TYPE="squashfs" +      """) + +    maxDiff = None + +    def _get_expected(self): +        return ({ +            "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, +            "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, +            "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, +            "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, +            "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, +            "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", +                          "UUID": self.ids["id01"], +                          "PARTUUID": self.ids["id02"]}, +            "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", +                          "UUID": self.ids["id03"], +                          "PARTUUID": self.ids["id04"]}, +            "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", +                          "UUID": self.ids["id05"], +                          "PARTUUID": self.ids["id06"]}, +            "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", +                          "LABEL": "default", +                          "UUID": self.ids["id07"], +                          "UUID_SUB": self.ids["id08"], +                          "PARTUUID": self.ids["id09"]}, +        }) + +    @mock.patch("cloudinit.util.subp") +    def test_functional_blkid(self, m_subp): +        m_subp.return_value = ( +            self.blkid_out.format(**self.ids), "") +        self.assertEqual(self._get_expected(), util.blkid()) +        m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, +                     
             decode="replace") + +    @mock.patch("cloudinit.util.subp") +    def test_blkid_no_cache_uses_no_cache(self, m_subp): +        """blkid should turn off cache if disable_cache is true.""" +        m_subp.return_value = ( +            self.blkid_out.format(**self.ids), "") +        self.assertEqual(self._get_expected(), +                         util.blkid(disable_cache=True)) +        m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], +                                  capture=True, decode="replace") + + +# vi: ts=4 expandtab diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0a5be0b3..03a573af 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -16,7 +16,7 @@ import time  from email.utils import parsedate  from functools import partial - +from itertools import count  from requests import exceptions  from six.moves.urllib.parse import ( @@ -47,7 +47,7 @@ try:      _REQ_VER = LooseVersion(_REQ.version)  # pylint: disable=no-member      if _REQ_VER >= LooseVersion('0.8.8'):          SSL_ENABLED = True -    if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'): +    if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'):          CONFIG_ENABLED = True  except ImportError:      pass @@ -121,7 +121,7 @@ class UrlResponse(object):          upper = 300          if redirects_ok:              upper = 400 -        if self.code >= 200 and self.code < upper: +        if 200 <= self.code < upper:              return True          else:              return False @@ -172,7 +172,7 @@ def _get_ssl_args(url, ssl_details):  def readurl(url, data=None, timeout=None, retries=0, sec_between=1,              headers=None, headers_cb=None, ssl_details=None,              check_status=True, allow_redirects=True, exception_cb=None, -            session=None): +            session=None, infinite=False):      url = _cleanurl(url)      req_args = {          'url': url, @@ -220,7 +220,8 @@ def 
readurl(url, data=None, timeout=None, retries=0, sec_between=1,      excps = []      # Handle retrying ourselves since the built-in support      # doesn't handle sleeping between tries... -    for i in range(0, manual_tries): +    # Infinitely retry if infinite is True +    for i in count() if infinite else range(0, manual_tries):          req_args['headers'] = headers_cb(url)          filtered_req_args = {}          for (k, v) in req_args.items(): @@ -229,7 +230,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,              filtered_req_args[k] = v          try:              LOG.debug("[%s/%s] open '%s' with %s configuration", i, -                      manual_tries, url, filtered_req_args) +                      "infinite" if infinite else manual_tries, url, +                      filtered_req_args)              if session is None:                  session = requests.Session() @@ -258,11 +260,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,                      # ssl exceptions are not going to get fixed by waiting a                      # few seconds                      break -            if exception_cb and exception_cb(req_args.copy(), excps[-1]): -                # if an exception callback was given it should return None -                # a true-ish value means to break and re-raise the exception +            if exception_cb and not exception_cb(req_args.copy(), excps[-1]): +                # if an exception callback was given, it should return True +                # to continue retrying and False to break and re-raise the +                # exception                  break -            if i + 1 < manual_tries and sec_between > 0: +            if (infinite and sec_between > 0) or \ +               (i + 1 < manual_tries and sec_between > 0):                  LOG.debug("Please wait %s seconds while we wait to try again",                            sec_between)                  time.sleep(sec_between) diff --git 
a/cloudinit/util.py b/cloudinit/util.py index 338fb971..0ab2c484 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -546,7 +546,7 @@ def is_ipv4(instr):          return False      try: -        toks = [x for x in toks if int(x) < 256 and int(x) >= 0] +        toks = [x for x in toks if 0 <= int(x) < 256]      except Exception:          return False @@ -716,8 +716,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):  def make_url(scheme, host, port=None,               path='', params='', query='', fragment=''): -    pieces = [] -    pieces.append(scheme or '') +    pieces = [scheme or '']      netloc = ''      if host: @@ -1026,9 +1025,16 @@ def dos2unix(contents):      return contents.replace('\r\n', '\n') -def get_hostname_fqdn(cfg, cloud): -    # return the hostname and fqdn from 'cfg'.  If not found in cfg, -    # then fall back to data from cloud +def get_hostname_fqdn(cfg, cloud, metadata_only=False): +    """Get hostname and fqdn from config if present and fallback to cloud. + +    @param cfg: Dictionary of merged user-data configuration (from init.cfg). +    @param cloud: Cloud instance from init.cloudify(). +    @param metadata_only: Boolean, set True to only query cloud meta-data, +        returning None if not present in meta-data. +    @return: a Tuple of strings <hostname>, <fqdn>. Values can be none when +        metadata_only is True and no cfg or metadata provides hostname info. +    """      if "fqdn" in cfg:          # user specified a fqdn.  Default hostname then is based off that          fqdn = cfg['fqdn'] @@ -1042,11 +1048,11 @@ def get_hostname_fqdn(cfg, cloud):          else:              # no fqdn set, get fqdn from cloud.              
# get hostname from cfg if available otherwise cloud -            fqdn = cloud.get_hostname(fqdn=True) +            fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only)              if "hostname" in cfg:                  hostname = cfg['hostname']              else: -                hostname = cloud.get_hostname() +                hostname = cloud.get_hostname(metadata_only=metadata_only)      return (hostname, fqdn) @@ -1231,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device',      return entries + +def blkid(devs=None, disable_cache=False): +    """Get all device tags details from blkid. + +    @param devs: Optional list of device paths you wish to query. +    @param disable_cache: Bool, set True to start with clean cache. + +    @return: Dict of key value pairs of info for the device. +    """ +    if devs is None: +        devs = [] +    else: +        devs = list(devs) + +    cmd = ['blkid', '-o', 'full'] +    if disable_cache: +        cmd.extend(['-c', '/dev/null']) +    cmd.extend(devs) + +    # we have to decode with 'replace' as shlex.split (called by +    # load_shell_content) can't take bytes.  So this is potentially +    # lossy of non-utf-8 chars in blkid output. +    out, _ = subp(cmd, capture=True, decode="replace") +    ret = {} +    for line in out.splitlines(): +        dev, _, data = line.partition(":") +        ret[dev] = load_shell_content(data) +        ret[dev]["DEVNAME"] = dev + +    return ret + + +def peek_file(fname, max_bytes):      LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)      with open(fname, 'rb') as ifh: @@ -1746,7 +1783,7 @@ def chmod(path, mode):  def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):      """      Writes a file with the given content and sets the file mode as specified. -    Resotres the SELinux context if possible. +    Restores the SELinux context if possible.      @param filename: The full path of the file to write.      
@param content: The content to write to the file. @@ -1821,7 +1858,8 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):  def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, -         logstring=False, decode="replace", target=None, update_env=None): +         logstring=False, decode="replace", target=None, update_env=None, +         status_cb=None):      # not supported in cloud-init (yet), for now kept in the call signature      # to ease maintaining code shared between cloud-init and curtin @@ -1842,6 +1880,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,      if target_path(target) != "/":          args = ['chroot', target] + list(args) +    if status_cb: +        command = ' '.join(args) if isinstance(args, list) else args +        status_cb('Begin run command: {command}\n'.format(command=command))      if not logstring:          LOG.debug(("Running command %s with allowed return codes %s"                     " (shell=%s, capture=%s)"), args, rcs, shell, capture) @@ -1865,12 +1906,25 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,          if not isinstance(data, bytes):              data = data.encode() +    # Popen converts entries in the arguments array from non-bytes to bytes. +    # When locale is unset it may use ascii for that encoding which can +    # cause UnicodeDecodeErrors. 
(LP: #1751051) +    if isinstance(args, six.binary_type): +        bytes_args = args +    elif isinstance(args, six.string_types): +        bytes_args = args.encode("utf-8") +    else: +        bytes_args = [ +            x if isinstance(x, six.binary_type) else x.encode("utf-8") +            for x in args]      try: -        sp = subprocess.Popen(args, stdout=stdout, +        sp = subprocess.Popen(bytes_args, stdout=stdout,                                stderr=stderr, stdin=stdin,                                env=env, shell=shell)          (out, err) = sp.communicate(data)      except OSError as e: +        if status_cb: +            status_cb('ERROR: End run command: invalid command provided\n')          raise ProcessExecutionError(              cmd=args, reason=e, errno=e.errno,              stdout="-" if decode else b"-", @@ -1895,9 +1949,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,      rc = sp.returncode      if rc not in rcs: +        if status_cb: +            status_cb( +                'ERROR: End run command: exit({code})\n'.format(code=rc))          raise ProcessExecutionError(stdout=out, stderr=err,                                      exit_code=rc,                                      cmd=args) +    if status_cb: +        status_cb('End run command: exit({code})\n'.format(code=rc))      return (out, err) @@ -1918,6 +1977,11 @@ def abs_join(*paths):  #    if it is an array, shell protect it (with single ticks)  #    if it is a string, do nothing  def shellify(cmdlist, add_header=True): +    if not isinstance(cmdlist, (tuple, list)): +        raise TypeError( +            "Input to shellify was type '%s'. Expected list or tuple." % +            (type_utils.obj_name(cmdlist))) +      content = ''      if add_header:          content += "#!/bin/sh\n" @@ -1926,7 +1990,7 @@ def shellify(cmdlist, add_header=True):      for args in cmdlist:          # If the item is a list, wrap all items in single tick.          
# If its not, then just write it directly. -        if isinstance(args, list): +        if isinstance(args, (list, tuple)):              fixed = []              for f in args:                  fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) @@ -1936,9 +2000,10 @@ def shellify(cmdlist, add_header=True):              content = "%s%s\n" % (content, args)              cmds_made += 1          else: -            raise RuntimeError(("Unable to shellify type %s" -                                " which is not a list or string") -                               % (type_utils.obj_name(args))) +            raise TypeError( +                "Unable to shellify type '%s'. Expected list, string, tuple. " +                "Got: %s" % (type_utils.obj_name(args), args)) +      LOG.debug("Shellified %s commands.", cmds_made)      return content @@ -2169,7 +2234,7 @@ def get_path_dev_freebsd(path, mnt_list):      return path_found -def get_mount_info_freebsd(path, log=LOG): +def get_mount_info_freebsd(path):      (result, err) = subp(['mount', '-p', path], rcs=[0, 1])      if len(err):          # find a path if the input is not a mounting point @@ -2183,23 +2248,49 @@ def get_mount_info_freebsd(path, log=LOG):      return "/dev/" + label_part, ret[2], ret[1] +def get_device_info_from_zpool(zpool): +    (zpoolstatus, err) = subp(['zpool', 'status', zpool]) +    if len(err): +        return None +    r = r'.*(ONLINE).*' +    for line in zpoolstatus.split("\n"): +        if re.search(r, line) and zpool not in line and "state" not in line: +            disk = line.split()[0] +            LOG.debug('found zpool "%s" on disk %s', zpool, disk) +            return disk + +  def parse_mount(path): -    (mountoutput, _err) = subp("mount") +    (mountoutput, _err) = subp(['mount'])      mount_locs = mountoutput.splitlines() +    # there are 2 types of mount outputs we have to parse therefore +    # the regex is a bit complex. 
to better understand this regex see: +    # https://regex101.com/r/2F6c1k/1 +    # https://regex101.com/r/T2en7a/1 +    regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \ +            '(?=(?:type)[\s]+([\S]+)|\(([^,]*))'      for line in mount_locs: -        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) +        m = re.search(regex, line)          if not m:              continue +        devpth = m.group(1) +        mount_point = m.group(2) +        # above regex will either fill the fs_type in group(3) +        # or group(4) depending on the format we have. +        fs_type = m.group(3) +        if fs_type is None: +            fs_type = m.group(4) +        LOG.debug('found line in mount -> devpth: %s, mount_point: %s, ' +                  'fs_type: %s', devpth, mount_point, fs_type)          # check whether the dev refers to a label on FreeBSD          # for example, if dev is '/dev/label/rootfs', we should          # continue finding the real device like '/dev/da0'. -        devm = re.search('^(/dev/.+)p([0-9])$', m.group(1)) -        if (not devm and is_FreeBSD()): +        # this is only valid for non zfs file systems as a zpool +        # can have gpt labels as disk. +        devm = re.search('^(/dev/.+)p([0-9])$', devpth) +        if not devm and is_FreeBSD() and fs_type != 'zfs':              return get_mount_info_freebsd(path) -        devpth = m.group(1) -        mount_point = m.group(2) -        fs_type = m.group(3) -        if mount_point == path: +        elif mount_point == path:              return devpth, fs_type, mount_point      return None diff --git a/cloudinit/version.py b/cloudinit/version.py index be6262d6..ccd0f84e 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@  #  # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "17.2" +__VERSION__ = "18.2"  FEATURES = [      # supports network config version 1 | 
