From 47016791ca5e97d80e45d3f100bc4e5d0b88627d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Dec 2017 17:05:29 -0500 Subject: tests: consolidate platforms into specific dirs This groups up each test platform into its own directory rather than having files spread between four different directories for one platform. Platforms tend to be worked on one at a time and so having the platforms together makes more sense than apart. --- tests/cloud_tests/bddeb.py | 3 +- tests/cloud_tests/collect.py | 8 +- tests/cloud_tests/images/__init__.py | 10 -- tests/cloud_tests/images/base.py | 56 ------ tests/cloud_tests/images/lxd.py | 194 --------------------- tests/cloud_tests/images/nocloudkvm.py | 90 ---------- tests/cloud_tests/instances/__init__.py | 10 -- tests/cloud_tests/instances/base.py | 77 -------- tests/cloud_tests/instances/lxd.py | 157 ----------------- tests/cloud_tests/instances/nocloudkvm.py | 179 ------------------- tests/cloud_tests/platforms/__init__.py | 20 ++- tests/cloud_tests/platforms/base.py | 27 --- tests/cloud_tests/platforms/images.py | 56 ++++++ tests/cloud_tests/platforms/instances.py | 77 ++++++++ tests/cloud_tests/platforms/lxd.py | 108 ------------ tests/cloud_tests/platforms/lxd/image.py | 193 ++++++++++++++++++++ tests/cloud_tests/platforms/lxd/instance.py | 157 +++++++++++++++++ tests/cloud_tests/platforms/lxd/platform.py | 108 ++++++++++++ tests/cloud_tests/platforms/lxd/snapshot.py | 53 ++++++ tests/cloud_tests/platforms/nocloudkvm.py | 91 ---------- tests/cloud_tests/platforms/nocloudkvm/image.py | 89 ++++++++++ tests/cloud_tests/platforms/nocloudkvm/instance.py | 179 +++++++++++++++++++ tests/cloud_tests/platforms/nocloudkvm/platform.py | 89 ++++++++++ tests/cloud_tests/platforms/nocloudkvm/snapshot.py | 79 +++++++++ tests/cloud_tests/platforms/platforms.py | 27 +++ tests/cloud_tests/platforms/snapshots.py | 45 +++++ tests/cloud_tests/snapshots/__init__.py | 10 -- tests/cloud_tests/snapshots/base.py | 45 ----- tests/cloud_tests/snapshots/lxd.py | 53 ------ tests/cloud_tests/snapshots/nocloudkvm.py | 79 --------- 30 files changed, 1176 insertions(+), 1193 deletions(-) delete mode 100644 tests/cloud_tests/images/__init__.py delete mode 100644 tests/cloud_tests/images/base.py delete mode 100644 tests/cloud_tests/images/lxd.py delete mode 100644 tests/cloud_tests/images/nocloudkvm.py delete mode 100644 tests/cloud_tests/instances/__init__.py delete mode 100644 tests/cloud_tests/instances/base.py delete mode 100644 tests/cloud_tests/instances/lxd.py delete mode 100644 tests/cloud_tests/instances/nocloudkvm.py delete mode 100644 tests/cloud_tests/platforms/base.py create mode 100644 tests/cloud_tests/platforms/images.py create mode 100644 tests/cloud_tests/platforms/instances.py delete mode 100644 tests/cloud_tests/platforms/lxd.py create mode 100644 tests/cloud_tests/platforms/lxd/image.py create mode 100644 tests/cloud_tests/platforms/lxd/instance.py create mode 100644 tests/cloud_tests/platforms/lxd/platform.py create mode 100644 tests/cloud_tests/platforms/lxd/snapshot.py delete mode 100644 tests/cloud_tests/platforms/nocloudkvm.py create mode 100644 tests/cloud_tests/platforms/nocloudkvm/image.py create mode 100644 tests/cloud_tests/platforms/nocloudkvm/instance.py create mode 100644 tests/cloud_tests/platforms/nocloudkvm/platform.py create mode 100644 tests/cloud_tests/platforms/nocloudkvm/snapshot.py create mode 100644 tests/cloud_tests/platforms/platforms.py create mode 100644 tests/cloud_tests/platforms/snapshots.py delete mode 100644 
tests/cloud_tests/snapshots/__init__.py delete mode 100644 tests/cloud_tests/snapshots/base.py delete mode 100644 tests/cloud_tests/snapshots/lxd.py delete mode 100644 tests/cloud_tests/snapshots/nocloudkvm.py (limited to 'tests') diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index fba8a0c7..c259dfea 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -8,7 +8,8 @@ import tempfile from cloudinit import util as c_util from tests.cloud_tests import (config, LOG) -from tests.cloud_tests import (platforms, images, snapshots, instances) +from tests.cloud_tests.platforms import (platforms, images, snapshots, + instances) from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 71ee7645..db5ee99f 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -8,7 +8,7 @@ import os from cloudinit import util as c_util from tests.cloud_tests import (config, LOG, setup_image, util) from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) -from tests.cloud_tests import (platforms, images, snapshots, instances) +from tests.cloud_tests import platforms def collect_script(instance, base_dir, script, script_name): @@ -77,7 +77,7 @@ def collect_test_data(args, snapshot, os_name, test_name): # create test instance component = PlatformComponent( - partial(instances.get_instance, snapshot, user_data, + partial(platforms.get_instance, snapshot, user_data, block=True, start=False, use_desc=test_name)) LOG.info('collecting test data for test: %s', test_name) @@ -108,7 +108,7 @@ def collect_snapshot(args, image, os_name): """ res = ({}, 1) - component = PlatformComponent(partial(snapshots.get_snapshot, image)) + component = PlatformComponent(partial(platforms.get_snapshot, image)) LOG.debug('creating snapshot for %s', os_name) with component as snapshot: @@ -136,7 +136,7 @@ def collect_image(args, platform, os_name): feature_overrides=args.feature_override) LOG.debug('os config: %s', os_config) component = PlatformComponent( - partial(images.get_image, platform, os_config)) + partial(platforms.get_image, platform, os_config)) LOG.info('acquiring image for os: %s', os_name) with component as image: diff --git a/tests/cloud_tests/images/__init__.py b/tests/cloud_tests/images/__init__.py deleted file mode 100644 index 106c59f3..00000000 --- a/tests/cloud_tests/images/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Main init.""" - - -def get_image(platform, config): - """Get image from platform object using os_name.""" - return platform.get_image(config) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/images/base.py b/tests/cloud_tests/images/base.py deleted file mode 100644 index d503108a..00000000 --- a/tests/cloud_tests/images/base.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base class for images.""" - -from ..util import TargetBase - - -class Image(TargetBase): - """Base class for images.""" - - platform_name = None - - def __init__(self, platform, config): - """Set up image. 
- - @param platform: platform object - @param config: image configuration - """ - self.platform = platform - self.config = config - - def __str__(self): - """A brief description of the image.""" - return '-'.join((self.properties['os'], self.properties['release'])) - - @property - def properties(self): - """{} containing: 'arch', 'os', 'version', 'release'.""" - raise NotImplementedError - - @property - def features(self): - """Feature flags supported by this image. - - @return_value: list of feature names - """ - return [k for k, v in self.config.get('features', {}).items() if v] - - @property - def setup_overrides(self): - """Setup options that need to be overridden for the image. - - @return_value: dictionary to update args with - """ - # NOTE: more sophisticated options may be requied at some point - return self.config.get('setup_overrides', {}) - - def snapshot(self): - """Create snapshot of image, block until done.""" - raise NotImplementedError - - def destroy(self): - """Clean up data associated with image.""" - pass - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/images/lxd.py b/tests/cloud_tests/images/lxd.py deleted file mode 100644 index 5caeba41..00000000 --- a/tests/cloud_tests/images/lxd.py +++ /dev/null @@ -1,194 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""LXD Image Base Class.""" - -import os -import shutil -import tempfile - -from cloudinit import util as c_util -from tests.cloud_tests.images import base -from tests.cloud_tests.snapshots import lxd as lxd_snapshot -from tests.cloud_tests import util - - -class LXDImage(base.Image): - """LXD backed image.""" - - platform_name = "lxd" - - def __init__(self, platform, config, pylxd_image): - """Set up image. - - @param platform: platform object - @param config: image configuration - """ - self.modified = False - self._img_instance = None - self._pylxd_image = None - self.pylxd_image = pylxd_image - super(LXDImage, self).__init__(platform, config) - - @property - def pylxd_image(self): - """Property function.""" - if self._pylxd_image: - self._pylxd_image.sync() - return self._pylxd_image - - @pylxd_image.setter - def pylxd_image(self, pylxd_image): - if self._img_instance: - self._instance.destroy() - self._img_instance = None - if (self._pylxd_image and - (self._pylxd_image is not pylxd_image) and - (not self.config.get('cache_base_image') or self.modified)): - self._pylxd_image.delete(wait=True) - self.modified = False - self._pylxd_image = pylxd_image - - @property - def _instance(self): - """Internal use only, returns a instance - - This starts an lxc instance from the image, so it is "dirty". - Better would be some way to modify this "at rest". - lxc-pstart would be an option.""" - if not self._img_instance: - self._img_instance = self.platform.launch_container( - self.properties, self.config, self.features, - use_desc='image-modification', image_desc=str(self), - image=self.pylxd_image.fingerprint) - self._img_instance.start() - return self._img_instance - - @property - def properties(self): - """{} containing: 'arch', 'os', 'version', 'release'.""" - properties = self.pylxd_image.properties - return { - 'arch': properties.get('architecture'), - 'os': properties.get('os'), - 'version': properties.get('version'), - 'release': properties.get('release'), - } - - def export_image(self, output_dir): - """Export image from lxd image store to (split) tarball on disk. 
- - @param output_dir: dir to store tarballs in - @return_value: tuple of path to metadata tarball and rootfs tarball - """ - # pylxd's image export feature doesn't do split exports, so use cmdline - c_util.subp(['lxc', 'image', 'export', self.pylxd_image.fingerprint, - output_dir], capture=True) - tarballs = [p for p in os.listdir(output_dir) if p.endswith('tar.xz')] - metadata = os.path.join( - output_dir, next(p for p in tarballs if p.startswith('meta-'))) - rootfs = os.path.join( - output_dir, next(p for p in tarballs if not p.startswith('meta-'))) - return (metadata, rootfs) - - def import_image(self, metadata, rootfs): - """Import image to lxd image store from (split) tarball on disk. - - Note, this will replace and delete the current pylxd_image - - @param metadata: metadata tarball - @param rootfs: rootfs tarball - @return_value: imported image fingerprint - """ - alias = util.gen_instance_name( - image_desc=str(self), use_desc='update-metadata') - c_util.subp(['lxc', 'image', 'import', metadata, rootfs, - '--alias', alias], capture=True) - self.pylxd_image = self.platform.query_image_by_alias(alias) - return self.pylxd_image.fingerprint - - def update_templates(self, template_config, template_data): - """Update the image's template configuration. - - Note, this will replace and delete the current pylxd_image - - @param template_config: config overrides for template metadata - @param template_data: template data to place into templates/ - """ - # set up tmp files - export_dir = tempfile.mkdtemp(prefix='cloud_test_util_') - extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_') - new_metadata = os.path.join(export_dir, 'new-meta.tar.xz') - metadata_yaml = os.path.join(extract_dir, 'metadata.yaml') - template_dir = os.path.join(extract_dir, 'templates') - - try: - # extract old data - (metadata, rootfs) = self.export_image(export_dir) - shutil.unpack_archive(metadata, extract_dir) - - # update metadata - metadata = c_util.read_conf(metadata_yaml) - templates = metadata.get('templates', {}) - templates.update(template_config) - metadata['templates'] = templates - util.yaml_dump(metadata, metadata_yaml) - - # write out template files - for name, content in template_data.items(): - path = os.path.join(template_dir, name) - c_util.write_file(path, content) - - # store new data, mark new image as modified - util.flat_tar(new_metadata, extract_dir) - self.import_image(new_metadata, rootfs) - self.modified = True - - finally: - # remove tmpfiles - shutil.rmtree(export_dir) - shutil.rmtree(extract_dir) - - def _execute(self, *args, **kwargs): - """Execute command in image, modifying image.""" - return self._instance._execute(*args, **kwargs) - - def push_file(self, local_path, remote_path): - """Copy file at 'local_path' to instance at 'remote_path'.""" - return self._instance.push_file(local_path, remote_path) - - def run_script(self, *args, **kwargs): - """Run script in image, modifying image. 
- - @return_value: script output - """ - return self._instance.run_script(*args, **kwargs) - - def snapshot(self): - """Create snapshot of image, block until done.""" - # get empty user data to pass in to instance - # if overrides for user data provided, use them - empty_userdata = util.update_user_data( - {}, self.config.get('user_data_overrides', {})) - conf = {'user.user-data': empty_userdata} - # clone current instance - instance = self.platform.launch_container( - self.properties, self.config, self.features, - container=self._instance.name, image_desc=str(self), - use_desc='snapshot', container_config=conf) - # wait for cloud-init before boot_clean_script is run to ensure - # /var/lib/cloud is removed cleanly - instance.start(wait=True, wait_for_cloud_init=True) - if self.config.get('boot_clean_script'): - instance.run_script(self.config.get('boot_clean_script')) - # freeze current instance and return snapshot - instance.freeze() - return lxd_snapshot.LXDSnapshot( - self.platform, self.properties, self.config, - self.features, instance) - - def destroy(self): - """Clean up data associated with image.""" - self.pylxd_image = None - super(LXDImage, self).destroy() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/images/nocloudkvm.py b/tests/cloud_tests/images/nocloudkvm.py deleted file mode 100644 index 8678b07f..00000000 --- a/tests/cloud_tests/images/nocloudkvm.py +++ /dev/null @@ -1,90 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""NoCloud KVM Image Base Class.""" - -from cloudinit import util as c_util - -import os -import shutil -import tempfile - -from tests.cloud_tests.images import base -from tests.cloud_tests.snapshots import nocloudkvm as nocloud_kvm_snapshot - - -class NoCloudKVMImage(base.Image): - """NoCloud KVM backed image.""" - - platform_name = "nocloud-kvm" - - def __init__(self, platform, config, orig_img_path): - """Set up image. 
- - @param platform: platform object - @param config: image configuration - @param img_path: path to the image - """ - self.modified = False - self._workd = tempfile.mkdtemp(prefix='NoCloudKVMImage') - self._orig_img_path = orig_img_path - self._img_path = os.path.join(self._workd, - os.path.basename(self._orig_img_path)) - - c_util.subp(['qemu-img', 'create', '-f', 'qcow2', - '-b', orig_img_path, self._img_path]) - - super(NoCloudKVMImage, self).__init__(platform, config) - - @property - def properties(self): - """Dictionary containing: 'arch', 'os', 'version', 'release'.""" - return { - 'arch': self.config['arch'], - 'os': self.config['family'], - 'release': self.config['release'], - 'version': self.config['version'], - } - - def _execute(self, command, stdin=None, env=None): - """Execute command in image, modifying image.""" - return self.mount_image_callback(command, stdin=stdin, env=env) - - def mount_image_callback(self, command, stdin=None, env=None): - """Run mount-image-callback.""" - - env_args = [] - if env: - env_args = ['env'] + ["%s=%s" for k, v in env.items()] - - mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts', - '--system-resolvconf', self._img_path, - '--', 'chroot', '_MOUNTPOINT_'] - try: - out, err = c_util.subp(mic_chroot + env_args + list(command), - data=stdin, decode=False) - return (out, err, 0) - except c_util.ProcessExecutionError as e: - return (e.stdout, e.stderr, e.exit_code) - - def snapshot(self): - """Create snapshot of image, block until done.""" - if not self._img_path: - raise RuntimeError() - - return nocloud_kvm_snapshot.NoCloudKVMSnapshot( - self.platform, self.properties, self.config, - self.features, self._img_path) - - def destroy(self): - """Unset path to signal image is no longer used. - - The removal of the images and all other items is handled by the - framework. In some cases we want to keep the images, so let the - framework decide whether to keep or destroy everything. - """ - self._img_path = None - shutil.rmtree(self._workd) - - super(NoCloudKVMImage, self).destroy() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/instances/__init__.py b/tests/cloud_tests/instances/__init__.py deleted file mode 100644 index fc2e9cbc..00000000 --- a/tests/cloud_tests/instances/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Main init.""" - - -def get_instance(snapshot, *args, **kwargs): - """Get instance from snapshot.""" - return snapshot.launch(*args, **kwargs) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/instances/base.py b/tests/cloud_tests/instances/base.py deleted file mode 100644 index 8c59d62c..00000000 --- a/tests/cloud_tests/instances/base.py +++ /dev/null @@ -1,77 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base instance.""" - -from ..util import TargetBase - - -class Instance(TargetBase): - """Base instance object.""" - - platform_name = None - - def __init__(self, platform, name, properties, config, features): - """Set up instance. - - @param platform: platform object - @param name: hostname of instance - @param properties: image properties - @param config: image config - @param features: supported feature flags - """ - self.platform = platform - self.name = name - self.properties = properties - self.config = config - self.features = features - self._tmp_count = 0 - - def console_log(self): - """Instance console. 
- - @return_value: bytes of this instance’s console - """ - raise NotImplementedError - - def reboot(self, wait=True): - """Reboot instance.""" - raise NotImplementedError - - def shutdown(self, wait=True): - """Shutdown instance.""" - raise NotImplementedError - - def start(self, wait=True, wait_for_cloud_init=False): - """Start instance.""" - raise NotImplementedError - - def destroy(self): - """Clean up instance.""" - pass - - def _wait_for_system(self, wait_for_cloud_init): - """Wait until system has fully booted and cloud-init has finished. - - @param wait_time: maximum time to wait - @return_value: None, may raise OSError if wait_time exceeded - """ - def clean_test(test): - """Clean formatting for system ready test testcase.""" - return ' '.join(l for l in test.strip().splitlines() - if not l.lstrip().startswith('#')) - - time = self.config['boot_timeout'] - tests = [self.config['system_ready_script']] - if wait_for_cloud_init: - tests.append(self.config['cloud_init_ready_script']) - - formatted_tests = ' && '.join(clean_test(t) for t in tests) - cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' - 'exit 0; sleep 1; done; exit 1').format(time=time, - test=formatted_tests) - - if self.execute(cmd, rcs=(0, 1))[-1] != 0: - raise OSError('timeout: after {}s system not started'.format(time)) - - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/instances/lxd.py b/tests/cloud_tests/instances/lxd.py deleted file mode 100644 index 3b035d86..00000000 --- a/tests/cloud_tests/instances/lxd.py +++ /dev/null @@ -1,157 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base LXD instance.""" - -from . import base - -import os -import shutil -from tempfile import mkdtemp - - -class LXDInstance(base.Instance): - """LXD container backed instance.""" - - platform_name = "lxd" - - def __init__(self, platform, name, properties, config, features, - pylxd_container): - """Set up instance. - - @param platform: platform object - @param name: hostname of instance - @param properties: image properties - @param config: image config - @param features: supported feature flags - """ - self._pylxd_container = pylxd_container - super(LXDInstance, self).__init__( - platform, name, properties, config, features) - self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) - self._setup_console_log() - - @property - def pylxd_container(self): - """Property function.""" - self._pylxd_container.sync() - return self._pylxd_container - - def _setup_console_log(self): - logf = os.path.join(self.tmpd, "console.log") - - # doing this ensures we can read it. Otherwise it ends up root:root. - with open(logf, "w") as fp: - fp.write("# %s\n" % self.name) - - cfg = "lxc.console.logfile=%s" % logf - orig = self._pylxd_container.config.get('raw.lxc', "") - if orig: - orig += "\n" - self._pylxd_container.config['raw.lxc'] = orig + cfg - self._pylxd_container.save() - self._console_log_file = logf - - def _execute(self, command, stdin=None, env=None): - if env is None: - env = {} - - if stdin is not None: - # pylxd does not support input to execute. - # https://github.com/lxc/pylxd/issues/244 - # - # The solution here is write a tmp file in the container - # and then execute a shell that sets it standard in to - # be from that file, removes it, and calls the comand. 
- tmpf = self.tmpfile() - self.write_data(tmpf, stdin) - ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"' - command = (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] + - list(command)) - - # ensure instance is running and execute the command - self.start() - # execute returns a ContainerExecuteResult, named tuple - # (exit_code, stdout, stderr) - res = self.pylxd_container.execute(command, environment=env) - - # get out, exit and err from pylxd return - if not hasattr(res, 'exit_code'): - # pylxd 2.1.3 and earlier only return out and err, no exit - raise RuntimeError( - "No 'exit_code' in pylxd.container.execute return.\n" - "pylxd > 2.2 is required.") - - return res.stdout, res.stderr, res.exit_code - - def read_data(self, remote_path, decode=False): - """Read data from instance filesystem. - - @param remote_path: path in instance - @param decode: decode data before returning. - @return_value: content of remote_path as bytes if 'decode' is False, - and as string if 'decode' is True. - """ - data = self.pylxd_container.files.get(remote_path) - return data.decode() if decode else data - - def write_data(self, remote_path, data): - """Write data to instance filesystem. - - @param remote_path: path in instance - @param data: data to write in bytes - """ - self.pylxd_container.files.put(remote_path, data) - - def console_log(self): - """Console log. - - @return_value: bytes of this instance’s console - """ - if not os.path.exists(self._console_log_file): - raise NotImplementedError( - "Console log '%s' does not exist. If this is a remote " - "lxc, then this is really NotImplementedError. If it is " - "A local lxc, then this is a RuntimeError." - "https://github.com/lxc/lxd/issues/1129") - with open(self._console_log_file, "rb") as fp: - return fp.read() - - def reboot(self, wait=True): - """Reboot instance.""" - self.shutdown(wait=wait) - self.start(wait=wait) - - def shutdown(self, wait=True): - """Shutdown instance.""" - if self.pylxd_container.status != 'Stopped': - self.pylxd_container.stop(wait=wait) - - def start(self, wait=True, wait_for_cloud_init=False): - """Start instance.""" - if self.pylxd_container.status != 'Running': - self.pylxd_container.start(wait=wait) - if wait: - self._wait_for_system(wait_for_cloud_init) - - def freeze(self): - """Freeze instance.""" - if self.pylxd_container.status != 'Frozen': - self.pylxd_container.freeze(wait=True) - - def unfreeze(self): - """Unfreeze instance.""" - if self.pylxd_container.status == 'Frozen': - self.pylxd_container.unfreeze(wait=True) - - def destroy(self): - """Clean up instance.""" - self.unfreeze() - self.shutdown() - self.pylxd_container.delete(wait=True) - if self.platform.container_exists(self.name): - raise OSError('container {} was not properly removed' - .format(self.name)) - shutil.rmtree(self.tmpd) - super(LXDInstance, self).destroy() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/instances/nocloudkvm.py b/tests/cloud_tests/instances/nocloudkvm.py deleted file mode 100644 index bc06a79e..00000000 --- a/tests/cloud_tests/instances/nocloudkvm.py +++ /dev/null @@ -1,179 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base NoCloud KVM instance.""" - -import os -import paramiko -import socket -import subprocess -import time - -from cloudinit import util as c_util -from tests.cloud_tests.instances import base -from tests.cloud_tests import util - -# This domain contains reverse lookups for hostnames that are used. 
-# The primary reason is so sudo will return quickly when it attempts -# to look up the hostname. i9n is just short for 'integration'. -# see also bug 1730744 for why we had to do this. -CI_DOMAIN = "i9n.cloud-init.io" - - -class NoCloudKVMInstance(base.Instance): - """NoCloud KVM backed instance.""" - - platform_name = "nocloud-kvm" - _ssh_client = None - - def __init__(self, platform, name, image_path, properties, config, - features, user_data, meta_data): - """Set up instance. - - @param platform: platform object - @param name: image path - @param image_path: path to disk image to boot. - @param properties: dictionary of properties - @param config: dictionary of configuration values - @param features: dictionary of supported feature flags - """ - self.user_data = user_data - self.meta_data = meta_data - self.ssh_key_file = os.path.join(platform.config['data_dir'], - platform.config['private_key']) - self.ssh_port = None - self.pid = None - self.pid_file = None - self.console_file = None - self.disk = image_path - - super(NoCloudKVMInstance, self).__init__( - platform, name, properties, config, features) - - def destroy(self): - """Clean up instance.""" - if self.pid: - try: - c_util.subp(['kill', '-9', self.pid]) - except util.ProcessExectuionError: - pass - - if self.pid_file: - os.remove(self.pid_file) - - self.pid = None - if self._ssh_client: - self._ssh_client.close() - self._ssh_client = None - - super(NoCloudKVMInstance, self).destroy() - - def _execute(self, command, stdin=None, env=None): - env_args = [] - if env: - env_args = ['env'] + ["%s=%s" for k, v in env.items()] - - return self.ssh(['sudo'] + env_args + list(command), stdin=stdin) - - def generate_seed(self, tmpdir): - """Generate nocloud seed from user-data""" - seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name) - user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name) - - with open(user_data_file, "w") as ud_file: - ud_file.write(self.user_data) - - c_util.subp(['cloud-localds', seed_file, user_data_file]) - - return seed_file - - def get_free_port(self): - """Get a free port assigned by the kernel.""" - s = socket.socket() - s.bind(('', 0)) - num = s.getsockname()[1] - s.close() - return num - - def ssh(self, command, stdin=None): - """Run a command via SSH.""" - client = self._ssh_connect() - - cmd = util.shell_pack(command) - try: - fp_in, fp_out, fp_err = client.exec_command(cmd) - channel = fp_in.channel - if stdin is not None: - fp_in.write(stdin) - fp_in.close() - - channel.shutdown_write() - rc = channel.recv_exit_status() - return (fp_out.read(), fp_err.read(), rc) - except paramiko.SSHException as e: - raise util.InTargetExecuteError( - b'', b'', -1, command, self.name, reason=e) - - def _ssh_connect(self, hostname='localhost', username='ubuntu', - banner_timeout=120, retry_attempts=30): - """Connect via SSH.""" - if self._ssh_client: - return self._ssh_client - - private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - while retry_attempts: - try: - client.connect(hostname=hostname, username=username, - port=self.ssh_port, pkey=private_key, - banner_timeout=banner_timeout) - self._ssh_client = client - return client - except (paramiko.SSHException, TypeError): - time.sleep(1) - retry_attempts = retry_attempts - 1 - - error_desc = 'Failed command to: %s@%s:%s' % (username, hostname, - self.ssh_port) - raise util.InTargetExecuteError('', '', -1, 'ssh connect', - self.name, 
error_desc) - - def start(self, wait=True, wait_for_cloud_init=False): - """Start instance.""" - tmpdir = self.platform.config['data_dir'] - seed = self.generate_seed(tmpdir) - self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name) - self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name) - self.ssh_port = self.get_free_port() - - cmd = ['./tools/xkvm', - '--disk', '%s,cache=unsafe' % self.disk, - '--disk', '%s,cache=unsafe' % seed, - '--netdev', ','.join(['user', - 'hostfwd=tcp::%s-:22' % self.ssh_port, - 'dnssearch=%s' % CI_DOMAIN]), - '--', '-pidfile', self.pid_file, '-vnc', 'none', - '-m', '2G', '-smp', '2', '-nographic', - '-serial', 'file:' + self.console_file] - subprocess.Popen(cmd, - close_fds=True, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - while not os.path.exists(self.pid_file): - time.sleep(1) - - with open(self.pid_file, 'r') as pid_f: - self.pid = pid_f.readlines()[0].strip() - - if wait: - self._wait_for_system(wait_for_cloud_init) - - def console_log(self): - if not self.console_file: - return b'' - with open(self.console_file, "rb") as fp: - return fp.read() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py index 3490fe87..92ed1627 100644 --- a/tests/cloud_tests/platforms/__init__.py +++ b/tests/cloud_tests/platforms/__init__.py @@ -2,8 +2,8 @@ """Main init.""" -from tests.cloud_tests.platforms import lxd -from tests.cloud_tests.platforms import nocloudkvm +from .lxd import platform as lxd +from .nocloudkvm import platform as nocloudkvm PLATFORMS = { 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform, @@ -11,6 +11,16 @@ PLATFORMS = { } +def get_image(platform, config): + """Get image from platform object using os_name.""" + return platform.get_image(config) + + +def get_instance(snapshot, *args, **kwargs): + """Get instance from snapshot.""" + return snapshot.launch(*args, **kwargs) + + def get_platform(platform_name, config): """Get the platform object for 'platform_name' and init.""" platform_cls = PLATFORMS.get(platform_name) @@ -18,4 +28,10 @@ def get_platform(platform_name, config): raise ValueError('invalid platform name: {}'.format(platform_name)) return platform_cls(config) + +def get_snapshot(image): + """Get snapshot from image.""" + return image.snapshot() + + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/base.py b/tests/cloud_tests/platforms/base.py deleted file mode 100644 index 28975368..00000000 --- a/tests/cloud_tests/platforms/base.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base platform class.""" - - -class Platform(object): - """Base class for platforms.""" - - platform_name = None - - def __init__(self, config): - """Set up platform.""" - self.config = config - - def get_image(self, img_conf): - """Get image using specified image configuration. - - @param img_conf: configuration for image - @return_value: cloud_tests.images instance - """ - raise NotImplementedError - - def destroy(self): - """Clean up platform data.""" - pass - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py new file mode 100644 index 00000000..d503108a --- /dev/null +++ b/tests/cloud_tests/platforms/images.py @@ -0,0 +1,56 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Base class for images.""" + +from ..util import TargetBase + + +class Image(TargetBase): + """Base class for images.""" + + platform_name = None + + def __init__(self, platform, config): + """Set up image. + + @param platform: platform object + @param config: image configuration + """ + self.platform = platform + self.config = config + + def __str__(self): + """A brief description of the image.""" + return '-'.join((self.properties['os'], self.properties['release'])) + + @property + def properties(self): + """{} containing: 'arch', 'os', 'version', 'release'.""" + raise NotImplementedError + + @property + def features(self): + """Feature flags supported by this image. + + @return_value: list of feature names + """ + return [k for k, v in self.config.get('features', {}).items() if v] + + @property + def setup_overrides(self): + """Setup options that need to be overridden for the image. + + @return_value: dictionary to update args with + """ + # NOTE: more sophisticated options may be requied at some point + return self.config.get('setup_overrides', {}) + + def snapshot(self): + """Create snapshot of image, block until done.""" + raise NotImplementedError + + def destroy(self): + """Clean up data associated with image.""" + pass + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py new file mode 100644 index 00000000..8c59d62c --- /dev/null +++ b/tests/cloud_tests/platforms/instances.py @@ -0,0 +1,77 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base instance.""" + +from ..util import TargetBase + + +class Instance(TargetBase): + """Base instance object.""" + + platform_name = None + + def __init__(self, platform, name, properties, config, features): + """Set up instance. + + @param platform: platform object + @param name: hostname of instance + @param properties: image properties + @param config: image config + @param features: supported feature flags + """ + self.platform = platform + self.name = name + self.properties = properties + self.config = config + self.features = features + self._tmp_count = 0 + + def console_log(self): + """Instance console. + + @return_value: bytes of this instance’s console + """ + raise NotImplementedError + + def reboot(self, wait=True): + """Reboot instance.""" + raise NotImplementedError + + def shutdown(self, wait=True): + """Shutdown instance.""" + raise NotImplementedError + + def start(self, wait=True, wait_for_cloud_init=False): + """Start instance.""" + raise NotImplementedError + + def destroy(self): + """Clean up instance.""" + pass + + def _wait_for_system(self, wait_for_cloud_init): + """Wait until system has fully booted and cloud-init has finished. 
+ + @param wait_for_cloud_init: also wait for cloud-init to finish + @return_value: None, may raise OSError if boot_timeout is exceeded + """ + def clean_test(test): + """Clean formatting for a system-ready test case.""" + return ' '.join(l for l in test.strip().splitlines() + if not l.lstrip().startswith('#')) + + time = self.config['boot_timeout'] + tests = [self.config['system_ready_script']] + if wait_for_cloud_init: + tests.append(self.config['cloud_init_ready_script']) + + formatted_tests = ' && '.join(clean_test(t) for t in tests) + cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' + 'exit 0; sleep 1; done; exit 1').format(time=time, + test=formatted_tests) + + if self.execute(cmd, rcs=(0, 1))[-1] != 0: + raise OSError('timeout: after {}s system not started'.format(time)) + + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/lxd.py b/tests/cloud_tests/platforms/lxd.py deleted file mode 100644 index ead0955b..00000000 --- a/tests/cloud_tests/platforms/lxd.py +++ /dev/null @@ -1,108 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base LXD platform.""" - -from pylxd import (Client, exceptions) - -from tests.cloud_tests.images import lxd as lxd_image -from tests.cloud_tests.instances import lxd as lxd_instance -from tests.cloud_tests.platforms import base -from tests.cloud_tests import util - -DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443" - - -class LXDPlatform(base.Platform): - """LXD test platform.""" - - platform_name = 'lxd' - - def __init__(self, config): - """Set up platform.""" - super(LXDPlatform, self).__init__(config) - # TODO: allow configuration of remote lxd host via env variables - # set up lxd connection - self.client = Client() - - def get_image(self, img_conf): - """Get image using specified image configuration. - - @param img_conf: configuration for image - @return_value: cloud_tests.images instance - """ - pylxd_image = self.client.images.create_from_simplestreams( - img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER), - img_conf['alias']) - image = lxd_image.LXDImage(self, img_conf, pylxd_image) - if img_conf.get('override_templates', False): - image.update_templates(self.config.get('template_overrides', {}), - self.config.get('template_files', {})) - return image - - def launch_container(self, properties, config, features, - image=None, container=None, ephemeral=False, - container_config=None, block=True, image_desc=None, - use_desc=None): - """Launch a container.
- - @param properties: image properties - @param config: image configuration - @param features: image features - @param image: image fingerprint to launch from - @param container: container to copy - @param ephemeral: delete image after first shutdown - @param container_config: config options for instance as dict - @param block: wait until container created - @param image_desc: description of image being launched - @param use_desc: description of container's use - @return_value: cloud_tests.instances instance - """ - if not (image or container): - raise ValueError("either image or container must be specified") - container = self.client.containers.create({ - 'name': util.gen_instance_name(image_desc=image_desc, - use_desc=use_desc, - used_list=self.list_containers()), - 'ephemeral': bool(ephemeral), - 'config': (container_config - if isinstance(container_config, dict) else {}), - 'source': ({'type': 'image', 'fingerprint': image} if image else - {'type': 'copy', 'source': container}) - }, wait=block) - return lxd_instance.LXDInstance(self, container.name, properties, - config, features, container) - - def container_exists(self, container_name): - """Check if container with name 'container_name' exists. - - @return_value: True if exists else False - """ - res = True - try: - self.client.containers.get(container_name) - except exceptions.LXDAPIException as e: - res = False - if e.response.status_code != 404: - raise - return res - - def list_containers(self): - """List names of all containers. - - @return_value: list of names - """ - return [container.name for container in self.client.containers.all()] - - def query_image_by_alias(self, alias): - """Get image by alias in local image store. - - @param alias: alias of image - @return_value: pylxd image (not cloud_tests.images instance) - """ - return self.client.images.get_by_alias(alias) - - def destroy(self): - """Clean up platform data.""" - super(LXDPlatform, self).destroy() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py new file mode 100644 index 00000000..b5de1f52 --- /dev/null +++ b/tests/cloud_tests/platforms/lxd/image.py @@ -0,0 +1,193 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""LXD Image Base Class.""" + +import os +import shutil +import tempfile + +from ..images import Image +from .snapshot import LXDSnapshot +from cloudinit import util as c_util +from tests.cloud_tests import util + + +class LXDImage(Image): + """LXD backed image.""" + + platform_name = "lxd" + + def __init__(self, platform, config, pylxd_image): + """Set up image. 
+ + @param platform: platform object + @param config: image configuration + @param pylxd_image: pylxd image object + """ + self.modified = False + self._img_instance = None + self._pylxd_image = None + self.pylxd_image = pylxd_image + super(LXDImage, self).__init__(platform, config) + + @property + def pylxd_image(self): + """Property function.""" + if self._pylxd_image: + self._pylxd_image.sync() + return self._pylxd_image + + @pylxd_image.setter + def pylxd_image(self, pylxd_image): + if self._img_instance: + self._instance.destroy() + self._img_instance = None + if (self._pylxd_image and + (self._pylxd_image is not pylxd_image) and + (not self.config.get('cache_base_image') or self.modified)): + self._pylxd_image.delete(wait=True) + self.modified = False + self._pylxd_image = pylxd_image + + @property + def _instance(self): + """Internal use only, returns an instance + + This starts an lxc instance from the image, so it is "dirty". + Better would be some way to modify this "at rest". + lxc-pstart would be an option.""" + if not self._img_instance: + self._img_instance = self.platform.launch_container( + self.properties, self.config, self.features, + use_desc='image-modification', image_desc=str(self), + image=self.pylxd_image.fingerprint) + self._img_instance.start() + return self._img_instance + + @property + def properties(self): + """{} containing: 'arch', 'os', 'version', 'release'.""" + properties = self.pylxd_image.properties + return { + 'arch': properties.get('architecture'), + 'os': properties.get('os'), + 'version': properties.get('version'), + 'release': properties.get('release'), + } + + def export_image(self, output_dir): + """Export image from lxd image store to (split) tarball on disk. + + @param output_dir: dir to store tarballs in + @return_value: tuple of path to metadata tarball and rootfs tarball + """ + # pylxd's image export feature doesn't do split exports, so use cmdline + c_util.subp(['lxc', 'image', 'export', self.pylxd_image.fingerprint, + output_dir], capture=True) + tarballs = [p for p in os.listdir(output_dir) if p.endswith('tar.xz')] + metadata = os.path.join( + output_dir, next(p for p in tarballs if p.startswith('meta-'))) + rootfs = os.path.join( + output_dir, next(p for p in tarballs if not p.startswith('meta-'))) + return (metadata, rootfs) + + def import_image(self, metadata, rootfs): + """Import image to lxd image store from (split) tarball on disk. + + Note, this will replace and delete the current pylxd_image + + @param metadata: metadata tarball + @param rootfs: rootfs tarball + @return_value: imported image fingerprint + """ + alias = util.gen_instance_name( + image_desc=str(self), use_desc='update-metadata') + c_util.subp(['lxc', 'image', 'import', metadata, rootfs, + '--alias', alias], capture=True) + self.pylxd_image = self.platform.query_image_by_alias(alias) + return self.pylxd_image.fingerprint + + def update_templates(self, template_config, template_data): + """Update the image's template configuration.
+ + Note, this will replace and delete the current pylxd_image + + @param template_config: config overrides for template metadata + @param template_data: template data to place into templates/ + """ + # set up tmp files + export_dir = tempfile.mkdtemp(prefix='cloud_test_util_') + extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_') + new_metadata = os.path.join(export_dir, 'new-meta.tar.xz') + metadata_yaml = os.path.join(extract_dir, 'metadata.yaml') + template_dir = os.path.join(extract_dir, 'templates') + + try: + # extract old data + (metadata, rootfs) = self.export_image(export_dir) + shutil.unpack_archive(metadata, extract_dir) + + # update metadata + metadata = c_util.read_conf(metadata_yaml) + templates = metadata.get('templates', {}) + templates.update(template_config) + metadata['templates'] = templates + util.yaml_dump(metadata, metadata_yaml) + + # write out template files + for name, content in template_data.items(): + path = os.path.join(template_dir, name) + c_util.write_file(path, content) + + # store new data, mark new image as modified + util.flat_tar(new_metadata, extract_dir) + self.import_image(new_metadata, rootfs) + self.modified = True + + finally: + # remove tmpfiles + shutil.rmtree(export_dir) + shutil.rmtree(extract_dir) + + def _execute(self, *args, **kwargs): + """Execute command in image, modifying image.""" + return self._instance._execute(*args, **kwargs) + + def push_file(self, local_path, remote_path): + """Copy file at 'local_path' to instance at 'remote_path'.""" + return self._instance.push_file(local_path, remote_path) + + def run_script(self, *args, **kwargs): + """Run script in image, modifying image. + + @return_value: script output + """ + return self._instance.run_script(*args, **kwargs) + + def snapshot(self): + """Create snapshot of image, block until done.""" + # get empty user data to pass in to instance + # if overrides for user data provided, use them + empty_userdata = util.update_user_data( + {}, self.config.get('user_data_overrides', {})) + conf = {'user.user-data': empty_userdata} + # clone current instance + instance = self.platform.launch_container( + self.properties, self.config, self.features, + container=self._instance.name, image_desc=str(self), + use_desc='snapshot', container_config=conf) + # wait for cloud-init before boot_clean_script is run to ensure + # /var/lib/cloud is removed cleanly + instance.start(wait=True, wait_for_cloud_init=True) + if self.config.get('boot_clean_script'): + instance.run_script(self.config.get('boot_clean_script')) + # freeze current instance and return snapshot + instance.freeze() + return LXDSnapshot(self.platform, self.properties, self.config, + self.features, instance) + + def destroy(self): + """Clean up data associated with image.""" + self.pylxd_image = None + super(LXDImage, self).destroy() + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py new file mode 100644 index 00000000..0d697c05 --- /dev/null +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -0,0 +1,157 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base LXD instance.""" + +import os +import shutil +from tempfile import mkdtemp + +from ..instances import Instance + + +class LXDInstance(Instance): + """LXD container backed instance.""" + + platform_name = "lxd" + + def __init__(self, platform, name, properties, config, features, + pylxd_container): + """Set up instance. 
+ + @param platform: platform object + @param name: hostname of instance + @param properties: image properties + @param config: image config + @param features: supported feature flags + @param pylxd_container: pylxd container object + """ + self._pylxd_container = pylxd_container + super(LXDInstance, self).__init__( + platform, name, properties, config, features) + self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) + self._setup_console_log() + + @property + def pylxd_container(self): + """Property function.""" + self._pylxd_container.sync() + return self._pylxd_container + + def _setup_console_log(self): + logf = os.path.join(self.tmpd, "console.log") + + # doing this ensures we can read it. Otherwise it ends up root:root. + with open(logf, "w") as fp: + fp.write("# %s\n" % self.name) + + cfg = "lxc.console.logfile=%s" % logf + orig = self._pylxd_container.config.get('raw.lxc', "") + if orig: + orig += "\n" + self._pylxd_container.config['raw.lxc'] = orig + cfg + self._pylxd_container.save() + self._console_log_file = logf + + def _execute(self, command, stdin=None, env=None): + if env is None: + env = {} + + if stdin is not None: + # pylxd does not support input to execute. + # https://github.com/lxc/pylxd/issues/244 + # + # The solution here is to write a tmp file in the container + # and then execute a shell that sets its standard input to + # be from that file, removes it, and calls the command. + tmpf = self.tmpfile() + self.write_data(tmpf, stdin) + ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"' + command = (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] + + list(command)) + + # ensure instance is running and execute the command + self.start() + # execute returns a ContainerExecuteResult named tuple + # (exit_code, stdout, stderr) + res = self.pylxd_container.execute(command, environment=env) + + # get out, exit and err from pylxd return + if not hasattr(res, 'exit_code'): + # pylxd 2.1.3 and earlier only return out and err, no exit + raise RuntimeError( + "No 'exit_code' in pylxd.container.execute return.\n" + "pylxd > 2.2 is required.") + + return res.stdout, res.stderr, res.exit_code + + def read_data(self, remote_path, decode=False): + """Read data from instance filesystem. + + @param remote_path: path in instance + @param decode: decode data before returning. + @return_value: content of remote_path as bytes if 'decode' is False, + and as string if 'decode' is True. + """ + data = self.pylxd_container.files.get(remote_path) + return data.decode() if decode else data + + def write_data(self, remote_path, data): + """Write data to instance filesystem. + + @param remote_path: path in instance + @param data: data to write in bytes + """ + self.pylxd_container.files.put(remote_path, data) + + def console_log(self): + """Console log. + + @return_value: bytes of this instance’s console + """ + if not os.path.exists(self._console_log_file): + raise NotImplementedError( + "Console log '%s' does not exist. If this is a remote " + "lxc, then this is really NotImplementedError. If it is " + "a local lxc, then this is a RuntimeError."
+ "https://github.com/lxc/lxd/issues/1129") + with open(self._console_log_file, "rb") as fp: + return fp.read() + + def reboot(self, wait=True): + """Reboot instance.""" + self.shutdown(wait=wait) + self.start(wait=wait) + + def shutdown(self, wait=True): + """Shutdown instance.""" + if self.pylxd_container.status != 'Stopped': + self.pylxd_container.stop(wait=wait) + + def start(self, wait=True, wait_for_cloud_init=False): + """Start instance.""" + if self.pylxd_container.status != 'Running': + self.pylxd_container.start(wait=wait) + if wait: + self._wait_for_system(wait_for_cloud_init) + + def freeze(self): + """Freeze instance.""" + if self.pylxd_container.status != 'Frozen': + self.pylxd_container.freeze(wait=True) + + def unfreeze(self): + """Unfreeze instance.""" + if self.pylxd_container.status == 'Frozen': + self.pylxd_container.unfreeze(wait=True) + + def destroy(self): + """Clean up instance.""" + self.unfreeze() + self.shutdown() + self.pylxd_container.delete(wait=True) + if self.platform.container_exists(self.name): + raise OSError('container {} was not properly removed' + .format(self.name)) + shutil.rmtree(self.tmpd) + super(LXDInstance, self).destroy() + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py new file mode 100644 index 00000000..6a016929 --- /dev/null +++ b/tests/cloud_tests/platforms/lxd/platform.py @@ -0,0 +1,108 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base LXD platform.""" + +from pylxd import (Client, exceptions) + +from ..platforms import Platform +from .image import LXDImage +from .instance import LXDInstance +from tests.cloud_tests import util + +DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443" + + +class LXDPlatform(Platform): + """LXD test platform.""" + + platform_name = 'lxd' + + def __init__(self, config): + """Set up platform.""" + super(LXDPlatform, self).__init__(config) + # TODO: allow configuration of remote lxd host via env variables + # set up lxd connection + self.client = Client() + + def get_image(self, img_conf): + """Get image using specified image configuration. + + @param img_conf: configuration for image + @return_value: cloud_tests.images instance + """ + pylxd_image = self.client.images.create_from_simplestreams( + img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER), + img_conf['alias']) + image = LXDImage(self, img_conf, pylxd_image) + if img_conf.get('override_templates', False): + image.update_templates(self.config.get('template_overrides', {}), + self.config.get('template_files', {})) + return image + + def launch_container(self, properties, config, features, + image=None, container=None, ephemeral=False, + container_config=None, block=True, image_desc=None, + use_desc=None): + """Launch a container. 
+ + @param properties: image properties + @param config: image configuration + @param features: image features + @param image: image fingerprint to launch from + @param container: container to copy + @param ephemeral: delete image after first shutdown + @param container_config: config options for instance as dict + @param block: wait until container created + @param image_desc: description of image being launched + @param use_desc: description of container's use + @return_value: cloud_tests.instances instance + """ + if not (image or container): + raise ValueError("either image or container must be specified") + container = self.client.containers.create({ + 'name': util.gen_instance_name(image_desc=image_desc, + use_desc=use_desc, + used_list=self.list_containers()), + 'ephemeral': bool(ephemeral), + 'config': (container_config + if isinstance(container_config, dict) else {}), + 'source': ({'type': 'image', 'fingerprint': image} if image else + {'type': 'copy', 'source': container}) + }, wait=block) + return LXDInstance(self, container.name, properties, config, features, + container) + + def container_exists(self, container_name): + """Check if container with name 'container_name' exists. + + @return_value: True if exists else False + """ + res = True + try: + self.client.containers.get(container_name) + except exceptions.LXDAPIException as e: + res = False + if e.response.status_code != 404: + raise + return res + + def list_containers(self): + """List names of all containers. + + @return_value: list of names + """ + return [container.name for container in self.client.containers.all()] + + def query_image_by_alias(self, alias): + """Get image by alias in local image store. + + @param alias: alias of image + @return_value: pylxd image (not cloud_tests.images instance) + """ + return self.client.images.get_by_alias(alias) + + def destroy(self): + """Clean up platform data.""" + super(LXDPlatform, self).destroy() + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/lxd/snapshot.py b/tests/cloud_tests/platforms/lxd/snapshot.py new file mode 100644 index 00000000..b524644f --- /dev/null +++ b/tests/cloud_tests/platforms/lxd/snapshot.py @@ -0,0 +1,53 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base LXD snapshot.""" + +from ..snapshots import Snapshot + + +class LXDSnapshot(Snapshot): + """LXD image copy backed snapshot.""" + + platform_name = "lxd" + + def __init__(self, platform, properties, config, features, + pylxd_frozen_instance): + """Set up snapshot. + + @param platform: platform object + @param properties: image properties + @param config: image config + @param features: supported feature flags + """ + self.pylxd_frozen_instance = pylxd_frozen_instance + super(LXDSnapshot, self).__init__( + platform, properties, config, features) + + def launch(self, user_data, meta_data=None, block=True, start=True, + use_desc=None): + """Launch instance. 
+ + @param user_data: user-data for the instance + @param meta_data: meta-data for the instance + @param block: wait until instance is created + @param start: start instance and wait until fully started + @param use_desc: description of snapshot instance use + @return_value: an Instance + """ + inst_config = {'user.user-data': user_data} + if meta_data: + inst_config['user.meta-data'] = meta_data + instance = self.platform.launch_container( + self.properties, self.config, self.features, block=block, + image_desc=str(self), container=self.pylxd_frozen_instance.name, + use_desc=use_desc, container_config=inst_config) + if start: + instance.start() + return instance + + def destroy(self): + """Clean up snapshot data.""" + self.pylxd_frozen_instance.destroy() + super(LXDSnapshot, self).destroy() + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/nocloudkvm.py b/tests/cloud_tests/platforms/nocloudkvm.py deleted file mode 100644 index 76cd83ad..00000000 --- a/tests/cloud_tests/platforms/nocloudkvm.py +++ /dev/null @@ -1,91 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base NoCloud KVM platform.""" -import glob -import os - -from simplestreams import filters -from simplestreams import mirrors -from simplestreams import objectstores -from simplestreams import util as s_util - -from cloudinit import util as c_util -from tests.cloud_tests.images import nocloudkvm as nocloud_kvm_image -from tests.cloud_tests.instances import nocloudkvm as nocloud_kvm_instance -from tests.cloud_tests.platforms import base -from tests.cloud_tests import util - - -class NoCloudKVMPlatform(base.Platform): - """NoCloud KVM test platform.""" - - platform_name = 'nocloud-kvm' - - def get_image(self, img_conf): - """Get image using specified image configuration.
- - @param img_conf: configuration for image - @return_value: cloud_tests.images instance - """ - (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None) - - filter = filters.get_filters(['arch=%s' % c_util.get_architecture(), - 'release=%s' % img_conf['release'], - 'ftype=disk1.img']) - mirror_config = {'filters': filter, - 'keep_items': False, - 'max_items': 1, - 'checksumming_reader': True, - 'item_download': True - } - - def policy(content, path): - return s_util.read_signed(content, keyring=img_conf['keyring']) - - smirror = mirrors.UrlMirrorReader(url, policy=policy) - tstore = objectstores.FileStore(img_conf['mirror_dir']) - tmirror = mirrors.ObjectFilterMirror(config=mirror_config, - objectstore=tstore) - tmirror.sync(smirror, path) - - search_d = os.path.join(img_conf['mirror_dir'], '**', - img_conf['release'], '**', '*.img') - - images = [] - for fname in glob.iglob(search_d, recursive=True): - images.append(fname) - - if len(images) < 1: - raise RuntimeError("No images found under '%s'" % search_d) - if len(images) > 1: - raise RuntimeError( - "Multiple images found in '%s': %s" % (search_d, - ' '.join(images))) - - image = nocloud_kvm_image.NoCloudKVMImage(self, img_conf, images[0]) - return image - - def create_instance(self, properties, config, features, - src_img_path, image_desc=None, use_desc=None, - user_data=None, meta_data=None): - """Create an instance - - @param src_img_path: image path to launch from - @param properties: image properties - @param config: image configuration - @param features: image features - @param image_desc: description of image being launched - @param use_desc: description of container's use - @return_value: cloud_tests.instances instance - """ - name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc) - img_path = os.path.join(self.config['data_dir'], name + '.qcow2') - c_util.subp(['qemu-img', 'create', '-f', 'qcow2', - '-b', src_img_path, img_path]) - - return nocloud_kvm_instance.NoCloudKVMInstance(self, name, img_path, - properties, config, - features, user_data, - meta_data) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py new file mode 100644 index 00000000..09ff2a3b --- /dev/null +++ b/tests/cloud_tests/platforms/nocloudkvm/image.py @@ -0,0 +1,89 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""NoCloud KVM Image Base Class.""" + +from cloudinit import util as c_util + +import os +import shutil +import tempfile + +from ..images import Image +from .snapshot import NoCloudKVMSnapshot + + +class NoCloudKVMImage(Image): + """NoCloud KVM backed image.""" + + platform_name = "nocloud-kvm" + + def __init__(self, platform, config, orig_img_path): + """Set up image. 
+
+        @param platform: platform object
+        @param config: image configuration
+        @param orig_img_path: path to the image
+        """
+        self.modified = False
+        self._workd = tempfile.mkdtemp(prefix='NoCloudKVMImage')
+        self._orig_img_path = orig_img_path
+        self._img_path = os.path.join(self._workd,
+                                      os.path.basename(self._orig_img_path))
+
+        c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
+                     '-b', orig_img_path, self._img_path])
+
+        super(NoCloudKVMImage, self).__init__(platform, config)
+
+    @property
+    def properties(self):
+        """Dictionary containing: 'arch', 'os', 'version', 'release'."""
+        return {
+            'arch': self.config['arch'],
+            'os': self.config['family'],
+            'release': self.config['release'],
+            'version': self.config['version'],
+        }
+
+    def _execute(self, command, stdin=None, env=None):
+        """Execute command in image, modifying image."""
+        return self.mount_image_callback(command, stdin=stdin, env=env)
+
+    def mount_image_callback(self, command, stdin=None, env=None):
+        """Run mount-image-callback."""
+
+        env_args = []
+        if env:
+            # format each entry as KEY=VALUE for the 'env' wrapper
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
+
+        mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts',
+                      '--system-resolvconf', self._img_path,
+                      '--', 'chroot', '_MOUNTPOINT_']
+        try:
+            out, err = c_util.subp(mic_chroot + env_args + list(command),
+                                   data=stdin, decode=False)
+            return (out, err, 0)
+        except c_util.ProcessExecutionError as e:
+            return (e.stdout, e.stderr, e.exit_code)
+
+    def snapshot(self):
+        """Create snapshot of image, block until done."""
+        if not self._img_path:
+            raise RuntimeError('image destroyed; no image path to snapshot')
+
+        return NoCloudKVMSnapshot(self.platform, self.properties, self.config,
+                                  self.features, self._img_path)
+
+    def destroy(self):
+        """Unset path to signal image is no longer used.
+
+        The removal of the images and all other items is handled by the
+        framework. In some cases we want to keep the images, so let the
+        framework decide whether to keep or destroy everything.
+        """
+        self._img_path = None
+        shutil.rmtree(self._workd)
+
+        super(NoCloudKVMImage, self).destroy()
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
new file mode 100644
index 00000000..a87d76a6
--- /dev/null
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -0,0 +1,179 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base NoCloud KVM instance."""
+
+import os
+import paramiko
+import socket
+import subprocess
+import time
+
+from ..instances import Instance
+from cloudinit import util as c_util
+from tests.cloud_tests import util
+
+# This domain contains reverse lookups for hostnames that are used.
+# The primary reason is so sudo will return quickly when it attempts
+# to look up the hostname. i9n is just short for 'integration'.
+# see also bug 1730744 for why we had to do this.
+CI_DOMAIN = "i9n.cloud-init.io"
+
+
+class NoCloudKVMInstance(Instance):
+    """NoCloud KVM backed instance."""
+
+    platform_name = "nocloud-kvm"
+    _ssh_client = None
+
+    def __init__(self, platform, name, image_path, properties, config,
+                 features, user_data, meta_data):
+        """Set up instance.
+
+        @param platform: platform object
+        @param name: instance name
+        @param image_path: path to disk image to boot.
+        @param properties: dictionary of properties
+        @param config: dictionary of configuration values
+        @param features: dictionary of supported feature flags
+        """
+        self.user_data = user_data
+        self.meta_data = meta_data
+        self.ssh_key_file = os.path.join(platform.config['data_dir'],
+                                         platform.config['private_key'])
+        self.ssh_port = None
+        self.pid = None
+        self.pid_file = None
+        self.console_file = None
+        self.disk = image_path
+
+        super(NoCloudKVMInstance, self).__init__(
+            platform, name, properties, config, features)
+
+    def destroy(self):
+        """Clean up instance."""
+        if self.pid:
+            try:
+                c_util.subp(['kill', '-9', self.pid])
+            except c_util.ProcessExecutionError:
+                pass
+
+        if self.pid_file:
+            os.remove(self.pid_file)
+
+        self.pid = None
+        if self._ssh_client:
+            self._ssh_client.close()
+            self._ssh_client = None
+
+        super(NoCloudKVMInstance, self).destroy()
+
+    def _execute(self, command, stdin=None, env=None):
+        env_args = []
+        if env:
+            # format each entry as KEY=VALUE for the 'env' wrapper
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
+
+        return self.ssh(['sudo'] + env_args + list(command), stdin=stdin)
+
+    def generate_seed(self, tmpdir):
+        """Generate nocloud seed from user-data"""
+        seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name)
+        user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name)
+
+        with open(user_data_file, "w") as ud_file:
+            ud_file.write(self.user_data)
+
+        c_util.subp(['cloud-localds', seed_file, user_data_file])
+
+        return seed_file
+
+    def get_free_port(self):
+        """Get a free port assigned by the kernel."""
+        s = socket.socket()
+        s.bind(('', 0))
+        num = s.getsockname()[1]
+        s.close()
+        return num
+
+    def ssh(self, command, stdin=None):
+        """Run a command via SSH."""
+        client = self._ssh_connect()
+
+        cmd = util.shell_pack(command)
+        try:
+            fp_in, fp_out, fp_err = client.exec_command(cmd)
+            channel = fp_in.channel
+            if stdin is not None:
+                fp_in.write(stdin)
+                fp_in.close()
+
+            channel.shutdown_write()
+            rc = channel.recv_exit_status()
+            return (fp_out.read(), fp_err.read(), rc)
+        except paramiko.SSHException as e:
+            raise util.InTargetExecuteError(
+                b'', b'', -1, command, self.name, reason=e)
+
+    def _ssh_connect(self, hostname='localhost', username='ubuntu',
+                     banner_timeout=120, retry_attempts=30):
+        """Connect via SSH."""
+        if self._ssh_client:
+            return self._ssh_client
+
+        private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
+        client = paramiko.SSHClient()
+        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        while retry_attempts:
+            try:
+                client.connect(hostname=hostname, username=username,
+                               port=self.ssh_port, pkey=private_key,
+                               banner_timeout=banner_timeout)
+                self._ssh_client = client
+                return client
+            except (paramiko.SSHException, TypeError):
+                time.sleep(1)
+                retry_attempts = retry_attempts - 1
+
+        error_desc = 'Failed command to: %s@%s:%s' % (username, hostname,
+                                                      self.ssh_port)
+        raise util.InTargetExecuteError('', '', -1, 'ssh connect',
+                                        self.name, error_desc)
+
+    def start(self, wait=True, wait_for_cloud_init=False):
+        """Start instance."""
+        tmpdir = self.platform.config['data_dir']
+        seed = self.generate_seed(tmpdir)
+        self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name)
+        self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name)
+        self.ssh_port = self.get_free_port()
+
+        cmd = ['./tools/xkvm',
+               '--disk', '%s,cache=unsafe' % self.disk,
+               '--disk', '%s,cache=unsafe' % seed,
+               '--netdev', ','.join(['user',
+                                     'hostfwd=tcp::%s-:22' % self.ssh_port,
+                                     'dnssearch=%s' % CI_DOMAIN]),
+               '--', '-pidfile', self.pid_file, '-vnc', 'none',
+               '-m', '2G', '-smp', '2', '-nographic',
+               '-serial', 'file:' + self.console_file]
+        subprocess.Popen(cmd,
+                         close_fds=True,
+                         stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE)
+
+        while not os.path.exists(self.pid_file):
+            time.sleep(1)
+
+        with open(self.pid_file, 'r') as pid_f:
+            self.pid = pid_f.readlines()[0].strip()
+
+        if wait:
+            self._wait_for_system(wait_for_cloud_init)
+
+    def console_log(self):
+        if not self.console_file:
+            return b''
+        with open(self.console_file, "rb") as fp:
+            return fp.read()
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
new file mode 100644
index 00000000..85933463
--- /dev/null
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -0,0 +1,89 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base NoCloud KVM platform."""
+import glob
+import os
+
+from simplestreams import filters
+from simplestreams import mirrors
+from simplestreams import objectstores
+from simplestreams import util as s_util
+
+from ..platforms import Platform
+from .image import NoCloudKVMImage
+from .instance import NoCloudKVMInstance
+from cloudinit import util as c_util
+from tests.cloud_tests import util
+
+
+class NoCloudKVMPlatform(Platform):
+    """NoCloud KVM test platform."""
+
+    platform_name = 'nocloud-kvm'
+
+    def get_image(self, img_conf):
+        """Get image using specified image configuration.
+
+        @param img_conf: configuration for image
+        @return_value: cloud_tests.images instance
+        """
+        (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
+
+        flt = filters.get_filters(['arch=%s' % c_util.get_architecture(),
+                                   'release=%s' % img_conf['release'],
+                                   'ftype=disk1.img'])
+        mirror_config = {'filters': flt,
+                         'keep_items': False,
+                         'max_items': 1,
+                         'checksumming_reader': True,
+                         'item_download': True
+                         }
+
+        def policy(content, path):
+            return s_util.read_signed(content, keyring=img_conf['keyring'])
+
+        smirror = mirrors.UrlMirrorReader(url, policy=policy)
+        tstore = objectstores.FileStore(img_conf['mirror_dir'])
+        tmirror = mirrors.ObjectFilterMirror(config=mirror_config,
+                                             objectstore=tstore)
+        tmirror.sync(smirror, path)
+
+        search_d = os.path.join(img_conf['mirror_dir'], '**',
+                                img_conf['release'], '**', '*.img')
+
+        images = []
+        for fname in glob.iglob(search_d, recursive=True):
+            images.append(fname)
+
+        if len(images) < 1:
+            raise RuntimeError("No images found under '%s'" % search_d)
+        if len(images) > 1:
+            raise RuntimeError(
+                "Multiple images found in '%s': %s" % (search_d,
+                                                       ' '.join(images)))
+
+        image = NoCloudKVMImage(self, img_conf, images[0])
+        return image
+
+    def create_instance(self, properties, config, features,
+                        src_img_path, image_desc=None, use_desc=None,
+                        user_data=None, meta_data=None):
+        """Create an instance.
+
+        @param src_img_path: image path to launch from
+        @param properties: image properties
+        @param config: image configuration
+        @param features: image features
+        @param image_desc: description of image being launched
+        @param use_desc: description of container's use
+        @return_value: cloud_tests.instances instance
+        """
+        name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
+        img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
+        c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
+                     '-b', src_img_path, img_path])
+
+        return NoCloudKVMInstance(self, name, img_path, properties, config,
+                                  features, user_data, meta_data)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
new file mode 100644
index 00000000..0005e1f2
--- /dev/null
+++ b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
@@ -0,0 +1,79 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base NoCloud KVM snapshot."""
+import os
+import shutil
+import tempfile
+
+from ..snapshots import Snapshot
+
+
+class NoCloudKVMSnapshot(Snapshot):
+    """NoCloud KVM image copy backed snapshot."""
+
+    platform_name = "nocloud-kvm"
+
+    def __init__(self, platform, properties, config, features, image_path):
+        """Set up snapshot.
+
+        @param platform: platform object
+        @param properties: image properties
+        @param config: image config
+        @param features: supported feature flags
+        @param image_path: image file to snapshot.
+        """
+        self._workd = tempfile.mkdtemp(prefix='NoCloudKVMSnapshot')
+        snapshot = os.path.join(self._workd, 'snapshot')
+        shutil.copyfile(image_path, snapshot)
+        self._image_path = snapshot
+
+        super(NoCloudKVMSnapshot, self).__init__(
+            platform, properties, config, features)
+
+    def launch(self, user_data, meta_data=None, block=True, start=True,
+               use_desc=None):
+        """Launch instance.
+
+        @param user_data: user-data for the instance
+        @param meta_data: meta-data for the instance
+        @param block: wait until instance is created
+        @param start: start instance and wait until fully started
+        @param use_desc: description of snapshot instance use
+        @return_value: an Instance
+        """
+        key_file = os.path.join(self.platform.config['data_dir'],
+                                self.platform.config['public_key'])
+        user_data = self.inject_ssh_key(user_data, key_file)
+
+        instance = self.platform.create_instance(
+            self.properties, self.config, self.features,
+            self._image_path, image_desc=str(self), use_desc=use_desc,
+            user_data=user_data, meta_data=meta_data)
+
+        if start:
+            instance.start()
+
+        return instance
+
+    def inject_ssh_key(self, user_data, key_file):
+        """Inject the authorized key into the user_data."""
+        with open(key_file) as f:
+            value = f.read()
+
+        key = 'ssh_authorized_keys:'
+        value = ' - %s' % value.strip()
+        user_data = user_data.split('\n')
+        if key in user_data:
+            user_data.insert(user_data.index(key) + 1, '%s' % value)
+        else:
+            user_data.insert(-1, '%s' % key)
+            user_data.insert(-1, '%s' % value)
+
+        return '\n'.join(user_data)
+
+    def destroy(self):
+        """Clean up snapshot data."""
+        shutil.rmtree(self._workd)
+        super(NoCloudKVMSnapshot, self).destroy()
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
new file mode 100644
index 00000000..28975368
--- /dev/null
+++ b/tests/cloud_tests/platforms/platforms.py
@@ -0,0 +1,27 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base platform class."""
+
+
+class Platform(object):
+    """Base class for platforms."""
+
+    platform_name = None
+
+    def __init__(self, config):
+        """Set up platform."""
+        self.config = config
+
+    def get_image(self, img_conf):
+        """Get image using specified image configuration.
+
+        @param img_conf: configuration for image
+        @return_value: cloud_tests.images instance
+        """
+        raise NotImplementedError
+
+    def destroy(self):
+        """Clean up platform data."""
+        pass
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/snapshots.py b/tests/cloud_tests/platforms/snapshots.py
new file mode 100644
index 00000000..94328982
--- /dev/null
+++ b/tests/cloud_tests/platforms/snapshots.py
@@ -0,0 +1,45 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base snapshot."""
+
+
+class Snapshot(object):
+    """Base class for snapshots."""
+
+    platform_name = None
+
+    def __init__(self, platform, properties, config, features):
+        """Set up snapshot.
+
+        @param platform: platform object
+        @param properties: image properties
+        @param config: image config
+        @param features: supported feature flags
+        """
+        self.platform = platform
+        self.properties = properties
+        self.config = config
+        self.features = features
+
+    def __str__(self):
+        """A brief description of the snapshot."""
+        return '-'.join((self.properties['os'], self.properties['release']))
+
+    def launch(self, user_data, meta_data=None, block=True, start=True,
+               use_desc=None):
+        """Launch instance.
+
+        @param user_data: user-data for the instance
+        @param meta_data: meta-data for the instance
+        @param block: wait until instance is created
+        @param start: start instance and wait until fully started
+        @param use_desc: description of snapshot instance use
+        @return_value: an Instance
+        """
+        raise NotImplementedError
+
+    def destroy(self):
+        """Clean up snapshot data."""
+        pass
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/__init__.py b/tests/cloud_tests/snapshots/__init__.py
deleted file mode 100644
index 93a54f5e..00000000
--- a/tests/cloud_tests/snapshots/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-
-def get_snapshot(image):
-    """Get snapshot from image."""
-    return image.snapshot()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/base.py b/tests/cloud_tests/snapshots/base.py
deleted file mode 100644
index 94328982..00000000
--- a/tests/cloud_tests/snapshots/base.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base snapshot."""
-
-
-class Snapshot(object):
-    """Base class for snapshots."""
-
-    platform_name = None
-
-    def __init__(self, platform, properties, config, features):
-        """Set up snapshot.
-
-        @param platform: platform object
-        @param properties: image properties
-        @param config: image config
-        @param features: supported feature flags
-        """
-        self.platform = platform
-        self.properties = properties
-        self.config = config
-        self.features = features
-
-    def __str__(self):
-        """A brief description of the snapshot."""
-        return '-'.join((self.properties['os'], self.properties['release']))
-
-    def launch(self, user_data, meta_data=None, block=True, start=True,
-               use_desc=None):
-        """Launch instance.
- - @param user_data: user-data for the instance - @param instance_id: instance-id for the instance - @param block: wait until instance is created - @param start: start instance and wait until fully started - @param use_desc: description of snapshot instance use - @return_value: an Instance - """ - raise NotImplementedError - - def destroy(self): - """Clean up snapshot data.""" - pass - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/snapshots/lxd.py b/tests/cloud_tests/snapshots/lxd.py deleted file mode 100644 index 39c55c5e..00000000 --- a/tests/cloud_tests/snapshots/lxd.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base LXD snapshot.""" - -from tests.cloud_tests.snapshots import base - - -class LXDSnapshot(base.Snapshot): - """LXD image copy backed snapshot.""" - - platform_name = "lxd" - - def __init__(self, platform, properties, config, features, - pylxd_frozen_instance): - """Set up snapshot. - - @param platform: platform object - @param properties: image properties - @param config: image config - @param features: supported feature flags - """ - self.pylxd_frozen_instance = pylxd_frozen_instance - super(LXDSnapshot, self).__init__( - platform, properties, config, features) - - def launch(self, user_data, meta_data=None, block=True, start=True, - use_desc=None): - """Launch instance. - - @param user_data: user-data for the instance - @param instance_id: instance-id for the instance - @param block: wait until instance is created - @param start: start instance and wait until fully started - @param use_desc: description of snapshot instance use - @return_value: an Instance - """ - inst_config = {'user.user-data': user_data} - if meta_data: - inst_config['user.meta-data'] = meta_data - instance = self.platform.launch_container( - self.properties, self.config, self.features, block=block, - image_desc=str(self), container=self.pylxd_frozen_instance.name, - use_desc=use_desc, container_config=inst_config) - if start: - instance.start() - return instance - - def destroy(self): - """Clean up snapshot data.""" - self.pylxd_frozen_instance.destroy() - super(LXDSnapshot, self).destroy() - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/snapshots/nocloudkvm.py b/tests/cloud_tests/snapshots/nocloudkvm.py deleted file mode 100644 index 21e908da..00000000 --- a/tests/cloud_tests/snapshots/nocloudkvm.py +++ /dev/null @@ -1,79 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base NoCloud KVM snapshot.""" -import os -import shutil -import tempfile - -from tests.cloud_tests.snapshots import base - - -class NoCloudKVMSnapshot(base.Snapshot): - """NoCloud KVM image copy backed snapshot.""" - - platform_name = "nocloud-kvm" - - def __init__(self, platform, properties, config, features, image_path): - """Set up snapshot. - - @param platform: platform object - @param properties: image properties - @param config: image config - @param features: supported feature flags - @param image_path: image file to snapshot. - """ - self._workd = tempfile.mkdtemp(prefix='NoCloudKVMSnapshot') - snapshot = os.path.join(self._workd, 'snapshot') - shutil.copyfile(image_path, snapshot) - self._image_path = snapshot - - super(NoCloudKVMSnapshot, self).__init__( - platform, properties, config, features) - - def launch(self, user_data, meta_data=None, block=True, start=True, - use_desc=None): - """Launch instance. 
- - @param user_data: user-data for the instance - @param instance_id: instance-id for the instance - @param block: wait until instance is created - @param start: start instance and wait until fully started - @param use_desc: description of snapshot instance use - @return_value: an Instance - """ - key_file = os.path.join(self.platform.config['data_dir'], - self.platform.config['public_key']) - user_data = self.inject_ssh_key(user_data, key_file) - - instance = self.platform.create_instance( - self.properties, self.config, self.features, - self._image_path, image_desc=str(self), use_desc=use_desc, - user_data=user_data, meta_data=meta_data) - - if start: - instance.start() - - return instance - - def inject_ssh_key(self, user_data, key_file): - """Inject the authorized key into the user_data.""" - with open(key_file) as f: - value = f.read() - - key = 'ssh_authorized_keys:' - value = ' - %s' % value.strip() - user_data = user_data.split('\n') - if key in user_data: - user_data.insert(user_data.index(key) + 1, '%s' % value) - else: - user_data.insert(-1, '%s' % key) - user_data.insert(-1, '%s' % value) - - return '\n'.join(user_data) - - def destroy(self): - """Clean up snapshot data.""" - shutil.rmtree(self._workd) - super(NoCloudKVMSnapshot, self).destroy() - -# vi: ts=4 expandtab -- cgit v1.2.3 From 30b4d15764a1a9644379cf95770e8b2480856882 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 5 Dec 2017 16:25:11 -0700 Subject: cli: Add clean and status subcommands The 'cloud-init clean' command allows a user or script to clear cloud-init artifacts from the system so that cloud-init sees the system as unconfigured upon reboot. Optional parameters can be provided to remove cloud-init logs and reboot after clean. The 'cloud-init status' command allows the user or script to check whether cloud-init has finished all configuration stages and whether errors occurred. An optional --wait argument will poll on a 0.25 second interval until cloud-init configuration is complete. The benefit here is scripts can block on cloud-init completion before performing post-config tasks. --- cloudinit/cmd/clean.py | 102 +++++++++++ cloudinit/cmd/main.py | 18 ++ cloudinit/cmd/status.py | 157 +++++++++++++++++ cloudinit/cmd/tests/__init__.py | 0 cloudinit/cmd/tests/test_clean.py | 159 +++++++++++++++++ cloudinit/cmd/tests/test_status.py | 353 +++++++++++++++++++++++++++++++++++++ cloudinit/distros/__init__.py | 16 +- cloudinit/util.py | 26 +++ tests/unittests/test_cli.py | 30 +++- tests/unittests/test_util.py | 38 ++++ 10 files changed, 888 insertions(+), 11 deletions(-) create mode 100644 cloudinit/cmd/clean.py create mode 100644 cloudinit/cmd/status.py create mode 100644 cloudinit/cmd/tests/__init__.py create mode 100644 cloudinit/cmd/tests/test_clean.py create mode 100644 cloudinit/cmd/tests/test_status.py (limited to 'tests') diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py new file mode 100644 index 00000000..81797b1c --- /dev/null +++ b/cloudinit/cmd/clean.py @@ -0,0 +1,102 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Define 'clean' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys + +from cloudinit.stages import Init +from cloudinit.util import ( + ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, subp) + + +def error(msg): + sys.stderr.write("ERROR: " + msg + "\n") + + +def get_parser(parser=None): + """Build or extend an arg parser for clean utility. + + @param parser: Optional existing ArgumentParser instance representing the + clean subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='clean', + description=('Remove logs and artifacts so cloud-init re-runs on ' + 'a clean system')) + parser.add_argument( + '-l', '--logs', action='store_true', default=False, dest='remove_logs', + help='Remove cloud-init logs.') + parser.add_argument( + '-r', '--reboot', action='store_true', default=False, + help='Reboot system after logs are cleaned so cloud-init re-runs.') + parser.add_argument( + '-s', '--seed', action='store_true', default=False, dest='remove_seed', + help='Remove cloud-init seed directory /var/lib/cloud/seed.') + return parser + + +def remove_artifacts(remove_logs, remove_seed=False): + """Helper which removes artifacts dir and optionally log files. + + @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False + preserves them. + @param: remove_seed: Boolean. Set True to also delete seed subdir in + paths.cloud_dir. + @returns: 0 on success, 1 otherwise. + """ + init = Init(ds_deps=[]) + init.read_cfg() + if remove_logs: + for log_file in get_config_logfiles(init.cfg): + del_file(log_file) + + if not os.path.isdir(init.paths.cloud_dir): + return 0 # Artifacts dir already cleaned + with chdir(init.paths.cloud_dir): + for path in os.listdir('.'): + if path == 'seed' and not remove_seed: + continue + try: + if os.path.isdir(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 + return 0 + + +def handle_clean_args(name, args): + """Handle calls to 'cloud-init clean' as a subcommand.""" + exit_code = remove_artifacts(args.remove_logs, args.remove_seed) + if exit_code == 0 and args.reboot: + cmd = ['shutdown', '-r', 'now'] + try: + subp(cmd, capture=False) + except ProcessExecutionError as e: + error( + 'Could not reboot this system using "{0}": {1}'.format( + cmd, str(e))) + exit_code = 1 + return exit_code + + +def main(): + """Tool to collect and tar all cloud-init related logs.""" + parser = get_parser() + sys.exit(handle_clean_args('clean', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 6fb9d9e7..aa56225d 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -767,6 +767,12 @@ def main(sysv_args=None): parser_collect_logs = subparsers.add_parser( 'collect-logs', help='Collect and tar all cloud-init debug info') + parser_clean = subparsers.add_parser( + 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + + parser_status = subparsers.add_parser( + 'status', help='Report cloud-init status or wait on completion.') + if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost if sysv_args[0] == 'analyze': @@ -783,6 +789,18 @@ def main(sysv_args=None): logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( 
action=('collect-logs', handle_collect_logs_args)) + elif sysv_args[0] == 'clean': + from cloudinit.cmd.clean import ( + get_parser as clean_parser, handle_clean_args) + clean_parser(parser_clean) + parser_clean.set_defaults( + action=('clean', handle_clean_args)) + elif sysv_args[0] == 'status': + from cloudinit.cmd.status import ( + get_parser as status_parser, handle_status_args) + status_parser(parser_status) + parser_status.set_defaults( + action=('status', handle_status_args)) args = parser.parse_args(args=sysv_args) diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py new file mode 100644 index 00000000..3e5d0d07 --- /dev/null +++ b/cloudinit/cmd/status.py @@ -0,0 +1,157 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Define 'status' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys +from time import gmtime, strftime, sleep + +from cloudinit.distros import uses_systemd +from cloudinit.stages import Init +from cloudinit.util import get_cmdline, load_file, load_json + +CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' + +# customer visible status messages +STATUS_ENABLED_NOT_RUN = 'not run' +STATUS_RUNNING = 'running' +STATUS_DONE = 'done' +STATUS_ERROR = 'error' +STATUS_DISABLED = 'disabled' + + +def get_parser(parser=None): + """Build or extend an arg parser for status utility. + + @param parser: Optional existing ArgumentParser instance representing the + status subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='status', + description='Report run status of cloud init') + parser.add_argument( + '-l', '--long', action='store_true', default=False, + help=('Report long format of statuses including run stage name and' + ' error messages')) + parser.add_argument( + '-w', '--wait', action='store_true', default=False, + help='Block waiting on cloud-init to complete') + return parser + + +def handle_status_args(name, args): + """Handle calls to 'cloud-init status' as a subcommand.""" + # Read configured paths + init = Init(ds_deps=[]) + init.read_cfg() + + status, status_detail, time = _get_status_details(init.paths) + if args.wait: + while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): + sys.stdout.write('.') + sys.stdout.flush() + status, status_detail, time = _get_status_details(init.paths) + sleep(0.25) + sys.stdout.write('\n') + if args.long: + print('status: {0}'.format(status)) + if time: + print('time: {0}'.format(time)) + print('detail:\n{0}'.format(status_detail)) + else: + print('status: {0}'.format(status)) + return 1 if status == STATUS_ERROR else 0 + + +def _is_cloudinit_disabled(disable_file, paths): + """Report whether cloud-init is disabled. + + @param disable_file: The path to the cloud-init disable file. + @param paths: An initialized cloudinit.helpers.Paths object. + @returns: A tuple containing (bool, reason) about cloud-init's status and + why. 
+ """ + is_disabled = False + cmdline_parts = get_cmdline().split() + if not uses_systemd(): + reason = 'Cloud-init enabled on sysvinit' + elif 'cloud-init=enabled' in cmdline_parts: + reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + elif os.path.exists(disable_file): + is_disabled = True + reason = 'Cloud-init disabled by {0}'.format(disable_file) + elif 'cloud-init=disabled' in cmdline_parts: + is_disabled = True + reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' + elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + is_disabled = True + reason = 'Cloud-init disabled by cloud-init-generator' + return (is_disabled, reason) + + +def _get_status_details(paths): + """Return a 3-tuple of status, status_details and time of last event. + + @param paths: An initialized cloudinit.helpers.paths object. + + Values are obtained from parsing paths.run_dir/status.json. + """ + + status = STATUS_ENABLED_NOT_RUN + status_detail = '' + status_v1 = {} + + status_file = os.path.join(paths.run_dir, 'status.json') + + (is_disabled, reason) = _is_cloudinit_disabled( + CLOUDINIT_DISABLED_FILE, paths) + if is_disabled: + status = STATUS_DISABLED + status_detail = reason + if os.path.exists(status_file): + status_v1 = load_json(load_file(status_file)).get('v1', {}) + errors = [] + latest_event = 0 + for key, value in sorted(status_v1.items()): + if key == 'stage': + if value: + status_detail = 'Running in stage: {0}'.format(value) + elif key == 'datasource': + status_detail = value + elif isinstance(value, dict): + errors.extend(value.get('errors', [])) + finished = value.get('finished') or 0 + if finished == 0: + status = STATUS_RUNNING + event_time = max(value.get('start', 0), finished) + if event_time > latest_event: + latest_event = event_time + if errors: + status = STATUS_ERROR + status_detail = '\n'.join(errors) + elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: + status = STATUS_DONE + if latest_event: + time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + else: + time = '' + return status, status_detail, time + + +def main(): + """Tool to report status of cloud-init.""" + parser = get_parser() + sys.exit(handle_status_args('status', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py new file mode 100644 index 00000000..af438aab --- /dev/null +++ b/cloudinit/cmd/tests/test_clean.py @@ -0,0 +1,159 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.cmd import clean +from cloudinit.util import ensure_dir, write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock +from collections import namedtuple +import os +from six import StringIO + +mypaths = namedtuple('MyPaths', 'cloud_dir') + + +class TestClean(CiTestCase): + + def setUp(self): + super(TestClean, self).setUp() + self.new_root = self.tmp_dir() + self.artifact_dir = self.tmp_path('artifacts', self.new_root) + self.log1 = self.tmp_path('cloud-init.log', self.new_root) + self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) + + class FakeInit(object): + cfg = {'def_log_file': self.log1, + 'output': {'all': '|tee -a {0}'.format(self.log2)}} + paths = mypaths(cloud_dir=self.artifact_dir) + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test_remove_artifacts_removes_logs(self): + """remove_artifacts removes logs when remove_logs is True.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + self.assertFalse( + os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=True) + self.assertFalse(os.path.exists(self.log1), 'Unexpected file') + self.assertFalse(os.path.exists(self.log2), 'Unexpected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_preserves_logs(self): + """remove_artifacts leaves logs when remove_logs is False.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertTrue(os.path.exists(self.log1), 'Missing expected file') + self.assertTrue(os.path.exists(self.log2), 'Missing expected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_removes_artifacts_skipping_seed(self): + """remove_artifacts cleans artifacts dir with exception of seed dir.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for expected_dir in dirs[:2]: + self.assertTrue( + os.path.exists(expected_dir), + 'Missing {0} dir'.format(expected_dir)) + for deleted_dir in dirs[2:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_removes_artifacts_removes_seed(self): + """remove_artifacts removes seed dir when remove_seed is True.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False, remove_seed=True) + self.assertEqual(0, retcode) + self.assertTrue( + os.path.exists(self.artifact_dir), 'Missing artifact dir') + for deleted_dir in dirs[1:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_returns_one_on_errors(self): + 
"""remove_artifacts returns non-zero on failure and prints an error.""" + ensure_dir(self.artifact_dir) + ensure_dir(os.path.join(self.artifact_dir, 'dir1')) + + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'del_dir': {'side_effect': OSError('oops')}, + 'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(1, retcode) + self.assertEqual( + 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + + def test_handle_clean_args_reboots(self): + """handle_clean_args_reboots when reboot arg is provided.""" + + called_cmds = [] + + def fake_subp(cmd, capture): + called_cmds.append((cmd, capture)) + return '', '' + + myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') + cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'subp': {'side_effect': fake_subp}, + 'Init': {'side_effect': self.init_class}}, + clean.handle_clean_args, name='does not matter', args=cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [(['shutdown', '-r', 'now'], False)], called_cmds) + + def test_status_main(self): + '''clean.main can be run as a standalone script.''' + write_file(self.log1, 'cloud-init-log') + with self.assertRaises(SystemExit) as context_manager: + wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}, + 'sys.argv': {'new': ['clean', '--logs']}}, + clean.main) + + self.assertEqual(0, context_manager.exception.code) + self.assertFalse( + os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) + + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py new file mode 100644 index 00000000..8ec9b5bc --- /dev/null +++ b/cloudinit/cmd/tests/test_status.py @@ -0,0 +1,353 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from collections import namedtuple +import os +from six import StringIO +from textwrap import dedent + +from cloudinit.atomic_helper import write_json +from cloudinit.cmd import status +from cloudinit.util import write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'long wait') + + +class TestStatus(CiTestCase): + + def setUp(self): + super(TestStatus, self).setUp() + self.new_root = self.tmp_dir() + self.status_file = self.tmp_path('status.json', self.new_root) + self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) + self.paths = mypaths(run_dir=self.new_root) + + class FakeInit(object): + paths = self.paths + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test__is_cloudinit_disabled_false_on_sysvinit(self): + '''When not in an environment using systemd, return False.''' + write_file(self.disable_file, '') # Create the ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': False}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse( + is_disabled, 'expected enabled cloud-init on sysvinit') + self.assertEqual('Cloud-init enabled on sysvinit', reason) + + def test__is_cloudinit_disabled_true_on_disable_file(self): + '''When using systemd and disable_file is present return disabled.''' + write_file(self.disable_file, '') # Create observed disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by {0}'.format(self.disable_file), reason) + + def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): + '''Not disabled when using systemd and enabled via commandline.''' + write_file(self.disable_file, '') # Create ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=enabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse(is_disabled, 'expected enabled cloud-init') + self.assertEqual( + 'Cloud-init enabled by kernel command line cloud-init=enabled', + reason) + + def test__is_cloudinit_disabled_true_on_kernel_cmdline(self): + '''When using systemd and disable_file is present return disabled.''' + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=disabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by kernel parameter cloud-init=disabled', + reason) + + def test__is_cloudinit_disabled_true_when_generator_disables(self): + '''When cloud-init-generator doesn't write enabled file return True.''' + enabled_file = os.path.join(self.paths.run_dir, 'enabled') + self.assertFalse(os.path.exists(enabled_file)) + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) + + def test_status_returns_not_run(self): 
+ '''When status.json does not exist yet, return 'not run'.''' + self.assertFalse( + os.path.exists(self.status_file), 'Unexpected status.json found') + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: not run\n', m_stdout.getvalue()) + + def test_status_returns_disabled_long_on_presence_of_disable_file(self): + '''When cloudinit is disabled, return disabled reason.''' + + checked_files = [] + + def fakeexists(filepath): + checked_files.append(filepath) + status_file = os.path.join(self.paths.run_dir, 'status.json') + return bool(not filepath == status_file) + + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'os.path.exists': {'side_effect': fakeexists}, + '_is_cloudinit_disabled': (True, 'disabled for some reason'), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [os.path.join(self.paths.run_dir, 'status.json')], + checked_files) + expected = dedent('''\ + status: disabled + detail: + disabled for some reason + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running(self): + '''Report running when status file exists but isn't finished.''' + write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: running\n', m_stdout.getvalue()) + + def test_status_returns_done(self): + '''Reports done when stage is None and all stages are finished.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'blah': {'finished': 123.456}, + 'init': {'errors': [], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: done\n', m_stdout.getvalue()) + + def test_status_returns_done_long(self): + '''Long format of done status includes datasource info.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'start': 124.567, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + 
self.assertEqual(0, retcode) + expected = dedent('''\ + status: done + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_on_errors(self): + '''Reports error when any stage has errors.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'blah': {'errors': [], 'finished': 123.456}, + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual('status: error\n', m_stdout.getvalue()) + + def test_status_on_errors_long(self): + '''Long format of error status includes all error messages.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'errors': ['error2', 'error3'], + 'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + expected = dedent('''\ + status: error + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + error1 + error2 + error3 + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running_long_format(self): + '''Long format reports the stage in which we are running.''' + write_json( + self.status_file, + {'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + expected = dedent('''\ + status: running + time: Thu, 01 Jan 1970 00:02:04 +0000 + detail: + Running in stage: init + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_wait_blocks_until_done(self): + '''Specifying wait will poll every 1/4 second until done state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + done_json = { + 'v1': {'stage': None, + 'init': {'start': 124.456, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, done_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + 
'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) + + def test_status_wait_blocks_until_error(self): + '''Specifying wait will poll every 1/4 second until error state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + error_json = { + 'v1': {'stage': None, + 'init': {'errors': ['error1'], 'start': 124.456, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, error_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) + + def test_status_main(self): + '''status.main can be run as a standalone script.''' + write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + with self.assertRaises(SystemExit) as context_manager: + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + wrap_and_call( + 'cloudinit.cmd.status', + {'sys.argv': {'new': ['status']}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.main) + self.assertEqual(0, context_manager.exception.code) + self.assertEqual('status: running\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d5becd12..99e60e7a 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -102,11 +102,8 @@ class Distro(object): self._apply_hostname(writeable_hostname) def uses_systemd(self): - try: - res = os.lstat('/run/systemd/system') - return stat.S_ISDIR(res.st_mode) - except Exception: - return False + """Wrapper to report whether this distro uses systemd or sysvinit.""" + return uses_systemd() @abc.abstractmethod def package_command(self, cmd, args=None, pkgs=None): @@ -761,4 +758,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.copy(tz_file, tz_local) return + +def uses_systemd(): + try: + res = os.lstat('/run/systemd/system') + return stat.S_ISDIR(res.st_mode) + except Exception: + return False + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 6c014ba5..320d64e0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1398,6 +1398,32 @@ def get_output_cfg(cfg, mode): return ret +def get_config_logfiles(cfg): + """Return a list of log file paths from the configuration dictionary. + + @param cfg: The cloud-init merged configuration dictionary. 
+ """ + logs = [] + if not cfg or not isinstance(cfg, dict): + return logs + default_log = cfg.get('def_log_file') + if default_log: + logs.append(default_log) + for fmt in get_output_cfg(cfg, None): + if not fmt: + continue + match = re.match('(?P\||>+)\s*(?P.*)', fmt) + if not match: + continue + target = match.group('target') + parts = target.split() + if len(parts) == 1: + logs.append(target) + elif ['tee', '-a'] == parts[:2]: + logs.append(parts[2]) + return list(set(logs)) + + def logexc(log, msg, *args): # Setting this here allows this to change # levels easily (not always error level) diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fccbbd23..a8d28ae6 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -2,9 +2,9 @@ import six +from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers -from cloudinit.cmd import main as cli mock = test_helpers.mock @@ -45,8 +45,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): """All known subparsers are represented in the cloud-int help doc.""" self._call_main() error = self.stderr.getvalue() - expected_subcommands = ['analyze', 'init', 'modules', 'single', - 'dhclient-hook', 'features', 'devel'] + expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook', + 'features', 'init', 'modules', 'single'] for subcommand in expected_subcommands: self.assertIn(subcommand, error) @@ -76,9 +76,11 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ - 'usage: cloud-init analyze', 'usage: cloud-init collect-logs', - 'usage: cloud-init devel'] - conditional_subcommands = ['analyze', 'collect-logs', 'devel'] + 'usage: cloud-init analyze', 'usage: cloud-init clean', + 'usage: cloud-init collect-logs', 'usage: cloud-init devel', + 'usage: cloud-init status'] + conditional_subcommands = [ + 'analyze', 'clean', 'collect-logs', 'devel', 'status'] # The cloud-init entrypoint calls main without passing sys_argv for subcommand in conditional_subcommands: with mock.patch('sys.argv', ['cloud-init', subcommand, '-h']): @@ -106,6 +108,22 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(['cloud-init', 'collect-logs', '-h']) self.assertIn('usage: cloud-init collect-log', stdout.getvalue()) + def test_clean_subcommand_parser(self): + """The subcommand cloud-init clean calls the subparser.""" + # Provide -h param to clean to avoid having to mock behavior. + stdout = six.StringIO() + self.patchStdoutAndStderr(stdout=stdout) + self._call_main(['cloud-init', 'clean', '-h']) + self.assertIn('usage: cloud-init clean', stdout.getvalue()) + + def test_status_subcommand_parser(self): + """The subcommand cloud-init status calls the subparser.""" + # Provide -h param to clean to avoid having to mock behavior. 
+ stdout = six.StringIO() + self.patchStdoutAndStderr(stdout=stdout) + self._call_main(['cloud-init', 'status', '-h']) + self.assertIn('usage: cloud-init status', stdout.getvalue()) + def test_devel_subcommand_parser(self): """The subcommand cloud-init devel calls the correct subparser.""" self._call_main(['cloud-init', 'devel']) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 3e4154ca..71f59529 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -477,6 +477,44 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): self.assertIsNone(util.read_dmi_data("system-product-name")) +class TestGetConfigLogfiles(helpers.CiTestCase): + + def test_empty_cfg_returns_empty_list(self): + """An empty config passed to get_config_logfiles returns an empty list.""" + self.assertEqual([], util.get_config_logfiles(None)) + self.assertEqual([], util.get_config_logfiles({})) + + def test_default_log_file_present(self): + """When def_log_file is set, get_config_logfiles finds it.""" + self.assertEqual( + ['/my.log'], + util.get_config_logfiles({'def_log_file': '/my.log'})) + + def test_output_logs_parsed_when_teeing_files(self): + """The output configuration is parsed when teeing files.""" + self.assertEqual( + ['/himom.log', '/my.log'], + sorted(util.get_config_logfiles({ + 'def_log_file': '/my.log', + 'output': {'all': '|tee -a /himom.log'}}))) + + def test_output_logs_parsed_when_redirecting(self): + """The output configuration is parsed when redirecting to a file.""" + self.assertEqual( + ['/my.log', '/test.log'], + sorted(util.get_config_logfiles({ + 'def_log_file': '/my.log', + 'output': {'all': '>/test.log'}}))) + + def test_output_logs_parsed_when_appending(self): + """The output configuration is parsed when appending to a file.""" + self.assertEqual( + ['/my.log', '/test.log'], + sorted(util.get_config_logfiles({ + 'def_log_file': '/my.log', + 'output': {'all': '>> /test.log'}}))) + + class TestMultiLog(helpers.FilesystemMockingTestCase): def _createConsole(self, root): -- cgit v1.2.3 From 0cf6db3617e0cebeb89c4809396f84360827e96c Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 5 Dec 2017 16:42:35 -0700 Subject: Datasources: Formalize DataSource get_data and related properties. Previously, each DataSource subclass defined its own get_data method. This branch formalizes our DataSource class to require that subclasses define an explicit dsname for sourcing cloud-config datasource configuration. Subclasses must also override the _get_data method or a NotImplementedError is raised. The branch also writes /run/cloud-init/instance-data.json. This file contains all meta-data, user-data and vendor-data and a standardized set of metadata keys in a json blob which other utilities with root access could make use of. Because some meta-data or user-data is potentially sensitive, the file is only readable by root. Generally most metadata content types should be json serializable. If specific keys or values are not serializable, those specific values will be base64-encoded and the key path will be listed under the top-level key 'base64-encoded-keys' in instance-data.json. If json writing fails due to other TypeErrors or UnicodeDecodeErrors, a warning log will be emitted to /var/log/cloud-init.log and no instance-data.json will be created.
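To make the new contract concrete, a minimal conforming datasource could look like the sketch below. The sketch is illustrative rather than part of the series: the DataSourceExampleCloud class and its literal values are hypothetical, while dsname, _get_data and the attribute names come from the DataSource changes in this commit.

    # Illustrative sketch of the formalized subclass contract; the class
    # name and its values are hypothetical, not part of this patch.
    from cloudinit import sources


    class DataSourceExampleCloud(sources.DataSource):

        # dsname selects this datasource's configuration from
        # cfg['datasource']['ExampleCloud'] and, when metadata carries no
        # 'cloud-name' key, also serves as the lowercased cloud-name.
        dsname = 'ExampleCloud'

        def _get_data(self):
            # Subclasses now override _get_data rather than get_data; the
            # base-class get_data wrapper persists these attributes to
            # instance-data.json in the run directory (root-only, 0600)
            # whenever _get_data returns True.
            self.metadata = {
                'instance-id': 'i-example',
                'local-hostname': 'example-host',
            }
            self.userdata_raw = '#cloud-config\n{}'
            self.vendordata_raw = ''
            return True

A successful get_data call on such a datasource leaves /run/cloud-init/instance-data.json with top-level 'v1' (the standardized keys), 'ds' (raw meta-data, user-data and vendor-data) and 'base64-encoded-keys' entries, as exercised by the new tests in cloudinit/sources/tests/test_init.py.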
--- cloudinit/analyze/__main__.py | 4 +- cloudinit/analyze/dump.py | 8 +- cloudinit/sources/DataSourceAliYun.py | 1 + cloudinit/sources/DataSourceAltCloud.py | 5 +- cloudinit/sources/DataSourceAzure.py | 4 +- cloudinit/sources/DataSourceBigstep.py | 5 +- cloudinit/sources/DataSourceCloudSigma.py | 5 +- cloudinit/sources/DataSourceCloudStack.py | 5 +- cloudinit/sources/DataSourceConfigDrive.py | 5 +- cloudinit/sources/DataSourceDigitalOcean.py | 5 +- cloudinit/sources/DataSourceEc2.py | 12 +- cloudinit/sources/DataSourceGCE.py | 5 +- cloudinit/sources/DataSourceMAAS.py | 5 +- cloudinit/sources/DataSourceNoCloud.py | 5 +- cloudinit/sources/DataSourceNone.py | 5 +- cloudinit/sources/DataSourceOVF.py | 5 +- cloudinit/sources/DataSourceOpenNebula.py | 5 +- cloudinit/sources/DataSourceOpenStack.py | 5 +- cloudinit/sources/DataSourceScaleway.py | 4 +- cloudinit/sources/DataSourceSmartOS.py | 5 +- cloudinit/sources/__init__.py | 129 +++++++++++-- cloudinit/sources/tests/__init__.py | 0 cloudinit/sources/tests/test_init.py | 202 +++++++++++++++++++++ cloudinit/tests/helpers.py | 7 - cloudinit/util.py | 33 +++- tests/unittests/test_datasource/test_aliyun.py | 2 +- tests/unittests/test_datasource/test_altcloud.py | 22 ++- tests/unittests/test_datasource/test_azure.py | 28 +-- tests/unittests/test_datasource/test_cloudsigma.py | 13 +- tests/unittests/test_datasource/test_cloudstack.py | 19 +- .../unittests/test_datasource/test_configdrive.py | 3 +- .../unittests/test_datasource/test_digitalocean.py | 11 +- tests/unittests/test_datasource/test_ec2.py | 3 +- tests/unittests/test_datasource/test_gce.py | 3 +- tests/unittests/test_datasource/test_nocloud.py | 14 +- tests/unittests/test_datasource/test_opennebula.py | 12 +- tests/unittests/test_datasource/test_openstack.py | 12 +- tests/unittests/test_datasource/test_scaleway.py | 13 +- tests/unittests/test_datasource/test_smartos.py | 3 +- tests/unittests/test_ds_identify.py | 4 +- tests/unittests/test_runs/test_merge_run.py | 1 + tests/unittests/test_runs/test_simple_run.py | 3 +- 42 files changed, 517 insertions(+), 123 deletions(-) create mode 100644 cloudinit/sources/tests/__init__.py create mode 100644 cloudinit/sources/tests/test_init.py (limited to 'tests') diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 69b9e43e..3ba5903f 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -6,6 +6,8 @@ import argparse import re import sys +from cloudinit.util import json_dumps + from . import dump from . 
import show @@ -112,7 +114,7 @@ def analyze_show(name, args): def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(dump.json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + '\n') def _get_events(infile): diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index ca4da496..b071aa19 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -2,7 +2,6 @@ import calendar from datetime import datetime -import json import sys from cloudinit import util @@ -132,11 +131,6 @@ def parse_ci_logline(line): return event -def json_dumps(data): - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def dump_events(cisource=None, rawdata=None): events = [] event = None @@ -169,7 +163,7 @@ def main(): else: cisource = sys.stdin - return json_dumps(dump_events(cisource)) + return util.json_dumps(dump_events(cisource)) if __name__ == "__main__": diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 43a7e42c..7ac8288d 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS" class DataSourceAliYun(EC2.DataSourceEc2): + dsname = 'AliYun' metadata_urls = ['http://100.100.100.200'] # The minimum supported metadata_version from the ec2 metadata apis diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index c78ad9eb..be2d6cf8 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir): class DataSourceAltCloud(sources.DataSource): + + dsname = 'AltCloud' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource): return 'UNKNOWN' - def get_data(self): + def _get_data(self): ''' Description: User Data is passed to the launching instance which diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 14367e9c..6978d4e5 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -246,6 +246,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): + + dsname = 'Azure' _negotiated = False def __init__(self, sys_cfg, distro, paths): @@ -330,7 +332,7 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata - def get_data(self): + def _get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. 
So, in order to successfully reboot we # need to look in the datadir and consider that valid diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index d7fcd45a..699a85b5 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__) class DataSourceBigstep(sources.DataSource): + + dsname = 'Bigstep' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.metadata = {} self.vendordata_raw = "" self.userdata_raw = "" - def get_data(self, apply_filter=False): + def _get_data(self, apply_filter=False): url = get_url_from_file() if url is None: return False diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 19df16b1..4eaad475 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource): For more information about CloudSigma's Server Context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html """ + + dsname = 'CloudSigma' + def __init__(self, sys_cfg, distro, paths): self.cepko = Cepko() self.ssh_public_key = '' @@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource): LOG.warning("failed to query dmi data for system product name") return False - def get_data(self): + def _get_data(self): """ Metadata is the whole server context and /meta/cloud-config is used as userdata. diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 9dc473fc..0df545fc 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object): class DataSourceCloudStack(sources.DataSource): + + dsname = 'CloudStack' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') @@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource): def get_config_obj(self): return self.cfg - def get_data(self): + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index ef374f3f..870b3688 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -32,6 +32,9 @@ OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): + + dsname = 'ConfigDrive' + def __init__(self, sys_cfg, distro, paths): super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None @@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self): + def _get_data(self): found = None md = {} results = {} diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5e7e66be..e0ef665e 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -27,6 +27,9 @@ MD_USE_IPV4LL = True class DataSourceDigitalOcean(sources.DataSource): + + dsname = 'DigitalOcean' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, 
sys_cfg, distro, paths) self.distro = distro @@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource): def _get_sysinfo(self): return do_helper.read_sysinfo() - def get_data(self): + def _get_data(self): (is_do, droplet_id) = self._get_sysinfo() # only proceed if we know we are on DigitalOcean diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 7bbbfb63..e5c88334 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -31,6 +31,7 @@ _unset = "_unset" class Platforms(object): + # TODO Rename and move to cloudinit.cloud.CloudNames ALIYUN = "AliYun" AWS = "AWS" BRIGHTBOX = "Brightbox" @@ -45,6 +46,7 @@ class Platforms(object): class DataSourceEc2(sources.DataSource): + dsname = 'Ec2' # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve @@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource): _fallback_interface = None def __init__(self, sys_cfg, distro, paths): - sources.DataSource.__init__(self, sys_cfg, distro, paths) + super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None self.seed_dir = os.path.join(paths.seed_dir, "ec2") - def get_data(self): + def _get_cloud_name(self): + """Return the cloud name as identified during _get_data.""" + return self.cloud_platform + + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] @@ -274,7 +280,7 @@ class DataSourceEc2(sources.DataSource): return None @property - def cloud_platform(self): + def cloud_platform(self): # TODO rename cloud_name if self._cloud_platform is None: self._cloud_platform = identify_platform() return self._cloud_platform diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index ccae4200..ad6dae37 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -42,6 +42,9 @@ class GoogleMetadataFetcher(object): class DataSourceGCE(sources.DataSource): + + dsname = 'GCE' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.metadata = dict() @@ -50,7 +53,7 @@ class DataSourceGCE(sources.DataSource): BUILTIN_DS_CONFIG]) self.metadata_address = self.ds_cfg['metadata_url'] - def get_data(self): + def _get_data(self): ret = util.log_time( LOG.debug, 'Crawl of GCE metadata service', read_md, kwargs={'address': self.metadata_address}) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 77df5a51..496bd06a 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -39,6 +39,9 @@ class DataSourceMAAS(sources.DataSource): hostname vendor-data """ + + dsname = "MAAS" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None @@ -62,7 +65,7 @@ class DataSourceMAAS(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [%s]" % (root, self.base_url) - def get_data(self): + def _get_data(self): mcfg = self.ds_cfg try: diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index e641244d..5d3a8ddb 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__) class DataSourceNoCloud(sources.DataSource): + + dsname = "NoCloud" 
+ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) - def get_data(self): + def _get_data(self): defaults = { "instance-id": "nocloud", "dsmode": self.dsmode, diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 906bb278..e63a7e39 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__) class DataSourceNone(sources.DataSource): + + dsname = "None" + def __init__(self, sys_cfg, distro, paths, ud_proc=None): sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) self.metadata = {} self.userdata_raw = '' - def get_data(self): + def _get_data(self): # If the datasource config has any provided 'fallback' # userdata or metadata, use it... if 'userdata_raw' in self.ds_cfg: diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index ccebf11a..6ac621f2 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -43,6 +43,9 @@ LOG = logging.getLogger(__name__) class DataSourceOVF(sources.DataSource): + + dsname = "OVF" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -60,7 +63,7 @@ class DataSourceOVF(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - def get_data(self): + def _get_data(self): found = [] md = {} ud = "" diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 5fdac192..5da11847 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -31,6 +31,9 @@ CONTEXT_DISK_FILES = ["context.sh"] class DataSourceOpenNebula(sources.DataSource): + + dsname = "OpenNebula" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -40,7 +43,7 @@ class DataSourceOpenNebula(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) - def get_data(self): + def _get_data(self): defaults = {"instance-id": DEFAULT_IID} results = None seed = None diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b64a7f24..e55a7638 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -24,6 +24,9 @@ DEFAULT_METADATA = { class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + + dsname = "OpenStack" + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None @@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def get_data(self): + def _get_data(self): try: if not self.wait_for_metadata_service(): return False diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 3a8a8e8f..b0b19c93 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout): class 
DataSourceScaleway(sources.DataSource): + dsname = "Scaleway" + def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) @@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) - def get_data(self): + def _get_data(self): if not on_scaleway(): return False diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6c6902fd..86bfa5d8 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db" class DataSourceSmartOS(sources.DataSource): + + dsname = "Joyent" + _unset = "_unset" smartos_type = _unset md_client = _unset @@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource): os.rename('/'.join([svc_path, 'provisioning']), '/'.join([svc_path, 'provision_success'])) - def get_data(self): + def _get_data(self): self._init() md = {} diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9a43fbee..4b819ce6 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -10,9 +10,11 @@ import abc import copy +import json import os import six +from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils @@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" DS_PREFIX = 'DataSource' +# File in which instance meta-data, user-data and vendor-data are written +INSTANCE_JSON_FILE = 'instance-data.json' + +# Key which can provide a cloud's official product name to cloud-init +METADATA_CLOUD_NAME_KEY = 'cloud-name' + LOG = logging.getLogger(__name__) @@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception): pass +def process_base64_metadata(metadata, key_path=''): + """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" + md_copy = copy.deepcopy(metadata) + md_copy['base64-encoded-keys'] = [] + for key, val in metadata.items(): + if key_path: + sub_key_path = key_path + '/' + key + else: + sub_key_path = key + if isinstance(val, str) and val.startswith('ci-b64:'): + md_copy['base64-encoded-keys'].append(sub_key_path) + md_copy[key] = val.replace('ci-b64:', '') + if isinstance(val, dict): + return_val = process_base64_metadata(val, sub_key_path) + md_copy['base64-encoded-keys'].extend( + return_val.pop('base64-encoded-keys')) + md_copy[key] = return_val + return md_copy + + @six.add_metaclass(abc.ABCMeta) class DataSource(object): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' + # Datasource name needs to be set by subclasses to determine which + # cloud-config datasource key is loaded + dsname = '_undef' + + # Cached cloud_name as determined by _get_cloud_name + _cloud_name = None + def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro @@ -56,17 +91,8 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # find the datasource config name. - # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] - name = type_utils.obj_name(self) - if name.startswith(DS_PREFIX): - name = name[len(DS_PREFIX):] - if name.endswith('Net'): - name = name[0:-3] - - self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, - ("datasource", name), {}) + self.ds_cfg = util.get_cfg_by_path( + self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: self.ds_cfg = {} @@ -78,6 +104,51 @@ class DataSource(object): def __str__(self): return type_utils.obj_name(self) + def _get_standardized_metadata(self): + """Return a dictionary of standardized metadata keys.""" + return {'v1': { + 'local-hostname': self.get_hostname(), + 'instance-id': self.get_instance_id(), + 'cloud-name': self.cloud_name, + 'region': self.region, + 'availability-zone': self.availability_zone}} + + def get_data(self): + """Datasources implement _get_data to set up metadata and userdata_raw. + + Minimally, the datasource should return a boolean True on success. + """ + return_value = self._get_data() + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + if not return_value: + return return_value + + instance_data = { + 'ds': { + 'meta-data': self.metadata, + 'user-data': self.get_userdata_raw(), + 'vendor-data': self.get_vendordata_raw()}} + instance_data.update( + self._get_standardized_metadata()) + try: + # Process content, base64-encoding unserializable values + content = util.json_dumps(instance_data) + # Strip ci-b64: prefix and record base64-encoded-keys + processed_data = process_base64_metadata(json.loads(content)) + except TypeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + except UnicodeDecodeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + write_json(json_file, processed_data, mode=0o600) + return return_value + + def _get_data(self): + raise NotImplementedError( + 'Subclasses of DataSource must implement _get_data which' + ' sets self.metadata, vendordata_raw and userdata_raw.') + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -90,6 +161,34 @@ class DataSource(object): self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) return self.vendordata + @property + def cloud_name(self): + """Return lowercase cloud name as determined by the datasource. + + Datasource can determine or define its own cloud product name in + metadata. + """ + if self._cloud_name: + return self._cloud_name + if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): + cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) + if isinstance(cloud_name, six.string_types): + self._cloud_name = cloud_name.lower() + else: + LOG.debug( + 'Ignoring metadata provided key %s: non-string type %s', + METADATA_CLOUD_NAME_KEY, type(cloud_name)) + self._cloud_name = self._get_cloud_name().lower() + else: + self._cloud_name = self._get_cloud_name().lower() + return self._cloud_name + + def _get_cloud_name(self): + """Return the datasource name as it frequently matches cloud name. + + Should be overridden in subclasses which can run on multiple + cloud names, such as DataSourceEc2.
+ """ + return self.dsname + @property def launch_index(self): if not self.metadata: @@ -161,8 +260,11 @@ class DataSource(object): @property def availability_zone(self): - return self.metadata.get('availability-zone', - self.metadata.get('availability_zone')) + top_level_az = self.metadata.get( + 'availability-zone', self.metadata.get('availability_zone')) + if top_level_az: + return top_level_az + return self.metadata.get('placement', {}).get('availability-zone') @property def region(self): @@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list): ret_list.append(cls) return ret_list + # vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py new file mode 100644 index 00000000..af151154 --- /dev/null +++ b/cloudinit/sources/tests/test_init.py @@ -0,0 +1,202 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import os +import six +import stat + +from cloudinit.helpers import Paths +from cloudinit.sources import ( + INSTANCE_JSON_FILE, DataSource) +from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.user_data import UserDataProcessor +from cloudinit import util + + +class DataSourceTestSubclassNet(DataSource): + + dsname = 'MyTestSubclass' + + def __init__(self, sys_cfg, distro, paths, custom_userdata=None): + super(DataSourceTestSubclassNet, self).__init__( + sys_cfg, distro, paths) + self._custom_userdata = custom_userdata + + def _get_cloud_name(self): + return 'SubclassCloudName' + + def _get_data(self): + self.metadata = {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'} + if self._custom_userdata: + self.userdata_raw = self._custom_userdata + else: + self.userdata_raw = 'userdata_raw' + self.vendordata_raw = 'vendordata_raw' + return True + + +class InvalidDataSourceTestSubclassNet(DataSource): + pass + + +class TestDataSource(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDataSource, self).setUp() + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} + self.distro = 'distrotest' # generally should be a Distro object + self.paths = Paths({}) + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) + + def test_datasource_init(self): + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" + self.assertEqual(self.paths, self.datasource.paths) + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) + self.assertEqual(self.distro, self.datasource.distro) + self.assertIsNone(self.datasource.userdata) + self.assertEqual({}, self.datasource.metadata) + self.assertIsNone(self.datasource.userdata_raw) + self.assertIsNone(self.datasource.vendordata) + self.assertIsNone(self.datasource.vendordata_raw) + self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) + + def test_datasource_init_gets_ds_cfg_using_dsname(self): + """Init uses DataSource.dsname for sourcing ds_cfg.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + paths = Paths({}) + datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) + self.assertEqual({'key2': False}, datasource.ds_cfg) + + def test_str_is_classname(self): + """The string representation of the datasource is the classname.""" + self.assertEqual('DataSource', 
str(self.datasource)) + self.assertEqual( + 'DataSourceTestSubclassNet', + str(DataSourceTestSubclassNet('', '', self.paths))) + + def test__get_data_unimplemented(self): + """Raise an error when _get_data is not implemented.""" + with self.assertRaises(NotImplementedError) as context_manager: + self.datasource.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + datasource2 = InvalidDataSourceTestSubclassNet( + self.sys_cfg, self.distro, self.paths) + with self.assertRaises(NotImplementedError) as context_manager: + datasource2.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + + def test_get_data_calls_subclass__get_data(self): + """Datasource.get_data uses the subclass' version of _get_data.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + self.assertEqual( + {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + datasource.metadata) + self.assertEqual('userdata_raw', datasource.userdata_raw) + self.assertEqual('vendordata_raw', datasource.vendordata_raw) + + def test_get_data_write_json_instance_data(self): + """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected = { + 'base64-encoded-keys': [], + 'v1': { + 'availability-zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'instance-id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'ds': { + 'meta-data': {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'user-data': 'userdata_raw', + 'vendor-data': 'vendordata_raw'}} + self.assertEqual(expected, util.load_json(content)) + file_stat = os.stat(json_file) + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_handles_redacted_unserializable_content(self): + """get_data warns about unserializable content in INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected_userdata = { + 'key1': 'val1', + 'key2': { + 'key2.1': "Warning: redacted unserializable type <class 'cloudinit.helpers.Paths'>"}} + instance_json = util.load_json(content) + self.assertEqual( + expected_userdata, instance_json['ds']['user-data']) + + @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") + def test_get_data_base64encodes_unserializable_bytes(self): + """On py3, get_data base64-encodes any unserializable content.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual( + ['ds/user-data/key2/key2.1'], + instance_json['base64-encoded-keys']) + self.assertEqual(
+ {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") + def test_get_data_handles_bytes_values(self): + """On py2 get_data handles bytes values without having to b64encode.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual([], instance_json['base64-encoded-keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") + def test_non_utf8_encoding_logs_warning(self): + """When non-utf-8 values exist in py2 instance-data is not written.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + self.assertFalse(os.path.exists(json_file)) + self.assertIn( + "WARNING: Error persisting instance-data.json: 'utf8' codec can't" + " decode byte 0xaa in position 2: invalid start byte", + self.logs.getvalue()) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 6f88a5b7..feb884ab 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -3,7 +3,6 @@ from __future__ import print_function import functools -import json import logging import os import shutil @@ -337,12 +336,6 @@ def dir2dict(startdir, prefix=None): return flist -def json_dumps(data): - # print data in nicely formatted json. - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def wrap_and_call(prefix, mocks, func, *args, **kwargs): """ call func(args, **kwargs) with mocks applied, then unapplies mocks diff --git a/cloudinit/util.py b/cloudinit/util.py index 320d64e0..11e96a77 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -533,15 +533,6 @@ def multi_log(text, console=True, stderr=True, log.log(log_level, text) -def load_json(text, root_types=(dict,)): - decoded = json.loads(decode_binary(text)) - if not isinstance(decoded, tuple(root_types)): - expected_types = ", ".join([str(t) for t in root_types]) - raise TypeError("(%s) root types expected, got %s instead" - % (expected_types, type(decoded))) - return decoded - - def is_ipv4(instr): """determine if input string is a ipv4 address. 
return boolean.""" toks = instr.split('.') @@ -1480,7 +1471,31 @@ def ensure_dirs(dirlist, mode=0o755): ensure_dir(d, mode) +def load_json(text, root_types=(dict,)): + decoded = json.loads(decode_binary(text)) + if not isinstance(decoded, tuple(root_types)): + expected_types = ", ".join([str(t) for t in root_types]) + raise TypeError("(%s) root types expected, got %s instead" + % (expected_types, type(decoded))) + return decoded + + +def json_serialize_default(_obj): + """Handler for types which aren't json serializable.""" + try: + return 'ci-b64:{0}'.format(b64e(_obj)) + except AttributeError: + return 'Warning: redacted unserializable type {0}'.format(type(_obj)) + + +def json_dumps(data): + """Return data in nicely formatted json.""" + return json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': '), default=json_serialize_default) + + def yaml_dumps(obj, explicit_start=True, explicit_end=True): + """Return data in nicely formatted yaml.""" return yaml.safe_dump(obj, line_break="\n", indent=4, diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index 82ee9714..714f5dac 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -67,7 +67,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): super(TestAliYunDatasource, self).setUp() cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} distro = {} - paths = helpers.Paths({}) + paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.ds = ay.DataSourceAliYun(cfg, distro, paths) self.metadata_address = self.ds.metadata_urls[0] diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index a4dfb540..3253f3ad 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -18,7 +18,7 @@ import tempfile from cloudinit import helpers from cloudinit import util -from cloudinit.tests.helpers import TestCase +from cloudinit.tests.helpers import CiTestCase import cloudinit.sources.DataSourceAltCloud as dsac @@ -97,7 +97,7 @@ def _dmi_data(expected): return _data -class TestGetCloudType(TestCase): +class TestGetCloudType(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_cloud_type() ''' @@ -143,14 +143,16 @@ class TestGetCloudType(TestCase): self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) -class TestGetDataCloudInfoFile(TestCase): +class TestGetDataCloudInfoFile(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_data() With a contrived CLOUD_INFO_FILE ''' def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.cloud_info_file = tempfile.mkstemp()[1] self.dmi_data = util.read_dmi_data dsac.CLOUD_INFO_FILE = self.cloud_info_file @@ -207,14 +209,16 @@ class TestGetDataCloudInfoFile(TestCase): self.assertEqual(False, dsrc.get_data()) -class TestGetDataNoCloudInfoFile(TestCase): +class TestGetDataNoCloudInfoFile(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_data() Without a CLOUD_INFO_FILE ''' def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.dmi_data = util.read_dmi_data dsac.CLOUD_INFO_FILE = \ 'no such file' @@ -254,7 +258,7 @@ class 
TestGetDataNoCloudInfoFile(TestCase): self.assertEqual(False, dsrc.get_data()) -class TestUserDataRhevm(TestCase): +class TestUserDataRhevm(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' @@ -320,7 +324,7 @@ class TestUserDataRhevm(TestCase): self.assertEqual(False, dsrc.user_data_rhevm()) -class TestUserDataVsphere(TestCase): +class TestUserDataVsphere(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_vsphere() ''' @@ -368,7 +372,7 @@ class TestUserDataVsphere(TestCase): self.assertEqual(1, m_mount_cb.call_count) -class TestReadUserDataCallback(TestCase): +class TestReadUserDataCallback(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.read_user_data_callback() ''' diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 7cb1812a..226c214a 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,9 +11,7 @@ from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, import crypt import os -import shutil import stat -import tempfile import xml.etree.ElementTree as ET import yaml @@ -84,11 +82,11 @@ class TestAzureDataSource(CiTestCase): super(TestAzureDataSource, self).setUp() if PY26: raise SkipTest("Does not work on python 2.6") - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.patches = ExitStack() @@ -642,7 +640,7 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual(netconfig, expected_config) -class TestAzureBounce(TestCase): +class TestAzureBounce(CiTestCase): def mock_out_azure_moving_parts(self): self.patches.enter_context( @@ -669,10 +667,10 @@ class TestAzureBounce(TestCase): def setUp(self): super(TestAzureBounce, self).setUp() - self.tmp = tempfile.mkdtemp() + self.tmp = self.tmp_dir() self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - self.addCleanup(shutil.rmtree, self.tmp) + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() @@ -714,21 +712,24 @@ class TestAzureBounce(TestCase): def test_disabled_bounce_does_not_change_hostname(self): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_disabled_bounce_does_not_perform_bounce( self, perform_hostname_bounce): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) def test_same_hostname_does_not_change_hostname(self): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = 
self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') @@ -737,7 +738,8 @@ class TestAzureBounce(TestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index e4c59907..f6a59b6b 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import helpers from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma @@ -38,10 +39,12 @@ class CepkoMock(Cepko): return self -class DataSourceCloudSigmaTest(test_helpers.TestCase): +class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() @@ -85,7 +88,8 @@ class DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() @@ -94,7 +98,8 @@ class DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_cloudinit_key_in_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"]["cloudinit"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 96144b64..d6d2d6b2 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -33,6 +33,7 @@ class TestCloudStackPasswordFetching(CiTestCase): self.patches.enter_context(mock.patch( mod_name + '.dhcp.networkd_get_option_from_leases', get_networkd_server_address)) + self.tmp = self.tmp_dir() def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) @@ -43,26 +44,30 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_empty_password_doesnt_create_config(self): self._set_password_server_response('') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def 
test_saved_password_doesnt_create_config(self): self._set_password_server_response('saved_password') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def test_password_sets_password(self): password = 'SekritSquirrel' self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) def test_bad_request_doesnt_stop_ds_from_working(self): self._set_password_server_response('bad_request') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) self.assertTrue(ds.get_data()) def assertRequestTypesSent(self, subp, expected_request_types): @@ -77,14 +82,16 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_valid_response_means_password_marked_as_saved(self): password = 'SekritSquirrel' subp = self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password', 'saved_password']) def _check_password_not_saved_for(self, response_string): subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 237c189b..98497886 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -725,8 +725,9 @@ class TestConvertNetworkData(TestCase): def cfg_ds_from_dir(seed_d): + tmp = tempfile.mkdtemp() cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': tmp})) cfg_ds.seed_dir = seed_d cfg_ds.known_macs = KNOWN_MACS.copy() if not cfg_ds.get_data(): diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index f264f361..ec321733 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -13,7 +13,7 @@ from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean from cloudinit.sources.helpers import digitalocean -from cloudinit.tests.helpers import mock, TestCase +from cloudinit.tests.helpers import mock, CiTestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... 
test2@do.co"] @@ -135,14 +135,17 @@ def _mock_dmi(): return (True, DO_META.get('id')) -class TestDataSourceDigitalOcean(TestCase): +class TestDataSourceDigitalOcean(CiTestCase): """ Test reading the meta-data """ + def setUp(self): + super(TestDataSourceDigitalOcean, self).setUp() + self.tmp = self.tmp_dir() def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({})) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) ds.use_ip4LL = False if get_sysinfo is not None: ds._get_sysinfo = get_sysinfo @@ -194,7 +197,7 @@ class TestDataSourceDigitalOcean(TestCase): self.assertIsInstance(ds.get_public_ssh_keys(), list) -class TestNetworkConvert(TestCase): +class TestNetworkConvert(CiTestCase): @mock.patch('cloudinit.net.get_interfaces_by_mac') def _get_networking(self, m_get_by_mac): diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index ba328ee9..ba042eac 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -186,6 +186,7 @@ class TestEc2(test_helpers.HttprettyTestCase): super(TestEc2, self).setUp() self.datasource = ec2.DataSourceEc2 self.metadata_addr = self.datasource.metadata_urls[0] + self.tmp = self.tmp_dir() def data_url(self, version): """Return a metadata url based on the version provided.""" @@ -199,7 +200,7 @@ class TestEc2(test_helpers.HttprettyTestCase): def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): self.uris = [] distro = {} - paths = helpers.Paths({}) + paths = helpers.Paths({'run_dir': self.tmp}) if sys_cfg is None: sys_cfg = {} ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index d399ae7a..82c788dc 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -70,9 +70,10 @@ def _set_mock_metadata(gce_meta=None): class TestDataSourceGCE(test_helpers.HttprettyTestCase): def setUp(self): + tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': tmp})) ppatch = self.m_platform_reports_gce = mock.patch( 'cloudinit.sources.DataSourceGCE.platform_reports_gce') self.m_platform_reports_gce = ppatch.start() diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index fea9156b..70d50de4 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -3,22 +3,20 @@ from cloudinit import helpers from cloudinit.sources import DataSourceNoCloud from cloudinit import util -from cloudinit.tests.helpers import TestCase, populate_dir, mock, ExitStack +from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack import os -import shutil -import tempfile import textwrap import yaml -class TestNoCloudDataSource(TestCase): +class TestNoCloudDataSource(CiTestCase): def setUp(self): super(TestNoCloudDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.cmdline = "root=TESTCMDLINE" @@ -215,7 +213,7 @@ class TestNoCloudDataSource(TestCase): self.assertNotIn(gateway, str(dsrc.network_config)) -class 
TestParseCommandLineData(TestCase): +class TestParseCommandLineData(CiTestCase): def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index e7d55692..2326dd58 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -3,12 +3,10 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util -from cloudinit.tests.helpers import mock, populate_dir, TestCase +from cloudinit.tests.helpers import mock, populate_dir, CiTestCase import os import pwd -import shutil -import tempfile import unittest @@ -36,14 +34,14 @@ PUBLIC_IP = '10.0.0.3' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" -class TestOpenNebulaDataSource(TestCase): +class TestOpenNebulaDataSource(CiTestCase): parsed_user = None def setUp(self): super(TestOpenNebulaDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) # defaults for few tests self.ds = ds.DataSourceOpenNebula diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index ed367e05..42c31554 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -131,6 +131,10 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): VERSION = 'latest' + def setUp(self): + super(TestOpenStackDataSource, self).setUp() + self.tmp = self.tmp_dir() + @hp.activate def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) @@ -232,7 +236,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertTrue(found) @@ -256,7 +260,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertFalse(found) @@ -271,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, @@ -294,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 436df9ee..8dec06b1 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -9,7 +9,7 @@ from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceScaleway -from 
cloudinit.tests.helpers import mock, HttprettyTestCase, TestCase +from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase class DataResponses(object): @@ -63,7 +63,11 @@ class MetadataResponses(object): return 200, headers, json.dumps(cls.FAKE_METADATA) -class TestOnScaleway(TestCase): +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): mock, faked = fake_dmi @@ -91,7 +95,7 @@ class TestOnScaleway(TestCase): # When not on Scaleway, get_data() returns False. datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) ) self.assertFalse(datasource.get_data()) @@ -159,8 +163,9 @@ def get_source_address_adapter(*args, **kwargs): class TestDataSourceScaleway(HttprettyTestCase): def setUp(self): + tmp = self.tmp_dir() self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) ) super(TestDataSourceScaleway, self).setUp() diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 933d5b63..88bae5f9 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -359,7 +359,8 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths({'cloud_dir': self.tmp}) + self.paths = c_helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') os.mkdir(self.legacy_user_d) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 1284e755..7a920d42 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -7,7 +7,7 @@ from uuid import uuid4 from cloudinit import safeyaml from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, dir2dict, json_dumps, populate_dir) + CiTestCase, dir2dict, populate_dir) UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -319,7 +319,7 @@ def _print_run_output(rc, out, err, cfg, files): '-- rc = %s --' % rc, '-- out --', str(out), '-- err --', str(err), - '-- cfg --', json_dumps(cfg)])) + '-- cfg --', util.json_dumps(cfg)])) print('-- files --') for k, v in files.items(): if "/_shwrap" in k: diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index add93653..5d3f1ca3 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -23,6 +23,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): cfg = { 'datasource_list': ['None'], 'cloud_init_modules': ['write-files'], + 'system_info': {'paths': {'run_dir': new_root}} } ud = self.readResource('user_data.1.txt') cloud_cfg = util.yaml_dumps(cfg) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index b8fb4794..762974e9 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -2,10 +2,10 @@ import os -from cloudinit.tests import helpers from cloudinit.settings import PER_INSTANCE from cloudinit import stages +from cloudinit.tests import 
helpers from cloudinit import util @@ -23,6 +23,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): 'datasource_list': ['None'], 'runcmd': ['ls /etc'], # test ALL_DISTROS 'spacewalk': {}, # test non-ubuntu distros module definition + 'system_info': {'paths': {'run_dir': self.new_root}}, 'write_files': [ { 'path': '/etc/blah.ini', -- cgit v1.2.3 From a110e483e8644ab73e69853ea11b6c4c6cfa04b6 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 6 Dec 2017 16:30:22 -0600 Subject: pylint: Update pylint to 1.7.1, run on tests/ and tools and fix complaints. The motivation for this is that (a) 1.7.1 runs with python 3.6 (bionic) and (b) we want to run pylint on tests/ and tools/ for the same reasons that we want to run it on cloudinit/. The changes are described below. - Update tox.ini to invoke pylint v1.7.1. - Modify .pylintrc generated-members to ignore mocked object members (m_.*) - Replace "dangerous" params defaulting to {} - Fix up cloud_tests use of platforms - Cast some instance objects with dict() - Handle python2.7 vs 3+ ConfigParser use of readfp (deprecated) - Update use of assertEqual(<boolean>, value) to assert<Boolean>(value) - Replace deprecated assertRegexp -> assertRegex - Remove useless test-class calls to super class - Assign class property accessors a result and use it - Fix missing class member in CepkoResultTests - Fix Cheetah test import --- .pylintrc | 2 +- cloudinit/cmd/tests/test_clean.py | 2 +- cloudinit/cmd/tests/test_status.py | 2 +- cloudinit/tests/helpers.py | 35 +++++ tests/cloud_tests/__init__.py | 6 + tests/cloud_tests/bddeb.py | 9 +- tests/cloud_tests/collect.py | 6 +- tests/cloud_tests/config.py | 4 +- tests/cloud_tests/testcases/base.py | 3 +- .../testcases/modules/set_hostname_fqdn.py | 2 +- tests/cloud_tests/util.py | 2 +- tests/unittests/test_cs_util.py | 1 + tests/unittests/test_datasource/test_azure.py | 31 ++-- .../unittests/test_datasource/test_digitalocean.py | 9 +- tests/unittests/test_datasource/test_ec2.py | 3 +- tests/unittests/test_distros/test_create_users.py | 7 +- tests/unittests/test_distros/test_netconfig.py | 3 - tests/unittests/test_handler/test_handler_lxd.py | 3 - .../test_handler/test_handler_power_state.py | 3 - .../test_handler/test_handler_yum_add_repo.py | 10 +- .../test_handler/test_handler_zypper_add_repo.py | 7 +- tests/unittests/test_reporting.py | 2 +- tests/unittests/test_templating.py | 2 +- tests/unittests/test_util.py | 6 +- tests/unittests/test_vmware_config_file.py | 3 +- tools/hacking.py | 172 --------------------- tools/make-mime.py | 2 +- tools/mock-meta.py | 45 +++--- tox.ini | 5 +- 29 files changed, 121 insertions(+), 266 deletions(-) delete mode 100755 tools/hacking.py (limited to 'tests') diff --git a/.pylintrc b/.pylintrc index b160ce7b..3ad36924 100644 --- a/.pylintrc +++ b/.pylintrc @@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted.
-generated-members=types,http.client,command_handlers +generated-members=types,http.client,command_handlers,m_.* diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index af438aab..1379740b 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -151,7 +151,7 @@ class TestClean(CiTestCase): 'sys.argv': {'new': ['clean', '--logs']}}, clean.main) - self.assertEqual(0, context_manager.exception.code) + self.assertRaisesCodeEqual(0, context_manager.exception.code) self.assertFalse( os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 8ec9b5bc..6d4a11e8 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -347,7 +347,7 @@ class TestStatus(CiTestCase): '_is_cloudinit_disabled': (False, ''), 'Init': {'side_effect': self.init_class}}, status.main) - self.assertEqual(0, context_manager.exception.code) + self.assertRaisesCodeEqual(0, context_manager.exception.code) self.assertEqual('status: running\n', m_stdout.getvalue()) # vi: ts=4 expandtab syntax=python diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index feb884ab..0080c729 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -19,6 +19,11 @@ try: except ImportError: from contextlib2 import ExitStack +try: + from configparser import ConfigParser +except ImportError: + from ConfigParser import ConfigParser + from cloudinit import helpers as ch from cloudinit import util @@ -113,6 +118,16 @@ class TestCase(unittest2.TestCase): self.addCleanup(m.stop) setattr(self, attr, p) + # prefer python3 read_file over readfp but allow fallback + def parse_and_read(self, contents): + parser = ConfigParser() + if hasattr(parser, 'read_file'): + parser.read_file(contents) + elif hasattr(parser, 'readfp'): + # pylint: disable=W1505 + parser.readfp(contents) + return parser + class CiTestCase(TestCase): """This is the preferred test case base class unless user @@ -158,6 +173,18 @@ class CiTestCase(TestCase): dir = self.tmp_dir() return os.path.normpath(os.path.abspath(os.path.join(dir, path))) + def assertRaisesCodeEqual(self, expected, found): + """Handle centos6 having different context manager for assertRaises. + with assertRaises(Exception) as e: + raise Exception("BOO") + + centos6 will have e.exception as an integer. + anything newer will have it as something with a '.code'""" + if isinstance(found, int): + self.assertEqual(expected, found) + else: + self.assertEqual(expected, found.code) + class ResourceUsingTestCase(CiTestCase): @@ -395,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): mock.Mock.assert_not_called = __mock_assert_not_called +# older unittest2.TestCase (centos6) does not have assertRaisesRegex +# And setting assertRaisesRegex to assertRaisesRegexp causes +# https://github.com/PyCQA/pylint/issues/1653 . So the workaround.
+if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): + def _tricky(*args, **kwargs): + return unittest2.TestCase.assertRaisesRegexp + unittest2.TestCase.assertRaisesRegex = _tricky + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py index 98c1d6c7..dd436989 100644 --- a/tests/cloud_tests/__init__.py +++ b/tests/cloud_tests/__init__.py @@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases') TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases') TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2]) +# This domain contains reverse lookups for hostnames that are used. +# The primary reason is so sudo will return quickly when it attempts +# to look up the hostname. i9n is just short for 'integration'. +# see also bug 1730744 for why we had to do this. +CI_DOMAIN = "i9n.cloud-init.io" + def _initialize_logging(): """Configure logging for cloud_tests.""" diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index c259dfea..a6d5069f 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -8,8 +8,7 @@ import tempfile from cloudinit import util as c_util from tests.cloud_tests import (config, LOG) -from tests.cloud_tests.platforms import (platforms, images, snapshots, - instances) +from tests.cloud_tests import platforms from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] @@ -85,18 +84,18 @@ def setup_build(args): # set up image LOG.info('acquiring image for os: %s', args.build_os) img_conf = config.load_os_config(platform.platform_name, args.build_os) - image_call = partial(images.get_image, platform, img_conf) + image_call = partial(platforms.get_image, platform, img_conf) with PlatformComponent(image_call) as image: # set up snapshot - snapshot_call = partial(snapshots.get_snapshot, image) + snapshot_call = partial(platforms.get_snapshot, image) with PlatformComponent(snapshot_call) as snapshot: # create instance with cloud-config to set it up LOG.info('creating instance to build deb in') empty_cloud_config = "#cloud-config\n{}" instance_call = partial( - instances.get_instance, snapshot, empty_cloud_config, + platforms.get_instance, snapshot, empty_cloud_config, use_desc='build cloud-init deb') with PlatformComponent(instance_call) as instance: diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index db5ee99f..4805cea1 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -64,9 +64,9 @@ def collect_test_data(args, snapshot, os_name, test_name): # skip the testcase with a warning req_features = test_config.get('required_features', []) if any(feature not in snapshot.features for feature in req_features): - LOG.warn('test config %s requires features not supported by image, ' - 'skipping.\nrequired features: %s\nsupported features: %s', - test_name, req_features, snapshot.features) + LOG.warning('test config %s requires features not supported by image, ' + 'skipping.\nrequired features: %s\nsupported features: %s', + test_name, req_features, snapshot.features) return ({}, 0) # if there are user data overrides required for this test case, apply them diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py index 52fc2bda..8bd569fd 100644 --- a/tests/cloud_tests/config.py +++ b/tests/cloud_tests/config.py @@ -92,7 +92,7 @@ def load_platform_config(platform_name, require_enabled=False): def load_os_config(platform_name, os_name, require_enabled=False, - 
feature_overrides={}): + feature_overrides=None): """Load configuration for os. @param platform_name: platform name to load os config for @@ -101,6 +101,8 @@ def load_os_config(platform_name, os_name, require_enabled=False, @param feature_overrides: feature flag overrides to merge with features @return_value: config dict """ + if feature_overrides is None: + feature_overrides = {} main_conf = c_util.read_conf(RELEASES_CONF) default = main_conf['default_release_config'] image = main_conf['releases'][os_name] diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 1706f59b..1c5b5405 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -12,7 +12,8 @@ from cloudinit import util as c_util class CloudTestCase(unittest.TestCase): """Base test class for verifiers.""" - data = None + # data gets populated in get_suite.setUpClass + data = {} conf = None _cloud_config = None diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py index eb6f0650..a405b30b 100644 --- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py +++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. """cloud-init Integration Test Verify Script.""" -from tests.cloud_tests.instances.nocloudkvm import CI_DOMAIN +from tests.cloud_tests import CI_DOMAIN from tests.cloud_tests.testcases import base diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index c5cd6974..2aedcd0d 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -262,7 +262,7 @@ def shell_safe(cmd): out = subprocess.check_output( ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd)) # out contains ' -- \n'. drop the ' -- ' and the '\n' - return out[4:-1].decode() + return out.decode()[4:-1] def shell_pack(cmd): diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index ee88520d..2a1095b9 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -35,6 +35,7 @@ class CepkoMock(Cepko): # touched the underlying Cepko class methods. 
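# (Aside, illustrative only -- not part of any patch in this series: the "dangerous params defaulting to {}" items above, such as feature_overrides in load_os_config(), exist because a mutable default is created once at function-definition time and shared by every call. A minimal sketch of the pitfall and the None-sentinel fix, with hypothetical helper names:)
def _append_to_broken(item, dest=[]):   # one shared list for every call
    dest.append(item)
    return dest

def _append_to_fixed(item, dest=None):
    if dest is None:                    # fresh object on each call
        dest = []
    dest.append(item)
    return dest

assert _append_to_broken(1) == [1]
assert _append_to_broken(2) == [1, 2]   # state leaked from the first call
assert _append_to_fixed(1) == [1]
assert _append_to_fixed(2) == [2]       # no leak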
class CepkoResultTests(test_helpers.TestCase): def setUp(self): + self.c = Cepko() raise test_helpers.SkipTest('This test is completely useless') def test_getitem(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 226c214a..5ab48897 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -36,9 +36,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): """ for key, dval in data.items(): if isinstance(dval, dict): - val = dval.get('text') - attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items() - if k != 'text']) + val = dict(dval).get('text') + attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v + in dict(dval).items() if k != 'text']) else: val = dval attrs = "" @@ -897,9 +897,6 @@ class TestCanDevBeReformatted(CiTestCase): setattr(self, sattr, patcher.start()) self.addCleanup(patcher.stop) - def setUp(self): - super(TestCanDevBeReformatted, self).setUp() - def patchup(self, devs): bypath = {} for path, data in devs.items(): @@ -954,14 +951,14 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda3': {'num': 3}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" self.patchup({'/dev/sda': {}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) def test_two_partitions_not_ntfs_false(self): @@ -973,7 +970,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_two_partitions_ntfs_populated_false(self): @@ -986,7 +983,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['secret.txt']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_two_partitions_ntfs_empty_is_true(self): @@ -998,7 +995,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_not_ntfs_false(self): @@ -1009,7 +1006,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'zfs'}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_one_partition_ntfs_populated_false(self): @@ -1021,7 +1018,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['file1.txt', 'file2.exe']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): @@ -1032,7 +1029,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def 
test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): @@ -1044,7 +1041,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['dataloss_warning_readme.txt']} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_through_realpath_is_true(self): @@ -1059,7 +1056,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb1'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_three_partition_through_realpath_is_false(self): @@ -1078,7 +1075,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb3'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index ec321733..3127014b 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -199,9 +199,8 @@ class TestDataSourceDigitalOcean(CiTestCase): class TestNetworkConvert(CiTestCase): - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def _get_networking(self, m_get_by_mac): - m_get_by_mac.return_value = { + def _get_networking(self): + self.m_get_by_mac.return_value = { '04:01:57:d1:9e:01': 'ens1', '04:01:57:d1:9e:02': 'ens2', 'b8:ae:ed:75:5f:9a': 'enp0s25', @@ -211,6 +210,10 @@ class TestNetworkConvert(CiTestCase): self.assertIn('config', netcfg) return netcfg + def setUp(self): + super(TestNetworkConvert, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + def test_networking_defined(self): netcfg = self._get_networking() self.assertIsNotNone(netcfg) diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index ba042eac..f0dc8338 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -330,7 +330,8 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.fallback_nic = 'eth9' with mock.patch(get_interface_mac_path) as m_get_interface_mac: m_get_interface_mac.return_value = mac1 - ds.network_config # Will re-crawl network metadata + nc = ds.network_config # Will re-crawl network metadata + self.assertIsNotNone(nc) self.assertIn('Re-crawl of metadata service', self.logs.getvalue()) expected = {'version': 1, 'config': [ {'mac_address': '06:17:04:d7:26:09', diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index aa13670a..5670904a 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -7,7 +7,11 @@ from cloudinit.tests.helpers import (TestCase, mock) class MyBaseDistro(distros.Distro): # MyBaseDistro is here to test base Distro class implementations - def __init__(self, name="basedistro", cfg={}, paths={}): + def __init__(self, name="basedistro", cfg=None, paths=None): + if not cfg: + cfg = {} + if not paths: + paths = {} super(MyBaseDistro, self).__init__(name, cfg, paths) def install_packages(self, pkglist): @@ -42,7 +46,6 @@ class MyBaseDistro(distros.Distro): @mock.patch("cloudinit.distros.util.subp") class TestCreateUser(TestCase): def setUp(self): - super(TestCase, self).setUp() self.dist = MyBaseDistro() def _useradd2call(self, 
args): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index c4bd11bc..8d0b2634 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -188,9 +188,6 @@ hn0: flags=8843 metric 0 mtu 1500 status: active """ - def setUp(self): - super(TestNetCfgDistro, self).setUp() - def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index e0d9ab6c..a2054980 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -25,9 +25,6 @@ class TestLxd(t_help.CiTestCase): } } - def setUp(self): - super(TestLxd, self).setUp() - def _get_cloud(self, distro): cls = distros.fetch(distro) paths = helpers.Paths({}) diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 85a0fe0a..3c726422 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -9,9 +9,6 @@ from cloudinit.tests.helpers import mock class TestLoadPowerState(t_help.TestCase): - def setUp(self): - super(self.__class__, self).setUp() - def test_no_config(self): # completely empty config should mean do nothing (cmd, _timeout, _condition) = psc.load_power_state({}) diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index b7adbe50..b90a3af3 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -5,10 +5,6 @@ from cloudinit import util from cloudinit.tests import helpers -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser import logging import shutil from six import StringIO @@ -58,8 +54,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'epel_testing': { 'name': 'Extra Packages for Enterprise Linux 5 - Testing', @@ -95,8 +90,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'puppetlabs_products': { 'name': 'Puppet Labs Products El 6 - $basearch', diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py index 315c2a5e..72ab6c08 100644 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py @@ -9,10 +9,6 @@ from cloudinit import util from cloudinit.tests import helpers from cloudinit.tests.helpers import mock -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser import logging from six import StringIO @@ -70,8 +66,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): root_d = 
self.tmp_dir() cc_zypper_add_repo._write_repos(cfg['repos'], root_d) contents = util.load_file("%s/testing-foo.repo" % root_d) - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'testing-foo': { 'name': 'test-foo', diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 571420ed..e15ba6cf 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -126,7 +126,7 @@ class TestBaseReportingHandler(TestCase): def test_base_reporting_handler_is_abstract(self): regexp = r".*abstract.*publish_event.*" - self.assertRaisesRegexp(TypeError, regexp, handlers.ReportingHandler) + self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler) class TestLogHandler(TestCase): diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index b911d929..53154d33 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -14,7 +14,7 @@ from cloudinit import templater try: import Cheetah HAS_CHEETAH = True - Cheetah # make pyflakes happy, as Cheetah is not used here + c = Cheetah # make pyflakes and pylint happy, as Cheetah is not used here except ImportError: HAS_CHEETAH = False diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 71f59529..787ca208 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -695,9 +695,9 @@ class TestSubp(helpers.CiTestCase): util.write_file(noshebang, 'true\n') os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - self.assertRaisesRegexp(util.ProcessExecutionError, - 'Missing #! in script\?', - util.subp, (noshebang,)) + self.assertRaisesRegex(util.ProcessExecutionError, + 'Missing #! in script\?', + util.subp, (noshebang,)) def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 808d303a..0f8cda95 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -133,7 +133,8 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) with self.assertRaises(ValueError): - conf.reset_password() + pw = conf.reset_password + self.assertIsNone(pw) cf.clear() cf._insertKey("PASSWORD|RESET", "yes") diff --git a/tools/hacking.py b/tools/hacking.py deleted file mode 100755 index e6a05136..00000000 --- a/tools/hacking.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2012, Cloudscaling -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""cloudinit HACKING file compliance testing (based off of nova hacking.py) - -built on top of pep8.py -""" - -import inspect -import logging -import re -import sys - -import pep8 - -# Don't need this for testing -logging.disable('LOG') - -# N1xx comments -# N2xx except -# N3xx imports -# N4xx docstrings -# N[5-9]XX (future use) - -DOCSTRING_TRIPLE = ['"""', "'''"] -VERBOSE_MISSING_IMPORT = False -_missingImport = set([]) - - -def import_normalize(line): - # convert "from x import y" to "import x.y" - # handle "from x import y as z" to "import x.y as z" - split_line = line.split() - if (line.startswith("from ") and "," not in line and - split_line[2] == "import" and split_line[3] != "*" and - split_line[1] != "__future__" and - (len(split_line) == 4 or (len(split_line) == 6 and - split_line[4] == "as"))): - return "import %s.%s" % (split_line[1], split_line[3]) - else: - return line - - -def cloud_import_alphabetical(physical_line, line_number, lines): - """Check for imports in alphabetical order. - - HACKING guide recommendation for imports: - imports in human alphabetical order - N306 - """ - # handle import x - # use .lower since capitalization shouldn't dictate order - split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() - # with or without "as y" - length = [2, 4] - if (len(split_line) in length and len(split_previous) in length and - split_line[0] == "import" and split_previous[0] == "import"): - if split_line[1] < split_previous[1]: - return (0, "N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) - - -def cloud_docstring_start_space(physical_line): - """Check for docstring not start with space. - - HACKING guide recommendation for docstring: - Docstring should not start with space - N401 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) > pos + 1): - if (physical_line[pos + 3] == ' '): - return (pos, - "N401: one line docstring should not start with a space") - - -def cloud_todo_format(physical_line): - """Check for 'TODO()'. - - HACKING guide recommendation for TODO: - Include your name with TODOs as in "#TODO(termie)" - N101 - """ - pos = physical_line.find('TODO') - pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure it's a comment - if (pos != pos1 and pos2 >= 0 and pos2 < pos): - return pos, "N101: Use TODO(NAME)" - - -def cloud_docstring_one_line(physical_line): - """Check one line docstring end. - - HACKING guide recommendation for one line docstring: - A one line docstring looks like this and ends in a period. - N402 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end - if (pos != -1 and end and len(physical_line) > pos + 4): - if (physical_line[-5] != '.'): - return pos, "N402: one line docstring needs a period" - - -def cloud_docstring_multiline_end(physical_line): - """Check multi line docstring end. 
- - HACKING guide recommendation for docstring: - Docstring should end on a new line - N403 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) == pos): - print(physical_line) - if (physical_line[pos + 3] == ' '): - return (pos, "N403: multi line docstring end on new line") - - -current_file = "" - - -def readlines(filename): - """Record the current file being tested.""" - pep8.current_file = filename - return open(filename).readlines() - - -def add_cloud(): - """Monkey patch pep8 for cloud-init guidelines. - - Look for functions that start with cloud_ - and add them to pep8 module. - - Assumes you know how to write pep8.py checks - """ - for name, function in globals().items(): - if not inspect.isfunction(function): - continue - if name.startswith("cloud_"): - exec("pep8.%s = %s" % (name, name)) - - -if __name__ == "__main__": - # NOVA based 'hacking.py' error codes start with an N - pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}') - add_cloud() - pep8.current_file = current_file - pep8.readlines = readlines - try: - pep8._main() - finally: - if len(_missingImport) > 0: - sys.stderr.write( - "%i imports missing in this test environment\n" % - len(_missingImport)) - -# vi: ts=4 expandtab diff --git a/tools/make-mime.py b/tools/make-mime.py index f6a72044..d321479b 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -23,7 +23,7 @@ def file_content_type(text): filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) except ValueError: - raise argparse.ArgumentError("Invalid value for %r" % (text)) + raise argparse.ArgumentError(text, "Invalid value for %r" % (text)) def main(): diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a5d14ab7..724f7fc4 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -17,6 +17,7 @@ Then: ec2metadata --instance-id """ +import argparse import functools import json import logging @@ -27,8 +28,6 @@ import string import sys import yaml -from optparse import OptionParser - try: from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler import httplib as hclient @@ -415,29 +414,27 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'): def extract_opts(): - parser = OptionParser() - parser.add_option("-p", "--port", dest="port", action="store", type=int, - default=80, metavar="PORT", - help=("port from which to serve traffic" - " (default: %default)")) - parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='::', metavar="ADDRESS", - help=("address from which to serve traffic" - " (default: %default)")) - parser.add_option("-f", '--user-data-file', dest='user_data_file', - action='store', metavar='FILE', - help=("user data filename to serve back to" - "incoming requests")) - (options, args) = parser.parse_args() - out = dict() - out['extra'] = args - out['port'] = options.port - out['user_data_file'] = None - out['address'] = options.address - if options.user_data_file: - if not os.path.isfile(options.user_data_file): + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--port", dest="port", action="store", type=int, + default=80, metavar="PORT", + help=("port from which to serve traffic" + " (default: %default)")) + parser.add_argument("-a", "--addr", dest="address", action="store", + type=str, default='::', metavar="ADDRESS", + help=("address from which to serve traffic" + " (default: %default)")) + parser.add_argument("-f", '--user-data-file', 
dest='user_data_file', + action='store', metavar='FILE', + help=("user data filename to serve back to " + "incoming requests")) + parser.add_argument('extra', nargs='*') + args = parser.parse_args() + out = {'port': args.port, 'address': args.address, 'extra': args.extra, + 'user_data_file': None} + if args.user_data_file: + if not os.path.isfile(args.user_data_file): parser.error("Option -f specified a non-existent file") - with open(options.user_data_file, 'rb') as fh: + with open(args.user_data_file, 'rb') as fh: out['user_data_file'] = fh.read() return out diff --git a/tox.ini b/tox.ini index 92232201..d7316cc2 100644 --- a/tox.ini +++ b/tox.ini @@ -21,12 +21,13 @@ setenv = LC_ALL = en_US.utf-8 [testenv:pylint] +basepython = python3 deps = # requirements pylint==1.7.1 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt -commands = {envpython} -m pylint {posargs:cloudinit} +commands = {envpython} -m pylint {posargs:cloudinit tests tools} [testenv:py3] basepython = python3 @@ -119,7 +120,7 @@ commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} deps = pyflakes [testenv:tip-pylint] -commands = {envpython} -m pylint {posargs:cloudinit tests tools} +commands = {envpython} -m pylint {posargs:cloudinit tests tools} deps = # requirements pylint -- cgit v1.2.3 From 05b2308aa7e30337c2a455b5d2c67871b233e25c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Dec 2017 16:33:56 -0500 Subject: citest: In NoCloudKVM provide keys via metadata not userdata. The NoCloudKVM platform was inserting ssh keys via user-data rather than through meta-data, as is done on other platforms. This way we are not forced to change the user-data provided. Also, provide meta-data including a uuid as the instance-id.
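As a rough sketch of the approach (illustrative only; the paths, key material, and user-data are hypothetical stand-ins, and the real logic lives in the instance.py diff below), the NoCloud seed image is now assembled from both a user-data and a meta-data file:

import uuid
from cloudinit import util as c_util
from cloudinit.atomic_helper import write_json

meta_data = {'instance-id': str(uuid.uuid1()),          # per-instance uuid
             'public-keys': ['ssh-rsa AAAA... user@example']}
with open('/tmp/user_data', 'w') as fp:                 # user-data unchanged
    fp.write('#cloud-config\n{}')
# meta-data can be yaml, but json is valid yaml and pretty prints easily
write_json('/tmp/meta_data', meta_data)
c_util.subp(['cloud-localds', '/tmp/seed.img', '/tmp/user_data',
             '/tmp/meta_data'])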
--- tests/cloud_tests/platforms/nocloudkvm/instance.py | 39 +++++++++++++++++++--- tests/cloud_tests/platforms/nocloudkvm/snapshot.py | 20 ----------- 2 files changed, 35 insertions(+), 24 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py index a87d76a6..9bb24256 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py @@ -2,13 +2,16 @@ """Base NoCloud KVM instance.""" +import copy import os import paramiko import socket import subprocess import time +import uuid from ..instances import Instance +from cloudinit.atomic_helper import write_json from cloudinit import util as c_util from tests.cloud_tests import util @@ -37,14 +40,38 @@ class NoCloudKVMInstance(Instance): @param features: dictionary of supported feature flags """ self.user_data = user_data - self.meta_data = meta_data - self.ssh_key_file = os.path.join(platform.config['data_dir'], - platform.config['private_key']) + if meta_data: + meta_data = copy.deepcopy(meta_data) + else: + meta_data = {} + + if 'instance-id' in meta_data: + iid = meta_data['instance-id'] + else: + iid = str(uuid.uuid1()) + meta_data['instance-id'] = iid + + self.instance_id = iid + self.ssh_key_file = os.path.join( + platform.config['data_dir'], platform.config['private_key']) + self.ssh_pubkey_file = os.path.join( + platform.config['data_dir'], platform.config['public_key']) + + self.ssh_pubkey = None + if self.ssh_pubkey_file: + with open(self.ssh_pubkey_file, "r") as fp: + self.ssh_pubkey = fp.read().rstrip('\n') + + if not meta_data.get('public-keys'): + meta_data['public-keys'] = [] + meta_data['public-keys'].append(self.ssh_pubkey) + self.ssh_port = None self.pid = None self.pid_file = None self.console_file = None self.disk = image_path + self.meta_data = meta_data super(NoCloudKVMInstance, self).__init__( platform, name, properties, config, features) @@ -78,11 +105,15 @@ class NoCloudKVMInstance(Instance): """Generate nocloud seed from user-data""" seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name) user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name) + meta_data_file = os.path.join(tmpdir, '%s_meta_data' % self.name) with open(user_data_file, "w") as ud_file: ud_file.write(self.user_data) - c_util.subp(['cloud-localds', seed_file, user_data_file]) + # meta-data can be yaml, but more easily pretty printed with json + write_json(meta_data_file, self.meta_data) + c_util.subp(['cloud-localds', seed_file, user_data_file, + meta_data_file]) return seed_file diff --git a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py index 0005e1f2..2dae3590 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py +++ b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py @@ -41,10 +41,6 @@ class NoCloudKVMSnapshot(Snapshot): @param use_desc: description of snapshot instance use @return_value: an Instance """ - key_file = os.path.join(self.platform.config['data_dir'], - self.platform.config['public_key']) - user_data = self.inject_ssh_key(user_data, key_file) - instance = self.platform.create_instance( self.properties, self.config, self.features, self._image_path, image_desc=str(self), use_desc=use_desc, @@ -55,22 +51,6 @@ class NoCloudKVMSnapshot(Snapshot): return instance - def inject_ssh_key(self, user_data, key_file): - """Inject the authorized key into the user_data.""" - with open(key_file) as f: - value = f.read() - - 
key = 'ssh_authorized_keys:' - value = ' - %s' % value.strip() - user_data = user_data.split('\n') - if key in user_data: - user_data.insert(user_data.index(key) + 1, '%s' % value) - else: - user_data.insert(-1, '%s' % key) - user_data.insert(-1, '%s' % value) - - return '\n'.join(user_data) - def destroy(self): """Clean up snapshot data.""" shutil.rmtree(self._workd) -- cgit v1.2.3 From ce33e423cde806a0590fec635778d62836e1bd37 Mon Sep 17 00:00:00 2001 From: Maitreyee Saikia Date: Fri, 8 Dec 2017 10:10:40 -0700 Subject: VMware: Support for user provided pre and post-customization scripts In the VMware customization workflow, we have some options for the user to upload scripts for additional customization. Based on user request, those custom scripts can be run either before or after regular customization. For post-customization scripts, we decide whether to run them just after customization or after the system reboots. --- cloudinit/sources/DataSourceOVF.py | 125 ++++++++++++----- cloudinit/sources/helpers/vmware/imc/config.py | 4 + .../helpers/vmware/imc/config_custom_script.py | 153 +++++++++++++++++++++ cloudinit/sources/helpers/vmware/imc/config_nic.py | 2 +- tests/unittests/test_datasource/test_ovf.py | 111 ++++++++++++++- tests/unittests/test_vmware/__init__.py | 0 tests/unittests/test_vmware/test_custom_script.py | 99 +++++++++++++ tests/unittests/test_vmware_config_file.py | 7 + 8 files changed, 459 insertions(+), 42 deletions(-) create mode 100644 cloudinit/sources/helpers/vmware/imc/config_custom_script.py create mode 100644 tests/unittests/test_vmware/__init__.py create mode 100644 tests/unittests/test_vmware/test_custom_script.py (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6ac621f2..6e62f984 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -21,6 +21,8 @@ from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config \ import Config +from cloudinit.sources.helpers.vmware.imc.config_custom_script \ + import PreCustomScript, PostCustomScript from cloudinit.sources.helpers.vmware.imc.config_file \ import ConfigFile from cloudinit.sources.helpers.vmware.imc.config_nic \ @@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \ from cloudinit.sources.helpers.vmware.imc.guestcust_error \ import GuestCustErrorEnum from cloudinit.sources.helpers.vmware.imc.guestcust_event \ - import GuestCustEventEnum + import GuestCustEventEnum as GuestCustEvent from cloudinit.sources.helpers.vmware.imc.guestcust_state \ import GuestCustStateEnum from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( @@ -127,17 +129,31 @@ class DataSourceOVF(sources.DataSource): self._vmware_cust_conf = Config(cf) (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) - markerid = self._vmware_cust_conf.marker_id - markerexists = check_marker_exists(markerid) + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) + product_marker = self._vmware_cust_conf.marker_id + hasmarkerfile = check_marker_exists( + product_marker, os.path.join(self.paths.cloud_dir, 'data')) + special_customization = product_marker and not hasmarkerfile + customscript = self._vmware_cust_conf.custom_script_name except Exception as e: - LOG.debug("Error parsing the customization Config File") - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- raise e - finally: - util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) + _raise_error_status( + "Error parsing the customization Config File", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if special_customization: + if customscript: + try: + precust = PreCustomScript(customscript, imcdirpath) + precust.execute() + except Exception as e: + _raise_error_status( + "Error executing pre-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( @@ -146,13 +162,13 @@ class DataSourceOVF(sources.DataSource): True, self.distro.osfamily) except Exception as e: - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) - raise e + _raise_error_status( + "Error preparing Network Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, + vmwareImcConfigFilePath) - if markerid and not markerexists: + if special_customization: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password @@ -164,27 +180,41 @@ class DataSourceOVF(sources.DataSource): else: LOG.debug("Changing password is not needed") except Exception as e: - LOG.debug("Error applying Password Configuration: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False - if markerid: - LOG.debug("Handle marker creation") + _raise_error_status( + "Error applying Password Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if customscript: + try: + postcust = PostCustomScript(customscript, imcdirpath) + postcust.execute() + except Exception as e: + _raise_error_status( + "Error executing post-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if product_marker: try: - setup_marker_files(markerid) + setup_marker_files( + product_marker, + os.path.join(self.paths.cloud_dir, 'data')) except Exception as e: - LOG.debug("Error creating marker files: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False + _raise_error_status( + "Error creating marker files", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. + util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) enable_nics(self._vmware_nics_to_enable) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, @@ -539,31 +569,52 @@ def get_datasource_list(depends): # To check if marker file exists -def check_marker_exists(markerid): +def check_marker_exists(markerid, marker_dir): """ Check the existence of a marker file. Presence of marker file determines whether a certain code path is to be executed. It is needed for partial guest customization in VMware. + @param markerid: is a unique string representing a particular product + marker. + @param marker_dir: The directory in which markers exist.
""" if not markerid: return False - markerfile = "/.markerfile-" + markerid + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") if os.path.exists(markerfile): return True return False # Create a marker file -def setup_marker_files(markerid): +def setup_marker_files(markerid, marker_dir): """ Create a new marker file. Marker files are unique to a full customization workflow in VMware environment. + @param markerid: is a unique string representing a particular product + marker. + @param marker_dir: The directory in which markers exist. + """ - if not markerid: - return - markerfile = "/.markerfile-" + markerid - util.del_file("/.markerfile-*.txt") + LOG.debug("Handle marker creation") + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") + for fname in os.listdir(marker_dir): + if fname.startswith(".markerfile"): + util.del_file(os.path.join(marker_dir, fname)) open(markerfile, 'w').close() + +def _raise_error_status(prefix, error, event, config_file): + """ + Raise error and send customization status to the underlying VMware + Virtualization Platform. Also, clean up the imc directory. + """ + LOG.debug('%s: %s', prefix, error) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + event) + util.del_dir(os.path.dirname(config_file)) + raise error + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 49d441db..2eaeff34 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -100,4 +100,8 @@ class Config(object): """Returns marker id.""" return self._configFile.get(Config.MARKERID, None) + @property + def custom_script_name(self): + """Return the name of custom (pre/post) script.""" + return self._configFile.get(Config.CUSTOM_SCRIPT, None) # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py new file mode 100644 index 00000000..a7d4ad91 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -0,0 +1,153 @@ +# Copyright (C) 2017 Canonical Ltd. +# Copyright (C) 2017 VMware Inc. +# +# Author: Maitreyee Saikia +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging +import os +import stat +from textwrap import dedent + +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class CustomScriptNotFound(Exception): + pass + + +class CustomScriptConstant(object): + RC_LOCAL = "/etc/rc.local" + POST_CUST_TMP_DIR = "/root/.customization" + POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" + POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, + POST_CUST_RUN_SCRIPT_NAME) + POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + + +class RunCustomScript(object): + def __init__(self, scriptname, directory): + self.scriptname = scriptname + self.directory = directory + self.scriptpath = os.path.join(directory, scriptname) + + def prepare_script(self): + if not os.path.exists(self.scriptpath): + raise CustomScriptNotFound("Script %s not found!! " + "Cannot execute custom script!"
+ % self.scriptpath) + # Strip any CR characters from the decoded script + util.load_file(self.scriptpath).replace("\r", "") + st = os.stat(self.scriptpath) + os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + + +class PreCustomScript(RunCustomScript): + def execute(self): + """Executing custom script with precustomization argument.""" + LOG.debug("Executing pre-customization script") + self.prepare_script() + util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + + +class PostCustomScript(RunCustomScript): + def __init__(self, scriptname, directory): + super(PostCustomScript, self).__init__(scriptname, directory) + # Determine when to run custom script. When postreboot is True, + # the user uploaded script will run as part of rc.local after + # the machine reboots. This is determined by presence of rclocal. + # When postreboot is False, script will run as part of cloud-init. + self.postreboot = False + + def _install_post_reboot_agent(self, rclocal): + """ + Install post-reboot agent for running custom script after reboot. + As part of this process, we are editing the rclocal file to run a + VMware script, which in turn is responsible for handling the user + script. + @param rclocal: path to rc.local. + """ + LOG.debug("Installing post-reboot customization from %s to %s", + self.directory, rclocal) + if not self.has_previous_agent(rclocal): + LOG.info("Adding post-reboot customization agent to rc.local") + new_content = dedent(""" + # Run post-reboot guest customization + /bin/sh %s + exit 0 + """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT + existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') + st = os.stat(rclocal) + # "x" flag should be set + mode = st.st_mode | stat.S_IEXEC + util.write_file(rclocal, existing_rclocal + new_content, mode) + + else: + # We don't need to update rclocal file every time a customization + # is requested. It just needs to be done for the first time. + LOG.info("Post-reboot guest customization agent is already " + "registered in rc.local") + LOG.debug("Installing post-reboot customization agent finished: %s", + self.postreboot) + + def has_previous_agent(self, rclocal): + searchstring = "# Run post-reboot guest customization" + if searchstring in open(rclocal).read(): + return True + return False + + def find_rc_local(self): + """ + Determine if rc local is present. + """ + rclocal = "" + if os.path.exists(CustomScriptConstant.RC_LOCAL): + LOG.debug("rc.local detected.") + # resolving in case of symlink + rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) + LOG.debug("rc.local resolved to %s", rclocal) + else: + LOG.warning("Can't find rc.local, post-customization " + "will be run before reboot") + return rclocal + + def install_agent(self): + rclocal = self.find_rc_local() + if rclocal: + self._install_post_reboot_agent(rclocal) + self.postreboot = True + + def execute(self): + """ + This method executes post-customization script before or after reboot + based on the presence of rc local.
+ """ + self.prepare_script() + self.install_agent() + if not self.postreboot: + LOG.warning("Executing post-customization script inline") + util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) + else: + LOG.debug("Scheduling custom script to run post reboot") + if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): + os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) + # Script "post-customize-guest.sh" and user uploaded script are + # present in the same directory and need to be copied to a temp + # directory to be executed post reboot. User uploaded script is + # saved as customize.sh in the temp directory. + # post-customize-guest.sh executes customize.sh after reboot. + LOG.debug("Copying post-customization script") + util.copy(self.scriptpath, + CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") + LOG.debug("Copying script to run post-customization script") + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), + CustomScriptConstant.POST_CUST_RUN_SCRIPT) + LOG.info("Creating post-reboot pending marker") + util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2fb07c59..2d8900e2 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -161,7 +161,7 @@ class NicConfigurator(object): if nic.primary and v4.gateways: self.ipv4PrimaryGateway = v4.gateways[0] subnet.update({'gateway': self.ipv4PrimaryGateway}) - return [subnet] + return ([subnet], route_list) # Add routes if there is no primary nic if not self._primaryNic: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 700da86c..fc4eb36e 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -5,11 +5,17 @@ # This file is part of cloud-init. See LICENSE file for license information. import base64 -from collections import OrderedDict +import os -from cloudinit.tests import helpers as test_helpers +from collections import OrderedDict +from textwrap import dedent +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase, wrap_and_call +from cloudinit.helpers import Paths +from cloudinit.sources import DataSourceOVF as dsovf +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptNotFound) OVF_ENV_CONTENT = """ +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptConstant, + CustomScriptNotFound, + PreCustomScript, + PostCustomScript, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestVmwareCustomScript(CiTestCase): + def setUp(self): + self.tmpDir = self.tmp_dir() + + def test_prepare_custom_script(self): + """ + This test is designed to verify the behavior based on the presence of + custom script. Mainly needed for scenario where a custom script is + expected, but was not properly copied. "CustomScriptNotFound" exception + is raised in such cases. + """ + # Custom script does not exist.
+ preCust = PreCustomScript("random-vmw-test", self.tmpDir) + self.assertEqual("random-vmw-test", preCust.scriptname) + self.assertEqual(self.tmpDir, preCust.directory) + self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir), + preCust.scriptpath) + with self.assertRaises(CustomScriptNotFound): + preCust.prepare_script() + + # Custom script exists. + custScript = self.tmp_path("test-cust", self.tmpDir) + util.write_file(custScript, "test-CR-strip/r/r") + postCust = PostCustomScript("test-cust", self.tmpDir) + self.assertEqual("test-cust", postCust.scriptname) + self.assertEqual(self.tmpDir, postCust.directory) + self.assertEqual(custScript, postCust.scriptpath) + self.assertFalse(postCust.postreboot) + postCust.prepare_script() + # Check if all carriage returns are stripped from script. + self.assertFalse("/r" in custScript) + + def test_rc_local_exists(self): + """ + This test is designed to verify the different scenarios associated + with the presence of rclocal. + """ + # test when rc local does not exist + postCust = PostCustomScript("test-cust", self.tmpDir) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"): + rclocal = postCust.find_rc_local() + self.assertEqual("", rclocal) + + # test when rc local exists + rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir) + util.write_file(rclocalFile, "# Run post-reboot guest customization", + omode="w") + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile): + rclocal = postCust.find_rc_local() + self.assertEqual(rclocalFile, rclocal) + self.assertTrue(postCust.has_previous_agent, rclocal) + + # test when rc local is a symlink + rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir) + util.sym_link(rclocalFile, rclocalLink, True) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink): + rclocal = postCust.find_rc_local() + self.assertEqual(rclocalFile, rclocal) + + def test_execute_post_cust(self): + """ + This test is to identify if rclocal was properly populated to be + run after reboot. + """ + customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) + rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) + # Create a temporary rclocal file + open(customscript, "w") + util.write_file(rclocal, "tests\nexit 0", omode="w") + postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): + # Test that guest customization agent is not installed initially. + self.assertFalse(postCust.postreboot) + self.assertIs(postCust.has_previous_agent(rclocal), False) + postCust.install_agent() + + # Assert rclocal has been modified to have guest customization + # agent.
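+ # (Illustrative aside, not part of the patch: after install_agent(),
+ # the rc.local used here is expected to end with something like
+ #
+ #     # Run post-reboot guest customization
+ #     /bin/sh /root/.customization/post-customize-guest.sh
+ #     exit 0
+ #
+ # which is what the postreboot flag and has_previous_agent() checked
+ # below reflect.)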
+ self.assertTrue(postCust.postreboot) + self.assertTrue(postCust.has_previous_agent, rclocal) + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 0f8cda95..036f6879 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -335,5 +335,12 @@ class TestVmwareConfigFile(CiTestCase): self.assertEqual('255.255.0.0', subnet.get('netmask'), 'Subnet netmask') + def test_custom_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.custom_script_name) + cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") + conf = Config(cf) + self.assertEqual("test-script", conf.custom_script_name) # vi: ts=4 expandtab -- cgit v1.2.3 From a5dc0f425facf404344fb7baaf2b9136df143ecf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 6 Dec 2017 17:26:52 -0700 Subject: OVF: improve ds-identify to support finding OVF iso transport. Previously the OVF transport would not be identified except when config files set 'ovf_vmware_guest_customization'. It would also return DS_MAYBE almost always. The change here is to add support to ds-identify for storing the iso9660 filesystems that it finds (ISO9660_DEVS). Then the OVF check will check that the iso9660 filesystem has ovf-env.xml on it. The least wonderful part of this is that the check is done by a case-insensitive 'grep' for ovf-env.xml. Future improvement would be to identify VMware's OVF by label or UUID so we could avoid the grep. LP: #1731868 --- tests/unittests/test_ds_identify.py | 85 ++++++++++++++++++++++++++- tools/ds-identify | 112 +++++++++++++++++++++++++----------- 2 files changed, 161 insertions(+), 36 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7a920d42..3f1a6712 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -32,6 +32,7 @@ POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" DI_EC2_STRICT_ID_DEFAULT = "true" +OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' SHELL_MOCK_TMPL = """\ %(name)s() { @@ -55,6 +56,7 @@ P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} +MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} @@ -296,6 +298,48 @@ class TestDsIdentify(CiTestCase): data, RC_FOUND, dslist=['OpenStack', 'None']) self.assertIn("check for 'OpenStack' returned maybe", err) + def test_default_ovf_is_found(self): + """OVF is found when the ovf/ovf-env.xml seed file exists.""" + self._test_ds_found('OVF-seed') + + def test_default_ovf_with_detect_virt_none_not_found(self): + """OVF is not found when detect_virt returns "none".""" + self._check_via_dict( + {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled") + + def test_default_ovf_returns_not_found_on_azure(self): + """OVF datasource won't be found as false positive on Azure.""" + ovfonazure = copy.deepcopy(VALID_CFG['OVF']) + # Set azure asset tag to assert OVF content not found + ovfonazure['files'][P_CHASSIS_ASSET_TAG] = ( + '7783-7084-3265-9085-8269-3286-77\n') + self._check_via_dict( + ovfonazure, RC_FOUND,
+            dslist=['Azure', DS_NONE])
+
+    def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self):
+        """OVF is identified when iso9660 cdrom content matches ovf schema."""
+        self._test_ds_found('OVF')
+
+    def test_ovf_on_vmware_iso_found_when_vmware_customization(self):
+        """OVF is identified when vmware customization is enabled."""
+        self._test_ds_found('OVF-vmware-customization')
+
+    def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
+        """OVF is identified when the iso9660 cdrom label is ovf-transport."""
+        ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF'])
+        # Unset matching cdrom ovf schema content
+        ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match'
+        self._check_via_dict(
+            ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+        # Add recognized labels
+        for valid_fs_label in ['ovf-transport', 'OVF-TRANSPORT']:
+            ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([
+                {'DEVNAME': 'sr0', 'TYPE': 'iso9660',
+                 'LABEL': valid_fs_label}])
+            self._check_via_dict(
+                ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+
 
 def blkid_out(disks=None):
     """Convert a list of disk dictionaries into blkid content."""
@@ -305,7 +349,9 @@ def blkid_out(disks=None):
     for disk in disks:
         if not disk["DEVNAME"].startswith("/dev/"):
             disk["DEVNAME"] = "/dev/" + disk["DEVNAME"]
-        for key in disk:
+        # devname needs to be first.
+        lines.append("%s=%s" % ("DEVNAME", disk["DEVNAME"]))
+        for key in [d for d in disk if d != "DEVNAME"]:
             lines.append("%s=%s" % (key, disk[key]))
         lines.append("")
     return '\n'.join(lines)
@@ -383,6 +429,43 @@ VALID_CFG = {
         'policy_dmi': POLICY_FOUND_ONLY,
         'policy_no_dmi': POLICY_FOUND_ONLY,
     },
+    'OVF-seed': {
+        'ds': 'OVF',
+        'files': {
+            os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n',
+        }
+    },
+    'OVF-vmware-customization': {
+        'ds': 'OVF',
+        'mocks': [
+            # Mock an iso9660 device, even though its content is not ovf
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
+             },
+            MOCK_VIRT_IS_VMWARE,
+        ],
+        'files': {
+            'dev/sr0': 'no match',
+            # Set up vmware customization enabled
+            'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here',
+            'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n',
+        }
+    },
+    'OVF': {
+        'ds': 'OVF',
+        'mocks': [
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
+             },
+            MOCK_VIRT_IS_VMWARE,
+        ],
+        'files': {
+            'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n',
+        }
+    },
     'ConfigDrive': {
         'ds': 'ConfigDrive',
         'mocks': [
diff --git a/tools/ds-identify b/tools/ds-identify
index ee5e05a4..4c59d7bc 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -83,6 +83,7 @@ _DI_LOGGED=""
 # set DI_MAIN='noop' in environment to source this file with no main called.
 DI_MAIN=${DI_MAIN:-main}
 
+DI_BLKID_OUTPUT=""
 DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
 DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
 DI_DMI_CHASSIS_ASSET_TAG=""
@@ -91,6 +92,7 @@ DI_DMI_SYS_VENDOR=""
 DI_DMI_PRODUCT_SERIAL=""
 DI_DMI_PRODUCT_UUID=""
 DI_FS_LABELS=""
+DI_ISO9660_DEVS=""
 DI_KERNEL_CMDLINE=""
 DI_VIRT=""
 DI_PID_1_PRODUCT_NAME=""
@@ -181,32 +183,43 @@ block_dev_with_label() {
     return 0
 }
 
-read_fs_labels() {
-    cached "${DI_FS_LABELS}" && return 0
+read_fs_info() {
+    cached "${DI_BLKID_OUTPUT}" && return 0
     # do not rely on links in /dev/disk which might not be present yet.
+    # note that older blkid versions do not report DEVNAME in 'export' output.
-    local out="" ret=0 oifs="$IFS" line="" delim=","
-    local labels=""
     if is_container; then
         # blkid will in a container, or at least currently in lxd
         # not provide useful information.
         DI_FS_LABELS="$UNAVAILABLE:container"
-    else
-        out=$(blkid -c /dev/null -o export) || {
-            ret=$?
-            error "failed running [$ret]: blkid -c /dev/null -o export"
-            return $ret
-        }
-        IFS="$CR"
-        set -- $out
-        IFS="$oifs"
-        for line in "$@"; do
-            case "${line}" in
-                LABEL=*) labels="${labels}${line#LABEL=}${delim}";;
-            esac
-        done
-        DI_FS_LABELS="${labels%${delim}}"
+        DI_ISO9660_DEVS="$UNAVAILABLE:container"
+        return
     fi
+    local oifs="$IFS" line="" delim=","
+    local ret=0 out="" labels="" dev="" label="" ftype="" isodevs=""
+    out=$(blkid -c /dev/null -o export) || {
+        ret=$?
+        error "failed running [$ret]: blkid -c /dev/null -o export"
+        DI_FS_LABELS="$UNAVAILABLE:error"
+        DI_ISO9660_DEVS="$UNAVAILABLE:error"
+        return $ret
+    }
+    IFS="$CR"
+    set -- $out
+    IFS="$oifs"
+    for line in "$@" ""; do
+        case "${line}" in
+            DEVNAME=*) dev=${line#DEVNAME=};;
+            LABEL=*) label="${line#LABEL=}";
+                labels="${labels}${line#LABEL=}${delim}";;
+            TYPE=*) ftype=${line#TYPE=};;
+            "") if [ "$ftype" = "iso9660" ]; then
+                    isodevs="${isodevs} ${dev}=$label"
+                fi
+                ftype=""; dev=""; label="";
+        esac
+    done
+    DI_FS_LABELS="${labels%${delim}}"
+    DI_ISO9660_DEVS="${isodevs# }"
 }
 
 cached() {
@@ -214,10 +227,6 @@ cached() {
 }
 
 
-has_cdrom() {
-    [ -e "${PATH_ROOT}/dev/cdrom" ]
-}
-
 detect_virt() {
     local virt="${UNAVAILABLE}" r="" out=""
     if [ -d /run/systemd ]; then
@@ -621,14 +630,13 @@ ovf_vmware_guest_customization() {
     [ "${DI_VIRT}" = "vmware" ] || return 1
 
     # we have to have the plugin to do vmware customization
-    local found="" pkg="" pre="/usr/lib"
+    local found="" pkg="" pre="${PATH_ROOT}/usr/lib"
     for pkg in vmware-tools open-vm-tools; do
        if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then
            found="$pkg"; break;
        fi
     done
     [ -n "$found" ] || return 1
-
     # vmware customization is disabled by default
     # (disable_vmware_customization=true). If it is set to false, then
     # user has requested customization.
@@ -644,20 +652,55 @@ ovf_vmware_guest_customization() {
     return 1
 }
 
+is_cdrom_ovf() {
+    local dev="$1" label="$2"
+    # skip devices that don't look like cdrom paths.
+    case "$dev" in
+        /dev/sr[0-9]|/dev/hd[a-z]) :;;
+        *) debug 1 "skipping iso dev $d"
+           return 1;;
+    esac
+
+    # fast path known 'OVF' labels
+    [ "$label" = "OVF-TRANSPORT" -o "$label" = "ovf-transport" ] && return 0
+
+    # explicitly skip known labels of other types. rd_rdfe is azure.
+    case "$label" in
+        config-2|rd_rdfe_stable*) return 1;;
+    esac
+
+    local idstr="http://schemas.dmtf.org/ovf/environment/1"
+    grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev"
+}
+
 dscheck_OVF() {
-    local p=""
     check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"
+    [ "${DI_VIRT}" = "none" ] && return ${DS_NOT_FOUND}
+
+    # Azure provides ovf. Skip false positive by disallowing.
+    is_azure_chassis && return $DS_NOT_FOUND
+
+    local isodevs="${DI_ISO9660_DEVS}"
+    case "$isodevs" in
+        ""|$UNAVAILABLE:*) return ${DS_NOT_FOUND};;
+    esac
+
+    # DI_ISO9660_DEVS is <device>=<label>, like /dev/sr0=OVF-TRANSPORT
+    for tok in $isodevs; do
+        is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND
+    done
+
     if ovf_vmware_guest_customization; then
         return ${DS_FOUND}
     fi
 
-    has_cdrom || return ${DS_NOT_FOUND}
+    return ${DS_NOT_FOUND}
+}
 
-    # FIXME: currently just return maybe if there is a cdrom
-    # ovf iso9660 transport does not specify an fs label.
-    # better would be to check if
-    return ${DS_MAYBE}
+is_azure_chassis() {
+    local azure_chassis="7783-7084-3265-9085-8269-3286-77"
+    dmi_chassis_asset_tag_matches "${azure_chassis}"
 }
 
 dscheck_Azure() {
@@ -667,8 +710,7 @@ dscheck_Azure() {
     #  UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209"
     #  TYPE="udf">/dev/sr0
     #
-    local azure_chassis="7783-7084-3265-9085-8269-3286-77"
-    dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND
+    is_azure_chassis && return $DS_FOUND
 
     check_seed_dir azure ovf-env.xml && return ${DS_FOUND}
     [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND}
@@ -930,7 +972,7 @@ collect_info() {
     read_dmi_product_name
     read_dmi_product_serial
     read_dmi_product_uuid
-    read_fs_labels
+    read_fs_info
 }
 
 print_info() {
@@ -942,7 +984,7 @@ _print_info() {
     local n="" v="" vars=""
     vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
     vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG"
-    vars="$vars FS_LABELS KERNEL_CMDLINE VIRT"
+    vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT"
     vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
     vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
     vars="$vars DSNAME DSLIST"
-- cgit v1.2.3 

From bd0ffd1e115c4f895c82e1115f1f586849925d88 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 11 Dec 2017 09:48:14 -0500
Subject: tests: remove leaked tmp files in config drive tests.

Make sure that some temporary files used by the config drive tests get
cleaned up properly.
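The pattern this moves to, as a minimal sketch (illustrative names only;
CiTestCase provides the real tmp_dir helper), registers every temporary
directory for removal the moment it is created, so nothing leaks even
when a test fails:

    import shutil
    import tempfile
    import unittest


    class TmpDirTestCase(unittest.TestCase):
        """Illustrative base class: tmp dirs are always removed."""

        def tmp_dir(self):
            tmpd = tempfile.mkdtemp()
            # addCleanup callbacks run whether the test passes or
            # fails, so the directory cannot be leaked.
            self.addCleanup(shutil.rmtree, tmpd)
            return tmpd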
--- .../unittests/test_datasource/test_configdrive.py | 57 ++++++++-------------- 1 file changed, 19 insertions(+), 38 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 98497886..6ef5a35c 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -3,9 +3,6 @@ from copy import copy, deepcopy import json import os -import shutil -import six -import tempfile from cloudinit import helpers from cloudinit.net import eni @@ -15,7 +12,7 @@ from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit.sources.helpers import openstack from cloudinit import util -from cloudinit.tests.helpers import TestCase, ExitStack, mock +from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' @@ -223,12 +220,11 @@ CFG_DRIVE_FILES_V2 = { 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} -class TestConfigDriveDataSource(TestCase): +class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() def test_ec2_metadata(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) @@ -469,31 +465,27 @@ class TestConfigDriveDataSource(TestCase): @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertEqual(myds.get_public_ssh_keys(), [OSTACK_META['public_keys']['mykey']]) -class TestNetJson(TestCase): +class TestNetJson(CiTestCase): def setUp(self): super(TestNetJson, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() self.maxDiff = None @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) network_config = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) @@ -598,11 +590,10 @@ class TestNetJson(TestCase): self.assertEqual(out_data, conv_data) -class TestConvertNetworkData(TestCase): +class TestConvertNetworkData(CiTestCase): def setUp(self): super(TestConvertNetworkData, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() def _getnames_in_config(self, ncfg): return set([n['name'] for n in ncfg['config'] @@ -724,15 +715,18 @@ class TestConvertNetworkData(TestCase): self.assertEqual(expected, config_name2mac) -def cfg_ds_from_dir(seed_d): - tmp = tempfile.mkdtemp() - cfg_ds = 
ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, - helpers.Paths({'run_dir': tmp})) - cfg_ds.seed_dir = seed_d +def cfg_ds_from_dir(base_d, files=None): + run = os.path.join(base_d, "run") + os.mkdir(run) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) + cfg_ds.seed_dir = os.path.join(base_d, "seed") + if files: + populate_dir(cfg_ds.seed_dir, files) cfg_ds.known_macs = KNOWN_MACS.copy() if not cfg_ds.get_data(): raise RuntimeError("Data source did not extract itself from" - " seed directory %s" % seed_d) + " seed directory %s" % cfg_ds.seed_dir) return cfg_ds @@ -750,17 +744,4 @@ def populate_ds_from_read_config(cfg_ds, source, results): cfg_ds.network_json, known_macs=KNOWN_MACS) -def populate_dir(seed_dir, files): - for (name, content) in files.items(): - path = os.path.join(seed_dir, name) - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - if isinstance(content, six.text_type): - mode = "w" - else: - mode = "wb" - with open(path, mode) as fp: - fp.write(content) - # vi: ts=4 expandtab -- cgit v1.2.3 From 703241a3c50f2cfec21e7c8e90616c3378ebbea2 Mon Sep 17 00:00:00 2001 From: Andrew Jorgensen Date: Mon, 27 Nov 2017 21:54:09 +0000 Subject: ec2: Use instance-identity doc for region and instance-id The instance identity document is a better source for region information, partly because region isn't actually in meta-data at all, only availability-zone, which happens to be named similarly. Reviewed-by: Ethan Faust Reviewed-by: Cyle Riggs Reviewed-by: Tom Kirchner Reviewed-by: Matt Nierzwicki [ajorgens@amazon.com: rebase onto 0.7.9] [ajorgens@amazon.com: changes per merge proposal discussions] --- cloudinit/distros/__init__.py | 15 +++++----- cloudinit/ec2_utils.py | 39 ++++++++++++++++++++------ cloudinit/sources/DataSourceEc2.py | 30 ++++++++++++++++---- tests/unittests/test_datasource/test_aliyun.py | 16 +++++++++++ 4 files changed, 78 insertions(+), 22 deletions(-) (limited to 'tests') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 99e60e7a..55260eae 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -45,6 +45,10 @@ OSFAMILIES = { LOG = logging.getLogger(__name__) +# This is a best guess regex, based on current EC2 AZs on 2017-12-11. +# It could break when Amazon adds new regions and new AZs. +_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') + @six.add_metaclass(abc.ABCMeta) class Distro(object): @@ -683,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, if not mirror_info: mirror_info = {} - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) - # the region is us-east-1. so region = az[0:-1] - directions_re = '|'.join([ - 'central', 'east', 'north', 'northeast', 'northwest', - 'south', 'southeast', 'southwest', 'west']) - ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) - subst = {} if data_source and data_source.availability_zone: subst['availability_zone'] = data_source.availability_zone - if re.match(ec2_az_re, data_source.availability_zone): + # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) + # the region is us-east-1. 
so region = az[0:-1] + if _EC2_AZ_RE.match(data_source.availability_zone): subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] if data_source and data_source.region: diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 723d6bd6..d6c61e4c 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -1,6 +1,8 @@ # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2014 Amazon.com, Inc. or its affiliates. # # Author: Joshua Harlow +# Author: Andrew Jorgensen # # This file is part of cloud-init. See LICENSE file for license information. @@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest', return user_data -def get_instance_metadata(api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): - md_url = url_helper.combine_url(metadata_address, api_version) - # Note, 'meta-data' explicitly has trailing /. - # this is required for CloudStack (LP: #1356855) - md_url = url_helper.combine_url(md_url, 'meta-data/') +def _get_instance_metadata(tree, api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) @@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest', md = {} return md except Exception: - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) + util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url) return {} + +def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + # Note, 'meta-data' explicitly has trailing /. 
+ # this is required for CloudStack (LP: #1356855) + return _get_instance_metadata(tree='meta-data/', api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) + + +def get_instance_identity(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + return _get_instance_metadata(tree='dynamic/instance-identity', + api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index e5c88334..0f89f34d 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -154,7 +154,12 @@ class DataSourceEc2(sources.DataSource): return self.min_metadata_version def get_instance_id(self): - return self.metadata['instance-id'] + if self.cloud_platform == Platforms.AWS: + # Prefer the ID from the instance identity document, but fall back + return self.identity.get( + 'instanceId', self.metadata['instance-id']) + else: + return self.metadata['instance-id'] def _get_url_settings(self): mcfg = self.ds_cfg @@ -268,15 +273,27 @@ class DataSourceEc2(sources.DataSource): @property def availability_zone(self): try: - return self.metadata['placement']['availability-zone'] + if self.cloud_platform == Platforms.AWS: + return self.identity.get( + 'availabilityZone', + self.metadata['placement']['availability-zone']) + else: + return self.metadata['placement']['availability-zone'] except KeyError: return None @property def region(self): - az = self.availability_zone - if az is not None: - return az[:-1] + if self.cloud_platform == Platforms.AWS: + region = self.identity.get('region') + # Fallback to trimming the availability zone if region is missing + if self.availability_zone and not region: + region = self.availability_zone[:-1] + return region + else: + az = self.availability_zone + if az is not None: + return az[:-1] return None @property @@ -357,6 +374,9 @@ class DataSourceEc2(sources.DataSource): api_version, self.metadata_address) self.metadata = ec2.get_instance_metadata( api_version, self.metadata_address) + if self.cloud_platform == Platforms.AWS: + self.identity = ec2.get_instance_identity( + api_version, self.metadata_address).get('document', {}) except Exception: util.logexc( LOG, "Failed reading from metadata address %s", diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index 714f5dac..4fa9616b 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -47,6 +47,9 @@ def register_mock_metaserver(base_url, data): elif isinstance(body, list): register(base_url.rstrip('/'), '\n'.join(body) + '\n') elif isinstance(body, dict): + if not body: + register(base_url.rstrip('/') + '/', 'not found', + status_code=404) vals = [] for k, v in body.items(): if isinstance(v, (str, list)): @@ -91,9 +94,22 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self.metadata_address, self.ds.min_metadata_version, 'user-data') + # EC2 provides an instance-identity document which must return 404 here + # for this test to pass. 
+ @property + def default_identity(self): + return {} + + @property + def identity_url(self): + return os.path.join(self.metadata_address, + self.ds.min_metadata_version, + 'dynamic', 'instance-identity') + def regist_default_server(self): register_mock_metaserver(self.metadata_url, self.default_metadata) register_mock_metaserver(self.userdata_url, self.default_userdata) + register_mock_metaserver(self.identity_url, self.default_identity) def _test_get_data(self): self.assertEqual(self.ds.metadata, self.default_metadata) -- cgit v1.2.3 From b63ee73da874de68ff2019570e12df2a39d4626b Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 12 Dec 2017 11:28:05 -0700 Subject: tests: fix collect_console when not implemented The exception was incorrectly creating a string and not a bytes object. --- tests/cloud_tests/collect.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 4805cea1..bb722457 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -31,8 +31,8 @@ def collect_console(instance, base_dir): LOG.debug('getting console log') try: data = instance.console_log() - except NotImplementedError as e: - data = 'Not Implemented: %s' % e + except NotImplementedError: + data = b'instance.console_log: not implemented' with open(os.path.join(base_dir, 'console.log'), "wb") as fp: fp.write(data) -- cgit v1.2.3 From a30a3bb5baec4da1d8f91385849e9b5b826678bf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 12 Dec 2017 11:41:26 -0700 Subject: ds-identify: failure in NoCloud due to unset variable usage. The previous OVF datasource change added a debug message that referenced an un-used variable. The failure path would be triggered if an image was booted with a iso9660 filesystem attached to a device that was not a cdrom. A unit test is added for the specific failure found. Additional safety to avoid 'cidata' labels is also added to the OVF checker. 
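For readers less fluent in shell, the decision order of is_cdrom_ovf
after this change is roughly the following Python sketch (not the
shipped code; read_device is a hypothetical stand-in for the raw grep):

    import re

    OVF_SCHEMA = 'http://schemas.dmtf.org/ovf/environment/1'


    def is_cdrom_ovf(dev, label, read_device):
        # Only cdrom-looking device paths are considered at all.
        if not re.match(r'^/dev/(sr[0-9]|hd[a-z])$', dev):
            return False
        # Fast path for the well-known OVF transport labels.
        if label in ('OVF-TRANSPORT', 'ovf-transport'):
            return True
        # Labels known to belong elsewhere: config drive, Azure
        # (rd_rdfe), and now NoCloud's 'cidata'.
        if label in ('config-2', 'cidata') or \
                label.startswith('rd_rdfe_stable'):
            return False
        # Last resort: case-insensitive search for the schema string.
        return OVF_SCHEMA in read_device(dev).lower()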
LP: #1737704 --- tests/unittests/test_ds_identify.py | 25 +++++++++++++++++++++++++ tools/ds-identify | 4 ++-- 2 files changed, 27 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 3f1a6712..c9234edd 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -27,6 +27,14 @@ TYPE=ext4 PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc """ +# this is a Ubuntu 18.04 disk.img output (dual uefi and bios bootable) +BLKID_UEFI_UBUNTU = [ + {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()}, + {'DEVNAME': 'vda14', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(), + 'UUID': '5F55-129B'}] + + POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" @@ -340,6 +348,10 @@ class TestDsIdentify(CiTestCase): self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_default_nocloud_as_vdb_iso9660(self): + """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" + self._test_ds_found('NoCloud') + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -422,6 +434,19 @@ VALID_CFG = { 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, 'mocks': [MOCK_VIRT_IS_KVM], }, + 'NoCloud': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'OpenStack': { 'ds': 'OpenStack', 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index 4c59d7bc..5893a761 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -657,7 +657,7 @@ is_cdrom_ovf() { # skip devices that don't look like cdrom paths. case "$dev" in /dev/sr[0-9]|/dev/hd[a-z]) :;; - *) debug 1 "skipping iso dev $d" + *) debug 1 "skipping iso dev $dev" return 1;; esac @@ -666,7 +666,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|rd_rdfe_stable*) return 1;; + config-2|rd_rdfe_stable*|cidata) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 4089e20c0a20bc2ad5c21b106687c4f3faf84b4b Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 14 Dec 2017 22:06:29 -0700 Subject: cli: Fix error in cloud-init modules --mode=init. The cli help docs and argument parser allow the 'init' mode value which caused a traceback. Fix the cli to support 'init', 'config' and 'final' modes for the cloud-init modules subcommand. Add a check in the cli to raise a ValueError if a new subcommand ends up allowing an unsupported/unimplemented modes. Drive by unit test additions for a bit better coverage of error handling. 
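A rough Python sketch of the guard added here (simplified and with a
hypothetical helper name; the real logic lives in status_wrapper):

    modes = ('init', 'init-local', 'modules-init', 'modules-config',
             'modules-final')


    def resolve_mode(name, local, requested):
        if name == 'init':
            return 'init-local' if local else 'init'
        if name != 'modules':
            raise ValueError("unknown name: %s" % name)
        mode = 'modules-%s' % requested
        if mode not in modes:
            raise ValueError(
                "Invalid cloud init mode specified '{0}'".format(mode))
        return mode

With this in place, resolve_mode('modules', False, 'bogusmode') raises
immediately instead of producing a traceback later.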
LP: #1736600 --- cloudinit/cmd/main.py | 18 +++++++---- tests/unittests/test_cli.py | 75 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index aa56225d..30b37fe1 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -603,7 +603,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-config', 'modules-final') + modes = ('init', 'init-local', 'modules-init', 'modules-config', + 'modules-final') + if mode not in modes: + raise ValueError( + "Invalid cloud init mode specified '{0}'".format(mode)) status = None if mode == 'init-local': @@ -615,16 +619,18 @@ def status_wrapper(name, args, data_d=None, link_d=None): except Exception: pass + nullstatus = { + 'errors': [], + 'start': None, + 'finished': None, + } if status is None: - nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, - } status = {'v1': {}} for m in modes: status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None + elif mode not in status['v1']: + status['v1'][mode] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index a8d28ae6..0c0f427a 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,9 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple +import os import six from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers +from cloudinit.util import load_file, load_json mock = test_helpers.mock @@ -11,6 +14,8 @@ mock = test_helpers.mock class TestCLI(test_helpers.FilesystemMockingTestCase): + with_logs = True + def setUp(self): super(TestCLI, self).setUp() self.stderr = six.StringIO() @@ -24,6 +29,76 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): except SystemExit as e: return e.code + def test_status_wrapper_errors_on_invalid_name(self): + """status_wrapper will error when the name parameter is not valid. + + Valid name values are only init and modules. 
+ """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('init1', myargs, data_d, link_d) + self.assertEqual('unknown name: init1', str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_errors_on_invalid_modes(self): + """status_wrapper will error if a parameter combination is invalid.""" + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('modules', myargs, data_d, link_d) + self.assertEqual( + "Invalid cloud init mode specified 'modules-bogusmode'", + str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_init_local_writes_fresh_status_info(self): + """When running in init-local mode, status_wrapper writes status.json. + + Old status and results artifacts are also removed. + """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + status_link = self.tmp_path('status.json', link_d) + # Write old artifacts which will be removed or updated. + for _dir in data_d, link_d: + test_helpers.populate_dir( + _dir, {'status.json': 'old', 'result.json': 'old'}) + + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(name, args): + # Return an error to watch status capture them + return 'SomeDatasource', ['an error'] + + myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode') + cli.status_wrapper('init', myargs, data_d, link_d) + # No errors reported in status + status_v1 = load_json(load_file(status_link))['v1'] + self.assertEqual(['an error'], status_v1['init-local']['errors']) + self.assertEqual('SomeDatasource', status_v1['datasource']) + self.assertFalse( + os.path.exists(self.tmp_path('result.json', data_d)), + 'unexpected result.json found') + self.assertFalse( + os.path.exists(self.tmp_path('result.json', link_d)), + 'unexpected result.json link found') + def test_no_arguments_shows_usage(self): exit_code = self._call_main() self.assertIn('usage: cloud-init', self.stderr.getvalue()) -- cgit v1.2.3 From b05b9972d20ec3ea699d1691b67314d04e852d2f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 20 Dec 2017 12:46:49 -0700 Subject: Azure: Only bounce network when necessary. This fixes a traceback when attempting to bounce the network after hostname resets. In artful and bionic ifupdown package is no longer installed in default cloud images. As such, Azure can't use those tools to bounce the network informing DDNS about hostname changes. This doesn't affect DDNS updates though because systemd-networkd is now watching hostname deltas and with default behavior to SendHostname=True over dhcp for all hostname updates which publishes DDNS for us. 
LP: #1722668 --- cloudinit/sources/DataSourceAzure.py | 25 +++++++++++++++--------- tests/unittests/test_datasource/test_azure.py | 28 +++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 13 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e73b57b9..d1d09757 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,10 +26,16 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] AGENT_START_BUILTIN = "__builtin__" -BOUNCE_COMMAND = [ +BOUNCE_COMMAND_IFUP = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" ] +BOUNCE_COMMAND_FREEBSD = [ + 'sh', '-xc', + ("i=$interface; x=0; ifconfig down $i || x=$?; " + "ifconfig up $i || x=$?; exit $x") +] + # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' @@ -177,11 +183,6 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") - BOUNCE_COMMAND = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") - ] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -190,7 +191,7 @@ BUILTIN_DS_CONFIG = { 'hostname_bounce': { 'interface': DEFAULT_PRIMARY_NIC, 'policy': True, - 'command': BOUNCE_COMMAND, + 'command': 'builtin', 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, @@ -606,8 +607,14 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): env['old_hostname'] = prev_hostname if command == "builtin": - command = BOUNCE_COMMAND - + if util.is_FreeBSD(): + command = BOUNCE_COMMAND_FREEBSD + elif util.which('ifup'): + command = BOUNCE_COMMAND_IFUP + else: + LOG.debug( + "Skipping network bounce: ifupdown utils aren't present.") + return # Don't bounce as networkd handles hostname DDNS updates LOG.debug("pubhname: publishing hostname [%s]", msg) shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5ab48897..6341e1e8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -174,6 +174,7 @@ scbus-1 on xpt0 bus 0 (dsaz, 'get_hostname', mock.MagicMock()), (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'which', lambda x: True), (dsaz.util, 'read_dmi_data', mock.MagicMock( side_effect=_dmi_mocks)), (dsaz.util, 'wait_for_files', mock.MagicMock( @@ -642,6 +643,8 @@ fdescfs /dev/fd fdescfs rw 0 0 class TestAzureBounce(CiTestCase): + with_logs = True + def mock_out_azure_moving_parts(self): self.patches.enter_context( mock.patch.object(dsaz, 'invoke_agent')) @@ -653,6 +656,8 @@ class TestAzureBounce(CiTestCase): self.patches.enter_context( mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz.util, 'which', lambda x: True)) def _dmi_mocks(key): if key == 'system-uuid': @@ -753,6 +758,22 @@ class TestAzureBounce(CiTestCase): self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) + def test_bounce_skipped_on_ifupdown_absent(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), + agent_command=['not', '__builtin__']) + patch_path = 'cloudinit.sources.DataSourceAzure.util.which' + with mock.patch(patch_path) as m_which: + m_which.return_value = None + ret = self._get_and_setup(dsrc) + self.assertEqual([mock.call('ifup')], m_which.call_args_list) + self.assertTrue(ret) + self.assertIn( + "Skipping network bounce: ifupdown utils aren't present.", + self.logs.getvalue()) + def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' @@ -817,9 +838,7 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_used_by_default(self): - cmd = 'default-bounce-command' - dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + def test_default_bounce_command_ifup_used_by_default(self): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) @@ -827,7 +846,8 @@ class TestAzureBounce(CiTestCase): self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) bounce_args = self.subp.call_args[1]['args'] - self.assertEqual(cmd, bounce_args) + self.assertEqual( + dsaz.BOUNCE_COMMAND_IFUP, bounce_args) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_set_hostname_option_can_disable_bounce( -- cgit v1.2.3 From 25ddc98e8dcd37272825f7044cf4487e3ade126b Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Wed, 15 Nov 2017 16:03:24 -0500 Subject: SUSE: Add a basic test of network config rendering. This simply increases test coverage in rendering of network config for SUSE and SLES. 
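When checking the rendered files, comparing parsed key/value pairs
rather than raw text keeps assertions order-insensitive; a helper of
roughly this shape (illustrative only; the test itself relies on the
existing assertCfgEquals) would do:

    def parse_sysconfig(content):
        """Parse KEY="value" sysconfig lines into a dict."""
        conf = {}
        for line in content.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            conf[key] = value.strip('"')
        return conf


    assert parse_sysconfig('BOOTPROTO="dhcp"\nSTARTMODE="auto"\n') == {
        'BOOTPROTO': 'dhcp', 'STARTMODE': 'auto'}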
--- tests/unittests/test_distros/test_netconfig.py | 49 ++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 8d0b2634..1c2e45fe 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,6 +2,8 @@ import os from six import StringIO +import stat +from textwrap import dedent try: from unittest import mock @@ -12,13 +14,12 @@ try: except ImportError: from contextlib2 import ExitStack -from cloudinit.tests.helpers import TestCase - from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers from cloudinit.net import eni from cloudinit import settings +from cloudinit.tests.helpers import FilesystemMockingTestCase from cloudinit import util @@ -175,7 +176,7 @@ class WriteBuffer(object): return self.buffer.getvalue() -class TestNetCfgDistro(TestCase): +class TestNetCfgDistro(FilesystemMockingTestCase): frbsd_ifout = """\ hn0: flags=8843 metric 0 mtu 1500 @@ -771,4 +772,46 @@ ifconfig_vtnet0="DHCP" self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEqual(write_buf.mode, 0o644) + def test_simple_write_opensuse(self): + """Opensuse network rendering writes appropriate sysconfg files.""" + tmpdir = self.tmp_dir() + self.patchOS(tmpdir) + self.patchUtils(tmpdir) + distro = self._get_distro('opensuse') + + distro.apply_network(BASE_NET_CFG, False) + + lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo') + eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0') + eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1') + expected_cfgs = { + lo_path: dedent(''' + STARTMODE="auto" + USERCONTROL="no" + FIREWALL="no" + '''), + eth0_path: dedent(''' + BOOTPROTO="static" + BROADCAST="192.168.1.0" + GATEWAY="192.168.1.254" + IPADDR="192.168.1.5" + NETMASK="255.255.255.0" + STARTMODE="auto" + USERCONTROL="no" + ETHTOOL_OPTIONS="" + '''), + eth1_path: dedent(''' + BOOTPROTO="dhcp" + STARTMODE="auto" + USERCONTROL="no" + ETHTOOL_OPTIONS="" + ''') + } + for cfgpath in (lo_path, eth0_path, eth1_path): + self.assertCfgEquals( + expected_cfgs[cfgpath], + util.load_file(cfgpath)) + file_stat = os.stat(cfgpath) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + # vi: ts=4 expandtab -- cgit v1.2.3 From 34595e9b4abacc10ac599aad97c95861af34ea54 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 7 Dec 2017 12:54:46 -0800 Subject: tests: Enable AWS EC2 Integration Testing This enables integration tests to utilize AWS EC2 as a testing platform by utilizing the boto3 Python library. Usage will create and delete a custom VPC for every run. All resources will be tagged with the ec2 tag, 'cii', and the date (e.g. cii-20171220-102452). The VPC is setup with both IPv4 and IPv6 capabilities, but will only hand out IPv4 addresses by default. Instances will have complete Internet access and have full ingress and egress access (i.e. no firewall). SSH keys are generated with each run of the integration tests with the key getting uploaded to AWS at the start of tests and deleted on exit. To enable creation when the platform is setup the SSH generation code is moved to be completed by the platform setup and not during image setup. The nocloud-kvm platform was updated with this change. Creating a custom image will utilize the same clean script, boot_clean_script, that the LXD platform uses as well. 
The custom AMI is generated, used, and de-registered after a test run. The default instance type is set to t2.micro. This is one of the smallest instance types and is free tier eligible. The default timeout for ec2 was increased to 300 from 120 as many tests hit up against the 2 minute timeout and depending on region load can go over. Documentation for the AWS platform was added with the expected configuration files for the platform to be used. There are some additional whitespace changes included as well. pylint exception was added for paramiko and simplestreams. In the past these were not already flagged due to no __init__.py in the subdirectories of files that used these. boto3 was added to the list of dependencies in the tox ci-test runner. In order to grab console logs on EC2 the harness will now shut down an instance before terminating and before collecting the console log. This is to address a behavior of EC2 where the console log is refreshed very infrequently, but one point when it is refreshed is after shutdown. --- .pylintrc | 2 +- doc/rtd/topics/tests.rst | 38 +++- tests/cloud_tests/collect.py | 19 +- tests/cloud_tests/platforms.yaml | 11 +- tests/cloud_tests/platforms/__init__.py | 2 + tests/cloud_tests/platforms/ec2/image.py | 109 ++++++++++ tests/cloud_tests/platforms/ec2/instance.py | 126 +++++++++++ tests/cloud_tests/platforms/ec2/platform.py | 231 +++++++++++++++++++++ tests/cloud_tests/platforms/ec2/snapshot.py | 66 ++++++ tests/cloud_tests/platforms/instances.py | 70 ++++++- tests/cloud_tests/platforms/nocloudkvm/instance.py | 88 ++++---- tests/cloud_tests/platforms/nocloudkvm/platform.py | 4 + tests/cloud_tests/platforms/platforms.py | 69 ++++++ tests/cloud_tests/releases.yaml | 8 +- tests/cloud_tests/setup_image.py | 18 -- tests/cloud_tests/util.py | 17 +- tox.ini | 1 + 17 files changed, 784 insertions(+), 95 deletions(-) create mode 100644 tests/cloud_tests/platforms/ec2/image.py create mode 100644 tests/cloud_tests/platforms/ec2/instance.py create mode 100644 tests/cloud_tests/platforms/ec2/platform.py create mode 100644 tests/cloud_tests/platforms/ec2/snapshot.py (limited to 'tests') diff --git a/.pylintrc b/.pylintrc index 3ad36924..05a086d9 100644 --- a/.pylintrc +++ b/.pylintrc @@ -46,7 +46,7 @@ reports=no # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=six.moves,pkg_resources,httplib,http.client +ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst index d668e3f4..bf04bb3c 100644 --- a/doc/rtd/topics/tests.rst +++ b/doc/rtd/topics/tests.rst @@ -118,19 +118,19 @@ TreeRun and TreeCollect If working on a cloud-init feature or resolving a bug, it may be useful to run the current copy of cloud-init in the integration testing environment. -The integration testing suite can automatically build a deb based on the +The integration testing suite can automatically build a deb based on the current working tree of cloud-init and run the test suite using this deb. The ``tree_run`` and ``tree_collect`` commands take the same arguments as -the ``run`` and ``collect`` commands. 
These commands will build a deb and -write it into a temporary file, then start the test suite and pass that deb +the ``run`` and ``collect`` commands. These commands will build a deb and +write it into a temporary file, then start the test suite and pass that deb in. To build a deb only, and not run the test suite, the ``bddeb`` command can be used. Note that code in the cloud-init working tree that has not been committed when the cloud-init deb is built will still be included. To build a cloud-init deb from or use the ``tree_run`` command using a copy of -cloud-init located in a different directory, use the option ``--cloud-init +cloud-init located in a different directory, use the option ``--cloud-init /path/to/cloud-init``. .. code-block:: bash @@ -383,7 +383,7 @@ Development Checklist * Valid unit tests validating output collected * Passes pylint & pep8 checks * Placed in the appropriate sub-folder in the test cases directory -* Tested by running the test: +* Tested by running the test: .. code-block:: bash @@ -392,6 +392,32 @@ Development Checklist --test modules/your_test.yaml \ [--deb ] + +Platforms +========= + +EC2 +--- +To run on the EC2 platform it is required that the user has an AWS credentials +configuration file specifying his or her access keys and a default region. +These configuration files are the standard that the AWS cli and other AWS +tools utilize for interacting directly with AWS itself and are normally +generated when running ``aws configure``: + +.. code-block:: bash + + $ cat $HOME/.aws/credentials + [default] + aws_access_key_id = + aws_secret_access_key = + +.. code-block:: bash + + $ cat $HOME/.aws/config + [default] + region = us-west-2 + + Architecture ============ @@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the result of merging that dictionary from the default config and that dictionary from the overrides. -Merging is done using the function +Merging is done using the function ``tests.cloud_tests.config.merge_config``, which can be examined for more detail on config merging behavior. diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index bb722457..33acbb1e 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -28,12 +28,18 @@ def collect_script(instance, base_dir, script, script_name): def collect_console(instance, base_dir): - LOG.debug('getting console log') + """Collect instance console log. 
+ + @param instance: instance to get console log for + @param base_dir: directory to write console log to + """ + logfile = os.path.join(base_dir, 'console.log') + LOG.debug('getting console log for %s to %s', instance, logfile) try: data = instance.console_log() except NotImplementedError: data = b'instance.console_log: not implemented' - with open(os.path.join(base_dir, 'console.log'), "wb") as fp: + with open(logfile, "wb") as fp: fp.write(data) @@ -89,12 +95,11 @@ def collect_test_data(args, snapshot, os_name, test_name): test_output_dir, script, script_name)) for script_name, script in test_scripts.items()] - console_log = partial( - run_single, 'collect console', - partial(collect_console, instance, test_output_dir)) - res = run_stage('collect for test: {}'.format(test_name), - [start_call] + collect_calls + [console_log]) + [start_call] + collect_calls) + + instance.shutdown() + collect_console(instance, test_output_dir) return res diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml index fa4f845e..cb1c904b 100644 --- a/tests/cloud_tests/platforms.yaml +++ b/tests/cloud_tests/platforms.yaml @@ -6,8 +6,13 @@ default_platform_config: get_image_timeout: 300 # maximum time to create instance (before waiting for cloud-init) create_instance_timeout: 60 - + private_key: id_rsa + public_key: id_rsa.pub platforms: + ec2: + enabled: true + instance-type: t2.micro + tag: cii lxd: enabled: true # overrides for image templates @@ -61,9 +66,5 @@ platforms: {{ config_get("user.vendor-data", properties.default) }} nocloud-kvm: enabled: true - private_key: id_rsa - public_key: id_rsa.pub - ec2: {} - azure: {} # vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py index 92ed1627..a01e51ac 100644 --- a/tests/cloud_tests/platforms/__init__.py +++ b/tests/cloud_tests/platforms/__init__.py @@ -2,10 +2,12 @@ """Main init.""" +from .ec2 import platform as ec2 from .lxd import platform as lxd from .nocloudkvm import platform as nocloudkvm PLATFORMS = { + 'ec2': ec2.EC2Platform, 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform, 'lxd': lxd.LXDPlatform, } diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py new file mode 100644 index 00000000..53706b1d --- /dev/null +++ b/tests/cloud_tests/platforms/ec2/image.py @@ -0,0 +1,109 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""EC2 Image Base Class.""" + +from ..images import Image +from .snapshot import EC2Snapshot +from tests.cloud_tests import LOG + + +class EC2Image(Image): + """EC2 backed image.""" + + platform_name = 'ec2' + + def __init__(self, platform, config, image_ami): + """Set up image. 
+ + @param platform: platform object + @param config: image configuration + @param image_ami: string of image ami ID + """ + super(EC2Image, self).__init__(platform, config) + self._img_instance = None + self.image_ami = image_ami + + @property + def _instance(self): + """Internal use only, returns a running instance""" + if not self._img_instance: + self._img_instance = self.platform.create_instance( + self.properties, self.config, self.features, + self.image_ami, user_data=None) + self._img_instance.start(wait=True, wait_for_cloud_init=True) + return self._img_instance + + @property + def properties(self): + """Dictionary containing: 'arch', 'os', 'version', 'release'.""" + return { + 'arch': self.config['arch'], + 'os': self.config['family'], + 'release': self.config['release'], + 'version': self.config['version'], + } + + def destroy(self): + """Delete the instance used to create a custom image.""" + if self._img_instance: + LOG.debug('terminating backing instance %s', + self._img_instance.instance.instance_id) + self._img_instance.instance.terminate() + self._img_instance.instance.wait_until_terminated() + + super(EC2Image, self).destroy() + + def _execute(self, *args, **kwargs): + """Execute command in image, modifying image.""" + self._instance.start(wait=True) + return self._instance._execute(*args, **kwargs) + + def push_file(self, local_path, remote_path): + """Copy file at 'local_path' to instance at 'remote_path'.""" + self._instance.start(wait=True) + return self._instance.push_file(local_path, remote_path) + + def run_script(self, *args, **kwargs): + """Run script in image, modifying image. + + @return_value: script output + """ + self._instance.start(wait=True) + return self._instance.run_script(*args, **kwargs) + + def snapshot(self): + """Create snapshot of image, block until done. + + Will return base image_ami if no instance has been booted, otherwise + will run the clean script, shutdown the instance, create a custom + AMI, and use that AMI once available. + """ + if not self._img_instance: + return EC2Snapshot(self.platform, self.properties, self.config, + self.features, self.image_ami, + delete_on_destroy=False) + + if self.config.get('boot_clean_script'): + self._img_instance.run_script(self.config.get('boot_clean_script')) + + self._img_instance.shutdown(wait=True) + + LOG.debug('creating custom ami from instance %s', + self._img_instance.instance.instance_id) + response = self.platform.ec2_client.create_image( + Name='%s-%s' % (self.platform.tag, self.image_ami), + InstanceId=self._img_instance.instance.instance_id + ) + image_ami_edited = response['ImageId'] + + # Create image and wait until it is in the 'available' state + image = self.platform.ec2_resource.Image(image_ami_edited) + image.wait_until_exists() + waiter = self.platform.ec2_client.get_waiter('image_available') + waiter.wait(ImageIds=[image.id]) + image.reload() + + return EC2Snapshot(self.platform, self.properties, self.config, + self.features, image_ami_edited) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py new file mode 100644 index 00000000..4ba737ab --- /dev/null +++ b/tests/cloud_tests/platforms/ec2/instance.py @@ -0,0 +1,126 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+
+"""Base EC2 instance."""
+import os
+
+import botocore
+
+from ..instances import Instance
+from tests.cloud_tests import LOG, util
+
+
+class EC2Instance(Instance):
+    """EC2 backed instance."""
+
+    platform_name = "ec2"
+    _ssh_client = None
+
+    def __init__(self, platform, properties, config, features,
+                 image_ami, user_data=None):
+        """Set up instance.
+
+        @param platform: platform object
+        @param properties: dictionary of properties
+        @param config: dictionary of configuration values
+        @param features: dictionary of supported feature flags
+        @param image_ami: AWS AMI ID for image to use
+        @param user_data: test user-data to pass to instance
+        """
+        super(EC2Instance, self).__init__(
+            platform, image_ami, properties, config, features)
+
+        self.image_ami = image_ami
+        self.instance = None
+        self.user_data = user_data
+        self.ssh_ip = None
+        self.ssh_port = 22
+        self.ssh_key_file = os.path.join(
+            platform.config['data_dir'], platform.config['private_key'])
+        self.ssh_pubkey_file = os.path.join(
+            platform.config['data_dir'], platform.config['public_key'])
+
+    def console_log(self):
+        """Collect console log from instance.
+
+        The console log is buffered and not always present, therefore
+        may return empty string.
+        """
+        try:
+            return self.instance.console_output()['Output'].encode()
+        except KeyError:
+            return b''
+
+    def destroy(self):
+        """Clean up instance."""
+        if self.instance:
+            LOG.debug('destroying instance %s', self.instance.id)
+            self.instance.terminate()
+            self.instance.wait_until_terminated()
+
+        self._ssh_close()
+
+        super(EC2Instance, self).destroy()
+
+    def _execute(self, command, stdin=None, env=None):
+        """Execute command on instance."""
+        env_args = []
+        if env:
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
+
+        return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
+
+    def start(self, wait=True, wait_for_cloud_init=False):
+        """Start instance on EC2 with the platform's VPC."""
+        if self.instance:
+            if self.instance.state['Name'] == 'running':
+                return
+
+            LOG.debug('starting instance %s', self.instance.id)
+            self.instance.start()
+        else:
+            LOG.debug('launching instance')
+
+            args = {
+                'ImageId': self.image_ami,
+                'InstanceType': self.platform.instance_type,
+                'KeyName': self.platform.key_name,
+                'MaxCount': 1,
+                'MinCount': 1,
+                'SecurityGroupIds': [self.platform.security_group.id],
+                'SubnetId': self.platform.subnet.id,
+                'TagSpecifications': [{
+                    'ResourceType': 'instance',
+                    'Tags': [{
+                        'Key': 'Name', 'Value': self.platform.tag
+                    }]
+                }],
+            }
+
+            if self.user_data:
+                args['UserData'] = self.user_data
+
+            try:
+                instances = self.platform.ec2_resource.create_instances(**args)
+            except botocore.exceptions.ClientError as error:
+                error_msg = error.response['Error']['Message']
+                raise util.PlatformError('start', error_msg)
+
+            self.instance = instances[0]
+
+        LOG.debug('instance id: %s', self.instance.id)
+        if wait:
+            self.instance.wait_until_running()
+            self.instance.reload()
+            self.ssh_ip = self.instance.public_ip_address
+            self._wait_for_system(wait_for_cloud_init)
+
+    def shutdown(self, wait=True):
+        """Shutdown instance."""
+        LOG.debug('stopping instance %s', self.instance.id)
+        self.instance.stop()
+
+        if wait:
+            self.instance.wait_until_stopped()
+            self.instance.reload()
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
new file mode 100644
index 00000000..fdb17ba0
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/platform.py
@@ -0,0 +1,231 @@
+# This file is
part of cloud-init. See LICENSE file for license information. + +"""Base EC2 platform.""" +from datetime import datetime +import os + +import boto3 +import botocore + +from ..platforms import Platform +from .image import EC2Image +from .instance import EC2Instance +from tests.cloud_tests import LOG + + +class EC2Platform(Platform): + """EC2 test platform.""" + + platform_name = 'ec2' + ipv4_cidr = '192.168.1.0/20' + + def __init__(self, config): + """Set up platform.""" + super(EC2Platform, self).__init__(config) + # Used for unique VPC, SSH key, and custom AMI generation naming + self.tag = '%s-%s' % ( + config['tag'], datetime.now().strftime('%Y%m%d%H%M%S')) + self.instance_type = config['instance-type'] + + try: + self.ec2_client = boto3.client('ec2') + self.ec2_resource = boto3.resource('ec2') + self.ec2_region = boto3.Session().region_name + self.key_name = self._upload_public_key(config) + except botocore.exceptions.NoRegionError: + raise RuntimeError( + 'Please configure default region in $HOME/.aws/config') + except botocore.exceptions.NoCredentialsError: + raise RuntimeError( + 'Please configure ec2 credentials in $HOME/.aws/credentials') + + self.vpc = self._create_vpc() + self.internet_gateway = self._create_internet_gateway() + self.subnet = self._create_subnet() + self.routing_table = self._create_routing_table() + self.security_group = self._create_security_group() + + def create_instance(self, properties, config, features, + image_ami, user_data=None): + """Create an instance + + @param src_img_path: image path to launch from + @param properties: image properties + @param config: image configuration + @param features: image features + @param image_ami: string of image ami ID + @param user_data: test user-data to pass to instance + @return_value: cloud_tests.instances instance + """ + return EC2Instance(self, properties, config, features, + image_ami, user_data) + + def destroy(self): + """Delete SSH keys, terminate all instances, and delete VPC.""" + for instance in self.vpc.instances.all(): + LOG.debug('waiting for instance %s termination', instance.id) + instance.terminate() + instance.wait_until_terminated() + + if self.key_name: + LOG.debug('deleting SSH key %s', self.key_name) + self.ec2_client.delete_key_pair(KeyName=self.key_name) + + if self.security_group: + LOG.debug('deleting security group %s', self.security_group.id) + self.security_group.delete() + + if self.subnet: + LOG.debug('deleting subnet %s', self.subnet.id) + self.subnet.delete() + + if self.routing_table: + LOG.debug('deleting routing table %s', self.routing_table.id) + self.routing_table.delete() + + if self.internet_gateway: + LOG.debug('deleting internet gateway %s', self.internet_gateway.id) + self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id) + self.internet_gateway.delete() + + if self.vpc: + LOG.debug('deleting vpc %s', self.vpc.id) + self.vpc.delete() + + def get_image(self, img_conf): + """Get image using specified image configuration. + + Hard coded for 'amd64' based images. 
+ + @param img_conf: configuration for image + @return_value: cloud_tests.images instance + """ + if img_conf['root-store'] == 'ebs': + root_store = 'ssd' + elif img_conf['root-store'] == 'instance-store': + root_store = 'instance' + else: + raise RuntimeError('Unknown root-store type: %s' % + (img_conf['root-store'])) + + filters = [ + 'arch=%s' % 'amd64', + 'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region, + 'region=%s' % self.ec2_region, + 'release=%s' % img_conf['release'], + 'root_store=%s' % root_store, + 'virt=hvm', + ] + + LOG.debug('finding image using streams') + image = self._query_streams(img_conf, filters) + + try: + image_ami = image['id'] + except KeyError: + raise RuntimeError('No images found for %s!' % img_conf['release']) + + LOG.debug('found image: %s', image_ami) + image = EC2Image(self, img_conf, image_ami) + return image + + def _create_internet_gateway(self): + """Create Internet Gateway and assign to VPC.""" + LOG.debug('creating internet gateway') + internet_gateway = self.ec2_resource.create_internet_gateway() + internet_gateway.attach_to_vpc(VpcId=self.vpc.id) + self._tag_resource(internet_gateway) + + return internet_gateway + + def _create_routing_table(self): + """Update default routing table with internet gateway. + + This sets up internet access between the VPC via the internet gateway + by configuring routing tables for IPv4 and IPv6. + """ + LOG.debug('creating routing table') + route_table = self.vpc.create_route_table() + route_table.create_route(DestinationCidrBlock='0.0.0.0/0', + GatewayId=self.internet_gateway.id) + route_table.create_route(DestinationIpv6CidrBlock='::/0', + GatewayId=self.internet_gateway.id) + route_table.associate_with_subnet(SubnetId=self.subnet.id) + self._tag_resource(route_table) + + return route_table + + def _create_security_group(self): + """Enables ingress to default VPC security group.""" + LOG.debug('creating security group') + security_group = self.vpc.create_security_group( + GroupName=self.tag, Description='integration test security group') + security_group.authorize_ingress( + IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0') + self._tag_resource(security_group) + + return security_group + + def _create_subnet(self): + """Generate IPv4 and IPv6 subnets for use.""" + ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][ + 'Ipv6CidrBlock'][:-2] + '64' + + LOG.debug('creating subnet with following ranges:') + LOG.debug('ipv4: %s', self.ipv4_cidr) + LOG.debug('ipv6: %s', ipv6_cidr) + subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr, + Ipv6CidrBlock=ipv6_cidr) + modify_subnet = subnet.meta.client.modify_subnet_attribute + modify_subnet(SubnetId=subnet.id, + MapPublicIpOnLaunch={'Value': True}) + self._tag_resource(subnet) + + return subnet + + def _create_vpc(self): + """Setup AWS EC2 VPC or return existing VPC.""" + LOG.debug('creating new vpc') + try: + vpc = self.ec2_resource.create_vpc( + CidrBlock=self.ipv4_cidr, + AmazonProvidedIpv6CidrBlock=True) + except botocore.exceptions.ClientError as e: + raise RuntimeError(e) + + vpc.wait_until_available() + self._tag_resource(vpc) + + return vpc + + def _tag_resource(self, resource): + """Tag a resource with the specified tag. + + This makes finding and deleting resources specific to this testing + much easier to find. 
+ + @param resource: resource to tag + """ + tag = { + 'Key': 'Name', + 'Value': self.tag + } + resource.create_tags(Tags=[tag]) + + def _upload_public_key(self, config): + """Generate random name and upload SSH key with that name. + + @param config: platform config + @return: string of ssh key name + """ + key_file = os.path.join(config['data_dir'], config['public_key']) + with open(key_file, 'r') as file: + public_key = file.read().strip('\n') + + LOG.debug('uploading SSH key %s', self.tag) + self.ec2_client.import_key_pair(KeyName=self.tag, + PublicKeyMaterial=public_key) + + return self.tag + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py new file mode 100644 index 00000000..2c48cb54 --- /dev/null +++ b/tests/cloud_tests/platforms/ec2/snapshot.py @@ -0,0 +1,66 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base EC2 snapshot.""" + +from ..snapshots import Snapshot +from tests.cloud_tests import LOG + + +class EC2Snapshot(Snapshot): + """EC2 image copy backed snapshot.""" + + platform_name = 'ec2' + + def __init__(self, platform, properties, config, features, image_ami, + delete_on_destroy=True): + """Set up snapshot. + + @param platform: platform object + @param properties: image properties + @param config: image config + @param features: supported feature flags + @param image_ami: string of image ami ID + @param delete_on_destroy: boolean to delete on destroy + """ + super(EC2Snapshot, self).__init__( + platform, properties, config, features) + + self.image_ami = image_ami + self.delete_on_destroy = delete_on_destroy + + def destroy(self): + """Deregister the backing AMI.""" + if self.delete_on_destroy: + image = self.platform.ec2_resource.Image(self.image_ami) + snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId'] + + LOG.debug('removing custom ami %s', self.image_ami) + self.platform.ec2_client.deregister_image(ImageId=self.image_ami) + + LOG.debug('removing custom snapshot %s', snapshot_id) + self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id) + + def launch(self, user_data, meta_data=None, block=True, start=True, + use_desc=None): + """Launch instance. + + @param user_data: user-data for the instance + @param meta_data: meta_data for the instance + @param block: wait until instance is created + @param start: start instance and wait until fully started + @param use_desc: string of test name + @return_value: an Instance + """ + if meta_data is not None: + raise ValueError("metadata not supported on Ec2") + + instance = self.platform.create_instance( + self.properties, self.config, self.features, + self.image_ami, user_data) + + if start: + instance.start() + + return instance + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index 8c59d62c..3bad021f 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -1,14 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. """Base instance.""" +import time + +import paramiko +from paramiko.ssh_exception import ( + BadHostKeyException, AuthenticationException, SSHException) from ..util import TargetBase +from tests.cloud_tests import LOG, util class Instance(TargetBase): """Base instance object.""" platform_name = None + _ssh_client = None def __init__(self, platform, name, properties, config, features): """Set up instance. 
@@ -26,6 +33,11 @@ class Instance(TargetBase): self.features = features self._tmp_count = 0 + self.ssh_ip = None + self.ssh_port = None + self.ssh_key_file = None + self.ssh_username = 'ubuntu' + def console_log(self): """Instance console. @@ -47,7 +59,63 @@ class Instance(TargetBase): def destroy(self): """Clean up instance.""" - pass + self._ssh_close() + + def _ssh(self, command, stdin=None): + """Run a command via SSH.""" + client = self._ssh_connect() + + cmd = util.shell_pack(command) + fp_in, fp_out, fp_err = client.exec_command(cmd) + channel = fp_in.channel + + if stdin is not None: + fp_in.write(stdin) + fp_in.close() + + channel.shutdown_write() + rc = channel.recv_exit_status() + + return (fp_out.read(), fp_err.read(), rc) + + def _ssh_close(self): + if self._ssh_client: + try: + self._ssh_client.close() + except SSHException: + LOG.warning('Failed to close SSH connection.') + self._ssh_client = None + + def _ssh_connect(self): + """Connect via SSH.""" + if self._ssh_client: + return self._ssh_client + + if not self.ssh_ip or not self.ssh_port: + raise ValueError + + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) + + retries = 30 + while retries: + try: + client.connect(username=self.ssh_username, + hostname=self.ssh_ip, port=self.ssh_port, + pkey=private_key, banner_timeout=30) + self._ssh_client = client + return client + except (ConnectionRefusedError, AuthenticationException, + BadHostKeyException, ConnectionResetError, SSHException, + OSError) as e: + retries -= 1 + time.sleep(10) + + ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % ( + self.ssh_username, self.ssh_ip, self.ssh_port + ) + raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') def _wait_for_system(self, wait_for_cloud_init): """Wait until system has fully booted and cloud-init has finished. diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py index 9bb24256..932dc0fa 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py @@ -4,7 +4,6 @@ import copy import os -import paramiko import socket import subprocess import time @@ -13,7 +12,7 @@ import uuid from ..instances import Instance from cloudinit.atomic_helper import write_json from cloudinit import util as c_util -from tests.cloud_tests import util +from tests.cloud_tests import LOG, util # This domain contains reverse lookups for hostnames that are used. 
# The primary reason is so sudo will return quickly when it attempts @@ -26,7 +25,6 @@ class NoCloudKVMInstance(Instance): """NoCloud KVM backed instance.""" platform_name = "nocloud-kvm" - _ssh_client = None def __init__(self, platform, name, image_path, properties, config, features, user_data, meta_data): @@ -39,6 +37,10 @@ class NoCloudKVMInstance(Instance): @param config: dictionary of configuration values @param features: dictionary of supported feature flags """ + super(NoCloudKVMInstance, self).__init__( + platform, name, properties, config, features + ) + self.user_data = user_data if meta_data: meta_data = copy.deepcopy(meta_data) @@ -66,6 +68,7 @@ class NoCloudKVMInstance(Instance): meta_data['public-keys'] = [] meta_data['public-keys'].append(self.ssh_pubkey) + self.ssh_ip = '127.0.0.1' self.ssh_port = None self.pid = None self.pid_file = None @@ -73,8 +76,33 @@ class NoCloudKVMInstance(Instance): self.disk = image_path self.meta_data = meta_data - super(NoCloudKVMInstance, self).__init__( - platform, name, properties, config, features) + def shutdown(self, wait=True): + """Shutdown instance.""" + + if self.pid: + # This relies on _execute which uses sudo over ssh. The ssh + # connection would get killed before sudo exited, so ignore errors. + cmd = ['shutdown', 'now'] + try: + self._execute(cmd) + except util.InTargetExecuteError: + pass + self._ssh_close() + + if wait: + LOG.debug("Executed shutdown. waiting on pid %s to end", + self.pid) + time_for_shutdown = 120 + give_up_at = time.time() + time_for_shutdown + pid_file_path = '/proc/%s' % self.pid + msg = ("pid %s did not exit in %s seconds after shutdown." % + (self.pid, time_for_shutdown)) + while True: + if not os.path.exists(pid_file_path): + break + if time.time() > give_up_at: + raise util.PlatformError("shutdown", msg) + self.pid = None def destroy(self): """Clean up instance.""" @@ -88,9 +116,7 @@ class NoCloudKVMInstance(Instance): os.remove(self.pid_file) self.pid = None - if self._ssh_client: - self._ssh_client.close() - self._ssh_client = None + self._ssh_close() super(NoCloudKVMInstance, self).destroy() @@ -99,7 +125,7 @@ class NoCloudKVMInstance(Instance): if env: env_args = ['env'] + ["%s=%s" for k, v in env.items()] - return self.ssh(['sudo'] + env_args + list(command), stdin=stdin) + return self._ssh(['sudo'] + env_args + list(command), stdin=stdin) def generate_seed(self, tmpdir): """Generate nocloud seed from user-data""" @@ -125,50 +151,6 @@ class NoCloudKVMInstance(Instance): s.close() return num - def ssh(self, command, stdin=None): - """Run a command via SSH.""" - client = self._ssh_connect() - - cmd = util.shell_pack(command) - try: - fp_in, fp_out, fp_err = client.exec_command(cmd) - channel = fp_in.channel - if stdin is not None: - fp_in.write(stdin) - fp_in.close() - - channel.shutdown_write() - rc = channel.recv_exit_status() - return (fp_out.read(), fp_err.read(), rc) - except paramiko.SSHException as e: - raise util.InTargetExecuteError( - b'', b'', -1, command, self.name, reason=e) - - def _ssh_connect(self, hostname='localhost', username='ubuntu', - banner_timeout=120, retry_attempts=30): - """Connect via SSH.""" - if self._ssh_client: - return self._ssh_client - - private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - while retry_attempts: - try: - client.connect(hostname=hostname, username=username, - port=self.ssh_port, pkey=private_key, - banner_timeout=banner_timeout) - 
self._ssh_client = client - return client - except (paramiko.SSHException, TypeError): - time.sleep(1) - retry_attempts = retry_attempts - 1 - - error_desc = 'Failed command to: %s@%s:%s' % (username, hostname, - self.ssh_port) - raise util.InTargetExecuteError('', '', -1, 'ssh connect', - self.name, error_desc) - def start(self, wait=True, wait_for_cloud_init=False): """Start instance.""" tmpdir = self.platform.config['data_dir'] diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py index 85933463..a7e6f5de 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/platform.py +++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py @@ -21,6 +21,10 @@ class NoCloudKVMPlatform(Platform): platform_name = 'nocloud-kvm' + def __init__(self, config): + """Set up platform.""" + super(NoCloudKVMPlatform, self).__init__(config) + def get_image(self, img_conf): """Get image using specified image configuration. diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index 28975368..d4e5c561 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -1,6 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. """Base platform class.""" +import os + +from simplestreams import filters, mirrors +from simplestreams import util as s_util + +from cloudinit import util as c_util class Platform(object): @@ -11,6 +17,7 @@ class Platform(object): def __init__(self, config): """Set up platform.""" self.config = config + self._generate_ssh_keys(config['data_dir']) def get_image(self, img_conf): """Get image using specified image configuration. @@ -24,4 +31,66 @@ class Platform(object): """Clean up platform data.""" pass + def _generate_ssh_keys(self, data_dir): + """Generate SSH keys to be used with image.""" + filename = os.path.join(data_dir, 'id_rsa') + + if os.path.exists(filename): + c_util.del_file(filename) + + c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096', + '-f', filename, '-P', '', + '-C', 'ubuntu@cloud_test'], + capture=True) + + @staticmethod + def _query_streams(img_conf, img_filter): + """Query streams for latest image given a specific filter. 
+ + @param img_conf: configuration for image + @param filters: array of filters as strings format 'key=value' + @return: dictionary with latest image information or empty + """ + def policy(content, path): + return s_util.read_signed(content, keyring=img_conf['keyring']) + + (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None) + smirror = mirrors.UrlMirrorReader(url, policy=policy) + + config = {'max_items': 1, 'filters': filters.get_filters(img_filter)} + tmirror = FilterMirror(config) + tmirror.sync(smirror, path) + + try: + return tmirror.json_entries[0] + except IndexError: + raise RuntimeError('no images found with filter: %s' % img_filter) + + +class FilterMirror(mirrors.BasicMirrorWriter): + """Taken from sstream-query to return query result as json array.""" + + def __init__(self, config=None): + super(FilterMirror, self).__init__(config=config) + if config is None: + config = {} + self.config = config + self.filters = config.get('filters', []) + self.json_entries = [] + + def load_products(self, path=None, content_id=None): + return {'content_id': content_id, 'products': {}} + + def filter_item(self, data, src, target, pedigree): + return filters.filter_item(self.filters, data, src, pedigree) + + def insert_item(self, data, src, target, pedigree, contentsource): + # src and target are top level products:1.0 + # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]] + # contentsource is a ContentSource if 'path' exists in data or None + data = s_util.products_exdata(src, pedigree) + if 'path' in data: + data.update({'item_url': contentsource.url}) + self.json_entries.append(data) + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index e5933802..48f903b8 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -27,10 +27,14 @@ default_release_config: # features groups and additional feature settings feature_groups: [] features: {} - nocloud-kvm: mirror_url: https://cloud-images.ubuntu.com/daily - mirror_dir: '/srv/citest/nocloud-kvm' + mirror_dir: '/srv/citest/images' keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg + ec2: + # Choose from: [ebs, instance-store] + root-store: ebs + boot_timeout: 300 + nocloud-kvm: setup_overrides: null override_templates: false # lxd specific default configuration options diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 179f40db..6d242115 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -5,7 +5,6 @@ from functools import partial import os -from cloudinit import util as c_util from tests.cloud_tests import LOG from tests.cloud_tests import stage, util @@ -192,20 +191,6 @@ def enable_repo(args, image): image.execute(cmd, description=msg) -def generate_ssh_keys(data_dir): - """Generate SSH keys to be used with image.""" - LOG.info('generating SSH keys') - filename = os.path.join(data_dir, 'id_rsa') - - if os.path.exists(filename): - c_util.del_file(filename) - - c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096', - '-f', filename, '-P', '', - '-C', 'ubuntu@cloud_test'], - capture=True) - - def setup_image(args, image): """Set up image as specified in args. 
@@ -239,9 +224,6 @@ def setup_image(args, image): LOG.info('setting up %s', image) res = stage.run_stage( 'set up for {}'.format(image), calls, continue_after_error=False) - LOG.debug('after setup complete, installed cloud-init version is: %s', - installed_package_version(image, 'cloud-init')) - generate_ssh_keys(args.data_dir) return res # vi: ts=4 expandtab diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index 2aedcd0d..6ff285e7 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -321,9 +321,9 @@ class TargetBase(object): rcs = (0,) if description: - LOG.debug('Executing "%s"', description) + LOG.debug('executing "%s"', description) else: - LOG.debug("Executing command: %s", shell_quote(command)) + LOG.debug("executing command: %s", shell_quote(command)) out, err, rc = self._execute(command=command, stdin=stdin, env=env) @@ -447,6 +447,19 @@ class InTargetExecuteError(c_util.ProcessExecutionError): reason=reason) +class PlatformError(IOError): + """Error type for platform errors.""" + + default_desc = 'unexpected error in platform.' + + def __init__(self, operation, description=None): + """Init error and parent error class.""" + description = description if description else self.default_desc + + message = '%s: %s' % (operation, description) + IOError.__init__(self, message) + + class TempDir(object): """Configurable temporary directory like tempfile.TemporaryDirectory.""" diff --git a/tox.ini b/tox.ini index fdc8a665..88b82dc3 100644 --- a/tox.ini +++ b/tox.ini @@ -134,4 +134,5 @@ passenv = HOME deps = pylxd==2.2.4 paramiko==2.3.1 + boto3==1.4.8 bzr+lp:simplestreams -- cgit v1.2.3 From f794708fabba690677e0e81bd929871c83af3409 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 8 Jan 2018 08:53:14 -0800 Subject: tests: rename test ssh keys to avoid appearance of leaking private keys. While the generated ssh keys are throw away keys, generating SSH keys with the default name (e.g. id_rsa) can trigger security scanners or draw unnecessary attention. The change here simply renames 'id_rsa' to 'cloud_init_rsa' to avoid a false positive reported by a scanning tool. 
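The rename is driven entirely by configuration: platforms.yaml names the key files, and Platform._generate_ssh_keys (shown in an earlier hunk) builds the path from that name. A minimal sketch of the generation step as it behaves after this change, with the standalone helper and direct use of subprocess being illustrative rather than the project's actual plumbing:

    import os
    import subprocess

    def generate_ssh_keys(data_dir, private_key='cloud_init_rsa'):
        # Derive the key path from the configured file name instead of
        # a hardcoded 'id_rsa', then create a fresh throwaway key pair.
        filename = os.path.join(data_dir, private_key)
        if os.path.exists(filename):
            os.unlink(filename)
        subprocess.check_call(
            ['ssh-keygen', '-t', 'rsa', '-b', '4096', '-f', filename,
             '-P', '', '-C', 'ubuntu@cloud_test'])

ssh-keygen writes the public half alongside it as cloud_init_rsa.pub, which is what the public_key setting in the diff below refers to.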
--- tests/cloud_tests/platforms.yaml | 4 ++-- tests/cloud_tests/platforms/platforms.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml index cb1c904b..448aa98d 100644 --- a/tests/cloud_tests/platforms.yaml +++ b/tests/cloud_tests/platforms.yaml @@ -6,8 +6,8 @@ default_platform_config: get_image_timeout: 300 # maximum time to create instance (before waiting for cloud-init) create_instance_timeout: 60 - private_key: id_rsa - public_key: id_rsa.pub + private_key: cloud_init_rsa + public_key: cloud_init_rsa.pub platforms: ec2: enabled: true diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index d4e5c561..1542b3be 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -33,7 +33,7 @@ class Platform(object): def _generate_ssh_keys(self, data_dir): """Generate SSH keys to be used with image.""" - filename = os.path.join(data_dir, 'id_rsa') + filename = os.path.join(data_dir, self.config['private_key']) if os.path.exists(filename): c_util.del_file(filename) -- cgit v1.2.3 From 72270e8c311efc8b9ba8bb92492d8728d84bd9f2 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 8 Jan 2018 10:00:35 -0800 Subject: tests: clean up image properties This renames the incorrectly named 'family' value for images to 'os'. Families are already defined in util.py:OS_FAMILY_MAPPING, and a family is a collection of OSes. This also makes the properties function part of the image superclass, as it is only overridden by the lxd backend. --- tests/cloud_tests/platforms/ec2/image.py | 10 ---------- tests/cloud_tests/platforms/images.py | 3 ++- tests/cloud_tests/platforms/nocloudkvm/image.py | 10 ---------- tests/cloud_tests/releases.yaml | 10 +++++----- 4 files changed, 7 insertions(+), 26 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py index 53706b1d..7bedf59d 100644 --- a/tests/cloud_tests/platforms/ec2/image.py +++ b/tests/cloud_tests/platforms/ec2/image.py @@ -33,16 +33,6 @@ class EC2Image(Image): self._img_instance.start(wait=True, wait_for_cloud_init=True) return self._img_instance - @property - def properties(self): - """Dictionary containing: 'arch', 'os', 'version', 'release'.""" - return { - 'arch': self.config['arch'], - 'os': self.config['family'], - 'release': self.config['release'], - 'version': self.config['version'], - } - def destroy(self): """Delete the instance used to create a custom image.""" if self._img_instance: diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py index d503108a..557a5cf6 100644 --- a/tests/cloud_tests/platforms/images.py +++ b/tests/cloud_tests/platforms/images.py @@ -26,7 +26,8 @@ class Image(TargetBase): @property def properties(self): """{} containing: 'arch', 'os', 'version', 'release'.""" - raise NotImplementedError + return {k: self.config[k] + for k in ('arch', 'os', 'release', 'version')} @property def features(self): diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py index 09ff2a3b..bc2b6e75 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/image.py +++ b/tests/cloud_tests/platforms/nocloudkvm/image.py @@ -35,16 +35,6 @@ class NoCloudKVMImage(Image): super(NoCloudKVMImage, self).__init__(platform, config) - @property - def properties(self): - """Dictionary containing: 'arch', 'os', 'version',
'release'.""" - return { - 'arch': self.config['arch'], - 'os': self.config['family'], - 'release': self.config['release'], - 'version': self.config['version'], - } - def _execute(self, command, stdin=None, env=None): """Execute command in image, modifying image.""" return self.mount_image_callback(command, stdin=stdin, env=env) diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index 48f903b8..0a9fa602 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -132,7 +132,7 @@ releases: enabled: true release: bionic version: 18.04 - family: ubuntu + os: ubuntu feature_groups: - base - debian_base @@ -148,7 +148,7 @@ releases: enabled: true release: artful version: 17.10 - family: ubuntu + os: ubuntu feature_groups: - base - debian_base @@ -164,7 +164,7 @@ releases: enabled: true release: zesty version: 17.04 - family: ubuntu + os: ubuntu feature_groups: - base - debian_base @@ -180,7 +180,7 @@ releases: enabled: true release: xenial version: 16.04 - family: ubuntu + os: ubuntu feature_groups: - base - debian_base @@ -196,7 +196,7 @@ releases: enabled: true release: trusty version: 14.04 - family: ubuntu + os: ubuntu feature_groups: - base - debian_base -- cgit v1.2.3 From df24daa833d7eb88e7c172eb5d7f257766adb0e3 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 8 Jan 2018 09:43:44 -0800 Subject: tests: update apt sources list test Due to additional platforms getting added this test was not taking into account platform specific mirrors nor was it checking that no additional entries were added. --- tests/cloud_tests/testcases/modules/apt_configure_sources_list.py | 5 +++++ tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'tests') diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py index 129d2264..cf84e056 100644 --- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py +++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py @@ -10,6 +10,11 @@ class TestAptconfigureSourcesList(base.CloudTestCase): def test_sources_list(self): """Test sources.list includes sources.""" out = self.get_data_file('sources.list') + + # Verify we have 6 entires + self.assertEqual(6, len(out.rstrip().split('\n'))) + + # Verify the keys generated the list correctly self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu ' '[a-z].* main restricted') self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu ' diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml index 143cb080..87e470c1 100644 --- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml +++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml @@ -7,6 +7,12 @@ required_features: cloud_config: | #cloud-config apt: + primary: + - arches: [default] + uri: http://archive.ubuntu.com/ubuntu + security: + - arches: [default] + uri: http://security.ubuntu.com/ubuntu sources_list: | deb $MIRROR $RELEASE main restricted deb-src $MIRROR $RELEASE main restricted -- cgit v1.2.3 From 5f550420d2ed9d9ef024293f33d33f0f2fc04ee5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 10 Jan 2018 13:53:17 -0700 Subject: MAAS: add check_instance_id based off oauth tokens. This stores a hash of the OAuth tokens as an 'id' for the maas datasource. 
Since new instances get new tokens created and those tokens are written by curtin into datasource system config this will provide a way to identify a new "instance" (install). LP: #1712680 --- cloudinit/sources/DataSourceMAAS.py | 54 ++++++++++++++++++++-------- tests/unittests/test_datasource/test_maas.py | 53 +++++++++++++++++++++++---- 2 files changed, 86 insertions(+), 21 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 496bd06a..6ac88635 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -8,6 +8,7 @@ from __future__ import print_function +import hashlib import os import time @@ -41,25 +42,20 @@ class DataSourceMAAS(sources.DataSource): """ dsname = "MAAS" + id_hash = None + _oauth_helper = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None self.seed_dir = os.path.join(paths.seed_dir, 'maas') - self.oauth_helper = self._get_helper() + self.id_hash = get_id_from_ds_cfg(self.ds_cfg) - def _get_helper(self): - mcfg = self.ds_cfg - # If we are missing token_key, token_secret or consumer_key - # then just do non-authed requests - for required in ('token_key', 'token_secret', 'consumer_key'): - if required not in mcfg: - return url_helper.OauthUrlHelper() - - return url_helper.OauthUrlHelper( - consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], - token_secret=mcfg['token_secret'], - consumer_secret=mcfg.get('consumer_secret')) + @property + def oauth_helper(self): + if not self._oauth_helper: + self._oauth_helper = get_oauth_helper(self.ds_cfg) + return self._oauth_helper def __str__(self): root = sources.DataSource.__str__(self) @@ -147,6 +143,36 @@ class DataSourceMAAS(sources.DataSource): return bool(url) + def check_instance_id(self, sys_cfg): + """locally check if the current system is the same instance. + + MAAS doesn't provide a real instance-id, and if it did, it is + still only available over the network. We need to check based + only on local resources. So compute a hash based on Oauth tokens.""" + if self.id_hash is None: + return False + ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {}) + return (self.id_hash == get_id_from_ds_cfg(ncfg)) + + +def get_oauth_helper(cfg): + """Return an oauth helper instance for values in cfg. + + @raises ValueError from OauthUrlHelper if some required fields have + true-ish values but others do not.""" + keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret') + kwargs = dict([(r, cfg.get(r)) for r in keys]) + return url_helper.OauthUrlHelper(**kwargs) + + +def get_id_from_ds_cfg(ds_cfg): + """Given a config, generate a unique identifier for this node.""" + fields = ('consumer_key', 'token_key', 'token_secret') + idstr = '\0'.join([ds_cfg.get(f, "") for f in fields]) + # store the encoding version as part of the hash in the event + # that it ever changed we can compute older versions. 
+ return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest() + def read_maas_seed_dir(seed_d): if seed_d.startswith("file://"): @@ -322,7 +348,7 @@ if __name__ == "__main__": sys.stderr.write("Must provide a url or a config with url.\n") sys.exit(1) - oauth_helper = url_helper.OauthUrlHelper(**creds) + oauth_helper = get_oauth_helper(creds) def geturl(url): # the retry is to ensure that oauth timestamp gets fixed diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 289c6a40..6e4031cf 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from copy import copy +import mock import os import shutil import tempfile @@ -8,15 +9,10 @@ import yaml from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper -from cloudinit.tests.helpers import TestCase, populate_dir +from cloudinit.tests.helpers import CiTestCase, populate_dir -try: - from unittest import mock -except ImportError: - import mock - -class TestMAASDataSource(TestCase): +class TestMAASDataSource(CiTestCase): def setUp(self): super(TestMAASDataSource, self).setUp() @@ -159,4 +155,47 @@ class TestMAASDataSource(TestCase): self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) self.assertEqual(expected_vd, vd) + +@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") +class TestGetOauthHelper(CiTestCase): + with_logs = True + base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', + 'token_key': 'FAKE_TOKEN_KEY', + 'token_secret': 'FAKE_TOKEN_SECRET', + 'consumer_secret': None} + + def test_all_required(self, m_helper): + """Valid config as expected.""" + DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + def test_other_fields_not_passed_through(self, m_helper): + """Only relevant fields are passed through.""" + mycfg = self.base_cfg.copy() + mycfg['unrelated_field'] = 'unrelated' + DataSourceMAAS.get_oauth_helper(mycfg) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + +class TestGetIdHash(CiTestCase): + v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', + 'token_secret': 'TSEC'} + v1_id = ( + 'v1:' + '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + + def test_v1_expected(self): + """Test v1 id generated as expected working behavior from config.""" + result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) + self.assertEqual(self.v1_id, result) + + def test_v1_extra_fields_are_ignored(self): + """Test v1 id ignores unused entries in config.""" + cfg = self.v1_cfg.copy() + cfg['consumer_secret'] = "BOO" + cfg['unrelated'] = "HI MOM" + result = DataSourceMAAS.get_id_from_ds_cfg(cfg) + self.assertEqual(self.v1_id, result) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 6299e8d0cc230b0c9b41a69a5963bcd2c252c337 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 12 Jan 2018 14:23:26 -0700 Subject: Do not log warning on config files that represent None. This issue was first identified when manual_cache_clean was set, as ds-identify would write /run/cloud-init/cloud.cfg with # manual_cache_clean that would generate a warning as cloud-init expected to load a dict. Any other "empty" config would also log such a warning. Also fix reading of di_report to allow it to be None, as ds-identify would write: di_report: # manual_cache_clean which reads as 'di_report: None' rather than di_report: {}. 
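The underlying YAML behavior is easy to demonstrate: a mapping key followed only by a comment loads as None, and a comment-only document loads as None as well. A minimal sketch using PyYAML directly:

    import yaml

    # What ds-identify writes when manual_cache_clean is only a comment.
    blob = 'di_report:\n # manual_cache_clean\n'
    print(yaml.safe_load(blob))    # {'di_report': None}, not {'di_report': {}}
    print(yaml.safe_load('# x\n')) # a comment-only document loads as None

The patch below therefore makes load_yaml return its default when the parsed document is None, and makes di_report_warn treat a None di_report as nothing to report rather than as a malformed dictionary.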
LP: #1742479 --- cloudinit/cmd/main.py | 8 +++++++- cloudinit/util.py | 10 +++++----- tests/unittests/test_util.py | 8 ++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 30b37fe1..d2f1b778 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg): LOG.debug("no di_report found in config.") return - dicfg = cfg.get('di_report', {}) + dicfg = cfg['di_report'] + if dicfg is None: + # ds-identify may write 'di_report:\n #comment\n' + # which reads as {'di_report': None} + LOG.debug("di_report was None.") + return + if not isinstance(dicfg, dict): LOG.warning("di_report config not a dictionary: %s", dicfg) return diff --git a/cloudinit/util.py b/cloudinit/util.py index 8a9f1ab2..e42498d9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -891,17 +891,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): "of length %s with allowed root types %s", len(blob), allowed) converted = safeyaml.load(blob) - if not isinstance(converted, allowed): + if converted is None: + LOG.debug("loaded blob returned None, returning default.") + converted = default + elif not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... raise TypeError(("Yaml load allows %s root types," " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): - if len(blob) == 0: - LOG.debug("load_yaml given empty string, returning default") - else: - logexc(LOG, "Failed loading yaml blob") + logexc(LOG, "Failed loading yaml blob") return loaded diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 787ca208..d63b760e 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -299,6 +299,14 @@ class TestLoadYaml(helpers.TestCase): default=self.mydefault), myobj) + def test_none_returns_default(self): + """If yaml.load returns None, then default should be returned.""" + blobs = ("", " ", "# foo\n", "#") + mdef = self.mydefault + self.assertEqual( + [(b, self.mydefault) for b in blobs], + [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs]) + class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): -- cgit v1.2.3 From 5cc0b19b851a42f6a5edb0cc9d49dd76891b1bcb Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 9 Jan 2018 09:06:33 -0800 Subject: tests: remove zesty as supported OS to test Zesty goes EOL as of January 13, 2018. This removes it as a valid OS for testing.
--- tests/cloud_tests/releases.yaml | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index 0a9fa602..d8bc170f 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -158,22 +158,6 @@ releases: alias: artful setup_overrides: null override_templates: false - zesty: - # EOL: Jan 2018 - default: - enabled: true - release: zesty - version: 17.04 - os: ubuntu - feature_groups: - - base - - debian_base - - ubuntu_specific - lxd: - sstreams_server: https://cloud-images.ubuntu.com/daily - alias: zesty - setup_overrides: null - override_templates: false xenial: # EOL: Apr 2021 default: -- cgit v1.2.3 From eb70975eaf37cf9549949f72e7647addb81a52ac Mon Sep 17 00:00:00 2001 From: James Penick Date: Tue, 23 Jan 2018 14:22:54 -0700 Subject: Recognize uppercase vfat disk labels New mkfs.vfat and fatlabel tools included in the dosfstools package no longer support creating vfat disks with lowercase labels. They silently default to an all-uppercase label, e.g. CONFIG-2 instead of config-2. This change makes cloud-init handle either upper or lower case. LP: #1598783 --- cloudinit/sources/DataSourceConfigDrive.py | 4 ++-- tests/unittests/test_datasource/test_configdrive.py | 6 ++++++ tests/unittests/test_ds_identify.py | 17 +++++++++++++++++ tools/ds-identify | 4 +++- 4 files changed, 28 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 870b3688..b8db6267 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -25,7 +25,7 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } FS_TYPES = ('vfat', 'iso9660') -LABEL_TYPES = ('config-2',) +LABEL_TYPES = ('config-2', 'CONFIG-2') POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))) @@ -224,7 +224,7 @@ def find_candidate_devs(probe_optical=True): config drive v2: Disk should be: * either vfat or iso9660 formatted - * labeled with 'config-2' + * labeled with 'config-2' or 'CONFIG-2' """ # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6ef5a35c..68400f22 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -458,6 +458,12 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs()) + # Verify that uppercase labels are also found. + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + finally: util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index c9234edd..ad6c5cf4 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -232,6 +232,11 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDrive') return + def test_config_drive_upper(self): + """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" + self._test_ds_found('ConfigDriveUpper') + return + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found.
@@ -503,6 +508,18 @@ VALID_CFG = { }, ], }, + 'ConfigDriveUpper': { + 'ds': 'ConfigDrive', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}]) + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 5893a761..374c3ad1 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -579,6 +579,8 @@ dscheck_NoCloud() { check_configdrive_v2() { if has_fs_with_label "config-2"; then return ${DS_FOUND} + elif has_fs_with_label "CONFIG-2"; then + return ${DS_FOUND} fi # look in /config-drive /seed/config_drive for a directory # openstack/YYYY-MM-DD format with a file meta_data.json @@ -666,7 +668,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From c02a4d4c88cc2c6ec9f03ddf86703f5b67e04348 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jan 2018 15:05:35 -0700 Subject: tests: when querying ntp server, do not do dns resolution. Tests run on EC2 would successfully resolve the IPv4 DNS address, and that caused false positives in failure reports. Basically, a DNS lookup of 172.16.15.14 would return ip-172-16-15-14.us-east-2.compute.internal, which then shows up in the ntpq output unless you provide -n. --- tests/cloud_tests/testcases/modules/ntp_pools.yaml | 2 +- tests/cloud_tests/testcases/modules/ntp_servers.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml index 3a93faa2..d490b228 100644 --- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml +++ b/tests/cloud_tests/testcases/modules/ntp_pools.yaml @@ -26,6 +26,6 @@ collect_scripts: grep '^pool' /etc/ntp.conf ntpq_servers: | #!/bin/sh - ntpq -p -w + ntpq -p -w -n # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml index d59d45a8..6b13b70e 100644 --- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml +++ b/tests/cloud_tests/testcases/modules/ntp_servers.yaml @@ -22,6 +22,6 @@ collect_scripts: grep '^server' /etc/ntp.conf ntpq_servers: | #!/bin/sh - ntpq -p -w + ntpq -p -w -n # vi: ts=4 expandtab -- cgit v1.2.3 From 183d5785954af3a1e7603798d4a91ab126eb7bb9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jan 2018 18:08:14 -0700 Subject: subp: make ProcessExecutionError have expected types in stderr, stdout. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When subp raised a ProcessExecutionError, that exception's stderr and stdout might end up being the string '-' rather than bytes. This meant that:    try:        subp(mycommand, decode=False)    except ProcessExecutionError as e:        pass would have 'e.stdout' set to '-' while the caller would expect bytes. Also reduce the try/except block in subp to just the two lines that may raise an OSError.
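From the caller's side, the guaranteed types now follow the decode flag even when the command cannot be executed at all. A small sketch built around the bogus command name used by the new unit tests:

    from cloudinit import util

    try:
        util.subp(['this-is-not-expected-to-be-a-program-name'], decode=False)
    except util.ProcessExecutionError as e:
        # The OSError fallback is now b'-' when decode=False, so both
        # attributes are bytes; with decode=True they would be str.
        assert isinstance(e.stdout, bytes)
        assert isinstance(e.stderr, bytes)

This mirrors the two test cases added below and is the contract the reworked try/except in subp is meant to pin down.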
--- cloudinit/util.py | 94 ++++++++++++++++++++++++-------------------- tests/unittests/test_util.py | 15 +++++++ 2 files changed, 66 insertions(+), 43 deletions(-) (limited to 'tests') diff --git a/cloudinit/util.py b/cloudinit/util.py index e42498d9..df0aa5db 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -253,12 +253,18 @@ class ProcessExecutionError(IOError): self.exit_code = exit_code if not stderr: - self.stderr = self.empty_attr + if stderr is None: + self.stderr = self.empty_attr + else: + self.stderr = stderr else: self.stderr = self._indent_text(stderr) if not stdout: - self.stdout = self.empty_attr + if stdout is None: + self.stdout = self.empty_attr + else: + self.stdout = stdout else: self.stdout = self._indent_text(stdout) @@ -1829,58 +1835,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, env = env.copy() env.update(update_env) - try: - if target_path(target) != "/": - args = ['chroot', target] + list(args) + if target_path(target) != "/": + args = ['chroot', target] + list(args) - if not logstring: - LOG.debug(("Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)"), args, rcs, shell, capture) - else: - LOG.debug(("Running hidden command to protect sensitive " - "input/output logstring: %s"), logstring) - - stdin = None - stdout = None - stderr = None - if capture: - stdout = subprocess.PIPE - stderr = subprocess.PIPE - if data is None: - # using devnull assures any reads get null, rather - # than possibly waiting on input. - devnull_fp = open(os.devnull) - stdin = devnull_fp - else: - stdin = subprocess.PIPE - if not isinstance(data, bytes): - data = data.encode() + if not logstring: + LOG.debug(("Running command %s with allowed return codes %s" + " (shell=%s, capture=%s)"), args, rcs, shell, capture) + else: + LOG.debug(("Running hidden command to protect sensitive " + "input/output logstring: %s"), logstring) + + stdin = None + stdout = None + stderr = None + if capture: + stdout = subprocess.PIPE + stderr = subprocess.PIPE + if data is None: + # using devnull assures any reads get null, rather + # than possibly waiting on input. + devnull_fp = open(os.devnull) + stdin = devnull_fp + else: + stdin = subprocess.PIPE + if not isinstance(data, bytes): + data = data.encode() + try: sp = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) - - # Just ensure blank instead of none. - if not out and capture: - out = b'' - if not err and capture: - err = b'' - if decode: - def ldecode(data, m='utf-8'): - if not isinstance(data, bytes): - return data - return data.decode(m, decode) - - out = ldecode(out) - err = ldecode(err) except OSError as e: - raise ProcessExecutionError(cmd=args, reason=e, - errno=e.errno) + raise ProcessExecutionError( + cmd=args, reason=e, errno=e.errno, + stdout="-" if decode else b"-", + stderr="-" if decode else b"-") finally: if devnull_fp: devnull_fp.close() + # Just ensure blank instead of none. 
+ if not out and capture: + out = b'' + if not err and capture: + err = b'' + if decode: + def ldecode(data, m='utf-8'): + if not isinstance(data, bytes): + return data + return data.decode(m, decode) + + out = ldecode(out) + err = ldecode(err) + rc = sp.returncode if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d63b760e..4a92e741 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -623,6 +623,7 @@ class TestSubp(helpers.CiTestCase): utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + bogus_command = 'this-is-not-expected-to-be-a-program-name' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf @@ -712,6 +713,20 @@ class TestSubp(helpers.CiTestCase): self.assertIsNone(err) self.assertIsNone(out) + def test_exception_has_out_err_are_bytes_if_decode_false(self): + """Raised exc should have stderr, stdout as bytes if no decode.""" + with self.assertRaises(util.ProcessExecutionError) as cm: + util.subp([self.bogus_command], decode=False) + self.assertTrue(isinstance(cm.exception.stdout, bytes)) + self.assertTrue(isinstance(cm.exception.stderr, bytes)) + + def test_exception_has_out_err_are_bytes_if_decode_true(self): + """Raised exc should have stderr, stdout as string if no decode.""" + with self.assertRaises(util.ProcessExecutionError) as cm: + util.subp([self.bogus_command], decode=True) + self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) + self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) + def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", util.target_path("/target/", "//my/path/")) -- cgit v1.2.3 From 2d781c6a3e27433b7fa993cd54b269ceb74e10b2 Mon Sep 17 00:00:00 2001 From: Max Illfelder Date: Tue, 23 Jan 2018 18:12:32 -0700 Subject: GCE: Improvements and changes to ssh key behavior for default user. The behavior changes and improvements include: - Only import keys into the default user that contain the name of the default user ('ubuntu', or 'centos') or that contain 'cloudinit'. - Use instance or project level keys based on GCE convention. - Respect expiration time when keys are set. Do not import expired keys. - Support ssh-keys in project level metadata (the GCE default). As part of this change, we also update the request header when talking to the metadata server based on the documentation: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying LP: #1670456, #1707033, #1707037, #1707039 --- cloudinit/sources/DataSourceGCE.py | 134 +++++++++++++------ tests/unittests/test_datasource/test_gce.py | 193 +++++++++++++++++++++++++--- 2 files changed, 267 insertions(+), 60 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index ad6dae37..2da34a99 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -2,8 +2,12 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+import datetime +import json + from base64 import b64decode +from cloudinit.distros import ug_util from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper @@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') class GoogleMetadataFetcher(object): - headers = {'X-Google-Metadata-Request': 'True'} + headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address - def get_value(self, path, is_text): + def get_value(self, path, is_text, is_recursive=False): value = None try: - resp = url_helper.readurl(url=self.metadata_address + path, - headers=self.headers) + url = self.metadata_address + path + if is_recursive: + url += '/?recursive=True' + resp = url_helper.readurl(url=url, headers=self.headers) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -35,7 +41,7 @@ class GoogleMetadataFetcher(object): if is_text: value = util.decode_binary(resp.contents) else: - value = resp.contents + value = resp.contents.decode('utf-8') else: LOG.debug("url %s returned code %s", path, resp.code) return value @@ -47,6 +53,10 @@ class DataSourceGCE(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.default_user = None + if distro: + (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro) + (self.default_user, _user_config) = ug_util.extract_default(users) self.metadata = dict() self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}), @@ -70,17 +80,18 @@ class DataSourceGCE(sources.DataSource): @property def launch_index(self): - # GCE does not provide lauch_index property + # GCE does not provide a launch_index property. return None def get_instance_id(self): return self.metadata['instance-id'] def get_public_ssh_keys(self): - return self.metadata['public-keys'] + public_keys_data = self.metadata['public-keys-data'] + return _parse_public_keys(public_keys_data, self.default_user) def get_hostname(self, fqdn=False, resolve_ip=False): - # GCE has long FDQN's and has asked for short hostnames + # GCE has long FQDNs and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @property @@ -92,15 +103,58 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] -def _trim_key(public_key): - # GCE takes sshKeys attribute in the format of ':' - # so we have to trim each key to remove the username part +def _has_expired(public_key): + # Check whether an SSH key is expired. Public key input is a single SSH + # public key in the GCE specific key format documented here: + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat + try: + # Check for the Google-specific schema identifier. + schema, json_str = public_key.split(None, 3)[2:] + except (ValueError, AttributeError): + return False + + # Do not expire keys if they do not have the expected schema identifier. + if schema != 'google-ssh': + return False + + try: + json_obj = json.loads(json_str) + except ValueError: + return False + + # Do not expire keys if there is no expiration timestamp.
+ if 'expireOn' not in json_obj: + return False + + expire_str = json_obj['expireOn'] + format_str = '%Y-%m-%dT%H:%M:%S+0000' try: - index = public_key.index(':') - if index > 0: - return public_key[(index + 1):] - except Exception: - return public_key + expire_time = datetime.datetime.strptime(expire_str, format_str) + except ValueError: + return False + + # Expire the key if and only if we have exceeded the expiration timestamp. + return datetime.datetime.utcnow() > expire_time + + +def _parse_public_keys(public_keys_data, default_user=None): + # Parse the SSH key data for the default user account. Public keys input is + # a list containing SSH public keys in the GCE specific key format + # documented here: + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat + public_keys = [] + if not public_keys_data: + return public_keys + for public_key in public_keys_data: + if not public_key or not all(ord(c) < 128 for c in public_key): + continue + split_public_key = public_key.split(':', 1) + if len(split_public_key) != 2: + continue + user, key = split_public_key + if user in ('cloudinit', default_user) and not _has_expired(key): + public_keys.append(key) + return public_keys def read_md(address=None, platform_check=True): @@ -116,31 +170,28 @@ def read_md(address=None, platform_check=True): ret['reason'] = "Not running on GCE." return ret - # if we cannot resolve the metadata server, then no point in trying + # If we cannot resolve the metadata server, then no point in trying. if not util.is_resolvable_url(address): LOG.debug("%s is not resolvable", address) ret['reason'] = 'address "%s" is not resolvable' % address return ret - # url_map: (our-key, path, required, is_text) + # url_map: (our-key, path, required, is_text, is_recursive) url_map = [ - ('instance-id', ('instance/id',), True, True), - ('availability-zone', ('instance/zone',), True, True), - ('local-hostname', ('instance/hostname',), True, True), - ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/ssh-keys'), False, True), - ('user-data', ('instance/attributes/user-data',), False, False), - ('user-data-encoding', ('instance/attributes/user-data-encoding',), - False, True), + ('instance-id', ('instance/id',), True, True, False), + ('availability-zone', ('instance/zone',), True, True, False), + ('local-hostname', ('instance/hostname',), True, True, False), + ('instance-data', ('instance/attributes',), False, False, True), + ('project-data', ('project/attributes',), False, False, True), ] metadata_fetcher = GoogleMetadataFetcher(address) md = {} - # iterate over url_map keys to get metadata items - for (mkey, paths, required, is_text) in url_map: + # Iterate over url_map keys to get metadata items. 
+ for (mkey, paths, required, is_text, is_recursive) in url_map: value = None for path in paths: - new_value = metadata_fetcher.get_value(path, is_text) + new_value = metadata_fetcher.get_value(path, is_text, is_recursive) if new_value is not None: value = new_value if required and value is None: @@ -149,17 +200,23 @@ def read_md(address=None, platform_check=True): return ret md[mkey] = value - if md['public-keys']: - lines = md['public-keys'].splitlines() - md['public-keys'] = [_trim_key(k) for k in lines] + instance_data = json.loads(md['instance-data'] or '{}') + project_data = json.loads(md['project-data'] or '{}') + valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] + block_project = instance_data.get('block-project-ssh-keys', '').lower() + if block_project != 'true' and not instance_data.get('sshKeys'): + valid_keys.append(project_data.get('ssh-keys')) + valid_keys.append(project_data.get('sshKeys')) + public_keys_data = '\n'.join([key for key in valid_keys if key]) + md['public-keys-data'] = public_keys_data.splitlines() if md['availability-zone']: md['availability-zone'] = md['availability-zone'].split('/')[-1] - encoding = md.get('user-data-encoding') + encoding = instance_data.get('user-data-encoding') if encoding: if encoding == 'base64': - md['user-data'] = b64decode(md['user-data']) + md['user-data'] = b64decode(instance_data.get('user-data')) else: LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) @@ -188,20 +245,19 @@ def platform_reports_gce(): return False -# Used to match classes to dependencies +# Used to match classes to dependencies. datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] -# Return a list of data sources that match this set of dependencies +# Return a list of data sources that match this set of dependencies. def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) if __name__ == "__main__": import argparse - import json import sys from base64 import b64encode @@ -217,7 +273,7 @@ if __name__ == "__main__": data = read_md(address=args.endpoint, platform_check=args.platform_check) if 'user-data' in data: # user-data is bytes not string like other things. Handle it specially. - # if it can be represented as utf-8 then do so. Otherwise print base64 + # If it can be represented as utf-8 then do so. Otherwise print base64 # encoded value in the key user-data-b64. try: data['user-data'] = data['user-data'].decode() @@ -225,7 +281,7 @@ if __name__ == "__main__": sys.stderr.write("User-data cannot be decoded. " "Writing as base64\n") del data['user-data'] - # b64encode returns a bytes value. decode to get the string. + # b64encode returns a bytes value. Decode to get the string. data['user-data-b64'] = b64encode(data['user-data']).decode() print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 82c788dc..12d68009 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -4,13 +4,16 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+import datetime import httpretty +import json import mock import re from base64 import b64encode, b64decode from six.moves.urllib_parse import urlparse +from cloudinit import distros from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceGCE @@ -21,10 +24,7 @@ from cloudinit.tests import helpers as test_helpers GCE_META = { 'instance/id': '123', 'instance/zone': 'foo/bar', - 'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server', 'instance/hostname': 'server.project-foo.local', - # UnicodeDecodeError below if set to ds.userdata instead of userdata_raw - 'instance/attributes/user-data': b'/bin/echo \xff\n', } GCE_META_PARTIAL = { @@ -37,11 +37,13 @@ GCE_META_ENCODING = { 'instance/id': '12345', 'instance/hostname': 'server.project-baz.local', 'instance/zone': 'baz/bang', - 'instance/attributes/user-data': b64encode(b'/bin/echo baz\n'), - 'instance/attributes/user-data-encoding': 'base64', + 'instance/attributes': { + 'user-data': b64encode(b'/bin/echo baz\n').decode('utf-8'), + 'user-data-encoding': 'base64', + } } -HEADERS = {'X-Google-Metadata-Request': 'True'} +HEADERS = {'Metadata-Flavor': 'Google'} MD_URL_RE = re.compile( r'http://metadata.google.internal/computeMetadata/v1/.*') @@ -54,10 +56,15 @@ def _set_mock_metadata(gce_meta=None): url_path = urlparse(uri).path if url_path.startswith('/computeMetadata/v1/'): path = url_path.split('/computeMetadata/v1/')[1:][0] + recursive = path.endswith('/') + path = path.rstrip('/') else: path = None if path in gce_meta: - return (200, headers, gce_meta.get(path)) + response = gce_meta.get(path) + if recursive: + response = json.dumps(response) + return (200, headers, response) else: return (404, headers, '') @@ -69,6 +76,16 @@ def _set_mock_metadata(gce_meta=None): @httpretty.activate class TestDataSourceGCE(test_helpers.HttprettyTestCase): + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + def setUp(self): tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( @@ -90,6 +107,10 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertDictContainsSubset(HEADERS, req_header) def test_metadata(self): + # UnicodeDecodeError if set to ds.userdata instead of userdata_raw + meta = GCE_META.copy() + meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + _set_mock_metadata() self.ds.get_data() @@ -118,8 +139,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): _set_mock_metadata(GCE_META_ENCODING) self.ds.get_data() - decoded = b64decode( - GCE_META_ENCODING.get('instance/attributes/user-data')) + instance_data = GCE_META_ENCODING.get('instance/attributes') + decoded = b64decode(instance_data.get('user-data')) self.assertEqual(decoded, self.ds.get_userdata_raw()) def test_missing_required_keys_return_false(self): @@ -131,33 +152,124 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(False, self.ds.get_data()) httpretty.reset() - def test_project_level_ssh_keys_are_used(self): + def test_no_ssh_keys_metadata(self): _set_mock_metadata() self.ds.get_data() + self.assertEqual([], self.ds.get_public_ssh_keys()) + + def test_cloudinit_ssh_keys(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + 
project_attributes = { + 'sshKeys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") + def test_default_user_ssh_keys(self, mock_ug_util): + mock_ug_util.normalize_users_groups.return_value = None, None + mock_ug_util.extract_default.return_value = 'ubuntu', None + ubuntu_ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, self._make_distro('ubuntu'), + helpers.Paths({})) + + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } - # we expect a list of public ssh keys with user names stripped - self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'], - self.ds.get_public_ssh_keys()) + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + ubuntu_ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) + + def test_instance_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), + 'block-project-ssh-keys': 'False', + } - def test_instance_level_ssh_keys_are_used(self): - key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() - self.assertIn(key_content, self.ds.get_public_ssh_keys()) + expected = [valid_key.format(key) for key in range(2)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_block_project_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'block-project-ssh-keys': 'True', + } - def 
test_instance_level_keys_replace_project_level_keys(self):
-        key_content = 'ssh-rsa JustAUser root@server'
         meta = GCE_META.copy()
-        meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content)
+        meta['project/attributes'] = project_attributes
+        meta['instance/attributes'] = instance_attributes
 
         _set_mock_metadata(meta)
         self.ds.get_data()
 
-        self.assertEqual([key_content], self.ds.get_public_ssh_keys())
+        expected = [valid_key.format(0)]
+        self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
 
     def test_only_last_part_of_zone_used_for_availability_zone(self):
         _set_mock_metadata()
@@ -172,5 +284,44 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
         self.assertEqual(False, ret)
         m_fetcher.assert_not_called()
 
+    def test_has_expired(self):
+
+        def _get_timestamp(days):
+            format_str = '%Y-%m-%dT%H:%M:%S+0000'
+            today = datetime.datetime.now()
+            timestamp = today + datetime.timedelta(days=days)
+            return timestamp.strftime(format_str)
+
+        past = _get_timestamp(-1)
+        future = _get_timestamp(1)
+        ssh_keys = {
+            None: False,
+            '': False,
+            'Invalid': False,
+            'user:ssh-rsa key user@domain.com': False,
+            'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
+            'user:ssh-rsa key google-ssh': False,
+            'user:ssh-rsa key google-ssh {invalid:json}': False,
+            'user:ssh-rsa key google-ssh {"userName":"user"}': False,
+            'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
+            'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
+            'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
+        }
+
+        for key, expired in ssh_keys.items():
+            self.assertEqual(DataSourceGCE._has_expired(key), expired)
+
+    def test_parse_public_keys_non_ascii(self):
+        public_key_data = [
+            'cloudinit:rsa ssh-ke%s invalid' % chr(165),
+            'use%sname:rsa ssh-key' % chr(174),
+            'cloudinit:test 1',
+            'default:test 2',
+            'user:test 3',
+        ]
+        expected = ['test 1', 'test 2']
+        found = DataSourceGCE._parse_public_keys(
+            public_key_data, default_user='default')
+        self.assertEqual(sorted(found), sorted(expected))
 
 # vi: ts=4 expandtab
-- cgit v1.2.3

From ccbe7f6e53eb243b5c869d4f927b93b47e5cb8cd Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Tue, 23 Jan 2018 20:56:31 -0500
Subject: tests: Fix attempted use of /run in a test case.

The previous commit added a test that would attempt to create and use
/run/cloud-init/. This just modifies it to use a temp dir instead.
---
 tests/unittests/test_datasource/test_gce.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tests')

diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 12d68009..f77c2c40 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -194,7 +194,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
         mock_ug_util.extract_default.return_value = 'ubuntu', None
         ubuntu_ds = DataSourceGCE.DataSourceGCE(
             settings.CFG_BUILTIN, self._make_distro('ubuntu'),
-            helpers.Paths({}))
+            helpers.Paths({'run_dir': self.tmp_dir()}))
 
         valid_key = 'ssh-rsa VALID {0}'
         invalid_key = 'ssh-rsa INVALID {0}'
-- cgit v1.2.3

From 32a6a1764e902c31dd3af9b674cea14cd6501187 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 19 Jan 2018 09:43:55 -0500
Subject: tests: Fix EC2 Platform to return console output as bytes.

The EC2 test platform uses boto, and boto decodes console output
with decode('utf-8', 'replace'). It is known that Ubuntu consoles
contain non-utf8 characters, making this call lossy.
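To see why this is lossy, here is a minimal sketch in plain Python (not
boto or cloud-init code; the byte values are invented):

    raw = b'console \xff output'           # b'\xff' is not valid utf-8
    text = raw.decode('utf-8', 'replace')  # b'\xff' becomes U+FFFD
    assert text.encode('utf-8') != raw     # original bytes are unrecoverable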
The change here is to patch the boto session to include an OutputBytes
entry in the console_output response, and then to utilize that in
console_log.

More information on the problem and solution at:
https://github.com/boto/botocore/issues/1351
---
 tests/cloud_tests/platforms/ec2/instance.py | 10 +++++++--
 tests/cloud_tests/platforms/ec2/platform.py | 33 ++++++++++++++++++++++++++---
 2 files changed, 38 insertions(+), 5 deletions(-)

(limited to 'tests')

diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
index 4ba737ab..ab6037b1 100644
--- a/tests/cloud_tests/platforms/ec2/instance.py
+++ b/tests/cloud_tests/platforms/ec2/instance.py
@@ -46,9 +46,15 @@ class EC2Instance(Instance):
         may return empty string.
         """
         try:
-            return self.instance.console_output()['Output'].encode()
+            # OutputBytes comes from platform._decode_console_output_as_bytes
+            response = self.instance.console_output()
+            return response['OutputBytes']
         except KeyError:
-            return b''
+            if 'Output' in response:
+                msg = ("'OutputBytes' did not exist in console_output() but "
+                       "'Output' did: %s..." % response['Output'][0:128])
+                raise util.PlatformError('console_log', msg)
+            return ('No Console Output [%s]' % self.instance).encode()
 
     def destroy(self):
         """Clean up instance."""
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
index fdb17ba0..f188c27b 100644
--- a/tests/cloud_tests/platforms/ec2/platform.py
+++ b/tests/cloud_tests/platforms/ec2/platform.py
@@ -6,6 +6,8 @@ import os
 
 import boto3
 import botocore
+from botocore import session, handlers
+import base64
 
 from ..platforms import Platform
 from .image import EC2Image
@@ -28,9 +30,10 @@ class EC2Platform(Platform):
         self.instance_type = config['instance-type']
 
         try:
-            self.ec2_client = boto3.client('ec2')
-            self.ec2_resource = boto3.resource('ec2')
-            self.ec2_region = boto3.Session().region_name
+            b3session = get_session()
+            self.ec2_client = b3session.client('ec2')
+            self.ec2_resource = b3session.resource('ec2')
+            self.ec2_region = b3session.region_name
             self.key_name = self._upload_public_key(config)
         except botocore.exceptions.NoRegionError:
             raise RuntimeError(
@@ -228,4 +231,28 @@ class EC2Platform(Platform):
 
         return self.tag
 
+
+def _decode_console_output_as_bytes(parsed, **kwargs):
+    """Provide console output as bytes in OutputBytes.
+
+    For this to be useful, the session has to have had the
+    decode_console_output handler unregistered already.
+
+    https://github.com/boto/botocore/issues/1351 ."""
+    if 'Output' not in parsed:
+        return
+    orig = parsed['Output']
+    handlers.decode_console_output(parsed, **kwargs)
+    parsed['OutputBytes'] = base64.b64decode(orig)
+
+
+def get_session():
+    mysess = session.get_session()
+    mysess.unregister('after-call.ec2.GetConsoleOutput',
+                      handlers.decode_console_output)
+    mysess.register('after-call.ec2.GetConsoleOutput',
+                    _decode_console_output_as_bytes)
+    return boto3.Session(botocore_session=mysess)
+
+
 # vi: ts=4 expandtab
-- cgit v1.2.3

From 8a9421421497b3e7c05589c62389745d565c6633 Mon Sep 17 00:00:00 2001
From: Akihiko Ota
Date: Wed, 13 Dec 2017 23:46:02 +0900
Subject: OpenNebula: Improve network configuration support.

Network configuration in OpenNebula would only work if the host
correctly guessed the names of the devices in the guest. OpenNebula
provided data in its context.sh like 'ETH0_NETWORK', but if the guest
named devices differently then results were not predictable. This
would occur with Predictable Network Interface Names.
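For illustration, the older-style context looks roughly like this once
parsed into the dict form the tests below use (values are invented);
only the ETHX index identifies the device, so nothing ties 'ETH0' to a
guest interface that may be named 'ens3':

    context = {
        'ETH0_IP': '10.18.1.1',        # address for whatever 'ETH0' is
        'ETH0_NETWORK': '10.18.0.0',
        'ETH0_MASK': '255.255.0.0',
        'ETH0_GATEWAY': '10.18.0.1',
    }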
To address this, newer versions of OpenNebula provide the mac address
ETH0_MAC. This feature is present in 4.14 and documented officially in
the 5.0 docs. This provides support for reading the mac addresses from
the context.sh.

It also fixes cases where context.sh provided a field (ETH0_NETWORK or
ETH0_MASK) with an empty string. Previously the empty string would be
used rather than falling back to the default.

LP: #1719157, #1716397, #1736750
---
 cloudinit/net/__init__.py                          |   4 +-
 cloudinit/sources/DataSourceOpenNebula.py          | 112 ++++++-----
 tests/unittests/test_datasource/test_opennebula.py | 223 ++++++++++++++++-----
 tests/unittests/test_net.py                        |   6 +-
 4 files changed, 241 insertions(+), 104 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index a1b0db10..c015e793 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/"
 DEFAULT_PRIMARY_INTERFACE = 'eth0'
 
 
-def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
     """Sorting for Humans: natural sort order. Can be use as the key to sort
     functions.
     This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
@@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None):
     # if eth0 exists use it above anything else, otherwise get the interface
     # that we can read 'first' (using the sorted defintion of first).
-    names = list(sorted(potential_interfaces, key=_natural_sort_key))
+    names = list(sorted(potential_interfaces, key=natural_sort_key))
     if DEFAULT_PRIMARY_INTERFACE in names:
         names.remove(DEFAULT_PRIMARY_INTERFACE)
         names.insert(0, DEFAULT_PRIMARY_INTERFACE)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index f66c95d7..ce47b6bd 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -12,6 +12,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import collections
 import os
 import pwd
 import re
@@ -19,6 +20,7 @@ import string
 
 from cloudinit import log as logging
 from cloudinit import net
+from cloudinit.net import eni
 from cloudinit import sources
 from cloudinit import util
 
@@ -89,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource):
             return False
 
         self.seed = seed
-        self.network_eni = results.get("network_config")
+        self.network_eni = results.get('network-interfaces')
         self.metadata = md
         self.userdata_raw = results.get('userdata')
         return True
 
+    @property
+    def network_config(self):
+        if self.network_eni is not None:
+            return eni.convert_eni_data(self.network_eni)
+        else:
+            return None
+
     def get_hostname(self, fqdn=False, resolve_ip=None):
         if resolve_ip is None:
             if self.dsmode == sources.DSMODE_NETWORK:
@@ -116,58 +125,53 @@ class OpenNebulaNetwork(object):
         self.context = context
         if system_nics_by_mac is None:
             system_nics_by_mac = get_physical_nics_by_mac()
-        self.ifaces = system_nics_by_mac
+        self.ifaces = collections.OrderedDict(
+            [k for k in sorted(system_nics_by_mac.items(),
+                               key=lambda k: net.natural_sort_key(k[1]))])
+
+        # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC.
+ # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} + self.context_devname = {} + for k, v in context.items(): + m = re.match(r'^(.+)_MAC$', k) + if m: + self.context_devname[v.lower()] = m.group(1) def mac2ip(self, mac): - components = mac.split(':')[2:] - return [str(int(c, 16)) for c in components] + return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) - def get_ip(self, dev, components): - var_name = dev.upper() + '_IP' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components) + def mac2network(self, mac): + return self.mac2ip(mac).rpartition(".")[0] + ".0" - def get_mask(self, dev): - var_name = dev.upper() + '_MASK' - if var_name in self.context: - return self.context[var_name] - else: - return '255.255.255.0' + def get_dns(self, dev): + return self.get_field(dev, "dns", "").split() - def get_network(self, dev, components): - var_name = dev.upper() + '_NETWORK' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components[:-1]) + '.0' + def get_domain(self, dev): + return self.get_field(dev, "domain") + + def get_ip(self, dev, mac): + return self.get_field(dev, "ip", self.mac2ip(mac)) def get_gateway(self, dev): - var_name = dev.upper() + '_GATEWAY' - if var_name in self.context: - return self.context[var_name] - else: - return None + return self.get_field(dev, "gateway") - def get_dns(self, dev): - var_name = dev.upper() + '_DNS' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_mask(self, dev): + return self.get_field(dev, "mask", "255.255.255.0") - def get_domain(self, dev): - var_name = dev.upper() + '_DOMAIN' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_network(self, dev, mac): + return self.get_field(dev, "network", self.mac2network(mac)) + + def get_field(self, dev, name, default=None): + """return the field name in context for device dev. + + context stores _ (example: eth0_DOMAIN). + an empty string for value will return default.""" + val = self.context.get('_'.join((dev, name,)).upper()) + # allow empty string to return the default. + return default if val in (None, "") else val def gen_conf(self): - global_dns = [] - if 'DNS' in self.context: - global_dns.append(self.context['DNS']) + global_dns = self.context.get('DNS', "").split() conf = [] conf.append('auto lo') @@ -175,29 +179,31 @@ class OpenNebulaNetwork(object): conf.append('') for mac, dev in self.ifaces.items(): - ip_components = self.mac2ip(mac) + mac = mac.lower() + + # c_dev stores name in context 'ETHX' for this device. + # dev stores the current system name. 
+ c_dev = self.context_devname.get(mac, dev) conf.append('auto ' + dev) conf.append('iface ' + dev + ' inet static') - conf.append(' address ' + self.get_ip(dev, ip_components)) - conf.append(' network ' + self.get_network(dev, ip_components)) - conf.append(' netmask ' + self.get_mask(dev)) + conf.append(' #hwaddress %s' % mac) + conf.append(' address ' + self.get_ip(c_dev, mac)) + conf.append(' network ' + self.get_network(c_dev, mac)) + conf.append(' netmask ' + self.get_mask(c_dev)) - gateway = self.get_gateway(dev) + gateway = self.get_gateway(c_dev) if gateway: conf.append(' gateway ' + gateway) - domain = self.get_domain(dev) + domain = self.get_domain(c_dev) if domain: conf.append(' dns-search ' + domain) # add global DNS servers to all interfaces - dns = self.get_dns(dev) + dns = self.get_dns(c_dev) if global_dns or dns: - all_dns = global_dns - if dns: - all_dns.append(dns) - conf.append(' dns-nameservers ' + ' '.join(all_dns)) + conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) conf.append('') diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 2326dd58..5c3ba012 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -4,6 +4,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util from cloudinit.tests.helpers import mock, populate_dir, CiTestCase +from textwrap import dedent import os import pwd @@ -30,6 +31,8 @@ USER_DATA = '#cloud-config\napt_upgrade: true' SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" @@ -195,24 +198,96 @@ class TestOpenNebulaDataSource(CiTestCase): @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_hostname(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d) - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname']) + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + PUBLIC_IP, results['metadata']['local-hostname']) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'}) - results = ds.read_context_disk_dir(self.seed_dir) - - self.assertTrue('network-interfaces' in results) - self.assertTrue('1.2.3.4' in results['network-interfaces']) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for Older OpenNebula? 
+ populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of using Virtual Network contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_NETWORK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '10.18.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.0.0' in results['network-interfaces']) + + # ETH0_NETWORK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.1.0' in results['network-interfaces']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.0.0' in results['network-interfaces']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.255.0' in results['network-interfaces']) def test_find_candidates(self): def my_devs_with(criteria): @@ -233,7 +308,7 @@ class TestOpenNebulaDataSource(CiTestCase): class TestOpenNebulaNetwork(unittest.TestCase): - system_nics = {'02:00:0a:12:01:01': 'eth0'} + system_nics = ('eth0', 'ens3') def test_lo(self): net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) @@ -244,45 +319,101 @@ iface lo inet loopback @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = self.system_nics - net = ds.OpenNebulaNetwork({}) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 10.18.1.1 - network 10.18.1.0 - netmask 255.255.255.0 -''') + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}) + self.assertEqual(net.gen_conf(), dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.1.0 + netmask 255.255.255.0 + """.format(dev=nic, macaddr=MACADDR))) def test_eth0_override(self): context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '1.2.3.4', - 'ETH0_NETWORK': '1.2.3.0', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', 'ETH0_MASK': '255.255.0.0', 'ETH0_GATEWAY': '1.2.3.5', 'ETH0_DOMAIN': 'example.com', - 'ETH0_DNS': 
'1.2.3.6 1.2.3.7' + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': '02:00:0a:12:01:01' } - - net = ds.OpenNebulaNetwork(context, - system_nics_by_mac=self.system_nics) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 1.2.3.4 - network 1.2.3.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 -''') + for nic in self.system_nics: + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """).format(dev=nic, macaddr=MACADDR) + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac={MACADDR: nic}) + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_DOMAIN': 'example.com', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': MAC_2, + 'ETH3_IP': '10.3.1.3', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_MASK': '255.255.0.0', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_DOMAIN': 'third.example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_MAC': MAC_1, + } + net = ds.OpenNebulaNetwork( + context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}) + + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto enp0s25 + iface enp0s25 inet static + #hwaddress 02:00:0a:12:01:01 + address 10.3.1.3 + network 10.3.0.0 + netmask 255.255.0.0 + gateway 10.3.0.1 + dns-search third.example.com + dns-nameservers 1.2.3.8 10.3.1.2 + + auto enp1s2 + iface enp1s2 inet static + #hwaddress 02:00:0a:12:01:02 + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """) + + self.assertEqual(expected, net.gen_conf()) class TestParseShellConfig(unittest.TestCase): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f3fa2a30..ddea13d7 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,9 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net -from cloudinit.net import _natural_sort_key from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import natural_sort_key from cloudinit.net import netplan from cloudinit.net import network_state from cloudinit.net import renderers @@ -2708,11 +2708,11 @@ class TestInterfacesSorting(CiTestCase): def test_natural_order(self): data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'] self.assertEqual( - sorted(data, key=_natural_sort_key), + sorted(data, key=natural_sort_key), ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']) data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2'] self.assertEqual( - sorted(data2, key=_natural_sort_key), + sorted(data2, key=natural_sort_key), ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) -- cgit v1.2.3 From b28ab78089d362c5c6cab985feee0f5f84c9db44 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 27 Nov 2017 19:05:52 -0500 Subject: btrfs: support resizing if root is mounted ro. 
Resize of btrfs fails if the mount point for the file system we are
trying to resize, i.e. the root of the filesystem, is read only. With
this change we use a known (currently snapper-specific) rw location to
work around a flaw that blocks resizing of the ro filesystem.

LP: #1734787
---
 cloudinit/config/cc_resizefs.py                    | 12 +++++-
 cloudinit/tests/test_util.py                       | 46 ++++++++++++++++++++++
 cloudinit/util.py                                  | 23 ++++++++---
 .../test_handler/test_handler_resizefs.py          | 22 ++++++++++-
 4 files changed, 95 insertions(+), 8 deletions(-)
 create mode 100644 cloudinit/tests/test_util.py

(limited to 'tests')

diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 0d282e63..cec22bb7 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema)  # Supplement python help()
 
 
 def _resize_btrfs(mount_point, devpth):
-    return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+    # If "/" is ro resize will fail. However it should be allowed since resize
+    # makes everything bigger and subvolumes that are not ro will benefit.
+    # Use a subvolume that is not ro to trick the resize operation to do the
+    # "right" thing. The use of ".snapshot" is specific to "snapper" a generic
+    # solution would be walk the subvolumes and find a rw mounted subvolume.
+    if (not util.mount_is_read_write(mount_point) and
+            os.path.isdir("%s/.snapshots" % mount_point)):
+        return ('btrfs', 'filesystem', 'resize', 'max',
+                '%s/.snapshots' % mount_point)
+    else:
+        return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
 
 
 def _resize_ext(mount_point, devpth):
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
new file mode 100644
index 00000000..ba6bf699
--- /dev/null
+++ b/cloudinit/tests/test_util.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+ +"""Tests for cloudinit.util""" + +import logging + +import cloudinit.util as util + +from cloudinit.tests.helpers import CiTestCase, mock + +LOG = logging.getLogger(__name__) + +MOUNT_INFO = [ + '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', + '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' +] + + +class TestUtil(CiTestCase): + + def test_parse_mount_info_no_opts_no_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_no_opts_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_with_opts(self): + result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) + self.assertEqual( + ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), + result + ) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_rw(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, True) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_ro(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, False) diff --git a/cloudinit/util.py b/cloudinit/util.py index df0aa5db..9976400f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2059,7 +2059,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def parse_mount_info(path, mountinfo_lines, log=LOG): +def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): """Return the mount information for PATH given the lines from /proc/$$/mountinfo.""" @@ -2121,11 +2121,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): match_mount_point = mount_point match_mount_point_elements = mount_point_elements + mount_options = parts[5] - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) + if get_mnt_opts: + if devpth and fs_type and match_mount_point and mount_options: + return (devpth, fs_type, match_mount_point, mount_options) else: - return None + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + + return None def parse_mtab(path): @@ -2195,7 +2200,7 @@ def parse_mount(path): return None -def get_mount_info(path, log=LOG): +def get_mount_info(path, log=LOG, get_mnt_opts=False): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. 
@@ -2227,7 +2232,7 @@
     mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
     if os.path.exists(mountinfo_path):
         lines = load_file(mountinfo_path).splitlines()
-        return parse_mount_info(path, lines, log)
+        return parse_mount_info(path, lines, log, get_mnt_opts)
     elif os.path.exists("/etc/mtab"):
         return parse_mtab(path)
     else:
@@ -2613,4 +2618,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
     return need
 
 
+def mount_is_read_write(mount_point):
+    """Check whether the given mount point is mounted rw"""
+    result = get_mount_info(mount_point, get_mnt_opts=True)
+    mount_opts = result[-1].split(',')
+    return mount_opts[0] == 'rw'
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index 29d5574d..5aa3c498 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -1,7 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 from cloudinit.config.cc_resizefs import (
-    can_skip_resize, handle, maybe_get_writable_device_path)
+    can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs)
 
 from collections import namedtuple
 import logging
@@ -293,5 +293,25 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
             " per kernel cmdline",
             self.logs.getvalue())
 
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+        """Do not resize / directly if it is read-only. (LP: #1734787)."""
+        m_is_rw.return_value = False
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
+            _resize_btrfs("/", "/dev/sda1"))
+
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+        """Do not resize / directly if it is read-only. (LP: #1734787)."""
+        m_is_rw.return_value = True
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '/'),
+            _resize_btrfs("/", "/dev/sda1"))
+
 # vi: ts=4 expandtab
-- cgit v1.2.3

From c03bdd3d8ed762cada813c5e95a40b14d2047b57 Mon Sep 17 00:00:00 2001
From: Douglas Jordan
Date: Wed, 24 Jan 2018 16:10:08 -0700
Subject: Azure VM Preprovisioning support.

This change enables Azure VMs to report that provisioning has completed
twice: first to tell the fabric it has completed, then a second time to
enable customer settings. The datasource for the second provisioning is
the Instance Metadata Service (IMDS), and the VM will poll indefinitely
for the new ovf-env.xml from IMDS.

This branch introduces EphemeralDHCPv4 which encapsulates common logic
used by both DataSourceEc2 and DataSourceAzure for temporary DHCP
interactions without side-effects.
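As a rough sketch of the intended use (fetch_metadata is a hypothetical
placeholder, not part of this change):

    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    try:
        with EphemeralDHCPv4() as lease:
            # lease is the parsed dhclient lease, with keys such as
            # 'interface', 'fixed-address' and 'subnet-mask'; the
            # temporary address is configured only inside this block.
            fetch_metadata(lease['fixed-address'])  # hypothetical caller code
    except NoDHCPLeaseError:
        pass  # no lease could be obtained; caller falls back or gives up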
LP: #1734991 --- .gitignore | 1 + cloudinit/net/dhcp.py | 43 ++++++- cloudinit/net/network_state.py | 12 ++ cloudinit/sources/DataSourceAzure.py | 138 ++++++++++++++++++++-- cloudinit/sources/DataSourceEc2.py | 23 ++-- cloudinit/sources/helpers/azure.py | 22 ++-- cloudinit/temp_utils.py | 11 +- cloudinit/url_helper.py | 29 +++-- tests/unittests/test_datasource/test_azure.py | 157 +++++++++++++++++++++++++- tests/unittests/test_datasource/test_ec2.py | 2 +- tests/unittests/test_net.py | 12 ++ 11 files changed, 397 insertions(+), 53 deletions(-) (limited to 'tests') diff --git a/.gitignore b/.gitignore index b0500a68..75565ed4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ parts prime stage *.snap +*.cover diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 875a4609..087c0c03 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -10,7 +10,9 @@ import os import re import signal -from cloudinit.net import find_fallback_nic, get_devicelist +from cloudinit.net import ( + EphemeralIPv4Network, find_fallback_nic, get_devicelist) +from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util from six import StringIO @@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception): pass +class NoDHCPLeaseError(Exception): + """Raised when unable to get a DHCP lease.""" + pass + + +class EphemeralDHCPv4(object): + def __init__(self, iface=None): + self.iface = iface + self._ephipv4 = None + + def __enter__(self): + try: + leases = maybe_perform_dhcp_discovery(self.iface) + except InvalidDHCPLeaseFileError: + raise NoDHCPLeaseError() + if not leases: + raise NoDHCPLeaseError() + lease = leases[-1] + LOG.debug("Received dhcp lease on %s for %s/%s", + lease['interface'], lease['fixed-address'], + lease['subnet-mask']) + nmap = {'interface': 'interface', 'ip': 'fixed-address', + 'prefix_or_mask': 'subnet-mask', + 'broadcast': 'broadcast-address', + 'router': 'routers'} + kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) + if not kwargs['broadcast']: + kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + ephipv4 = EphemeralIPv4Network(**kwargs) + ephipv4.__enter__() + self._ephipv4 = ephipv4 + return lease + + def __exit__(self, excp_type, excp_value, excp_traceback): + if not self._ephipv4: + return + self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) + + def maybe_perform_dhcp_discovery(nic=None): """Perform dhcp discovery if nic valid and dhclient command exists. diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 31738c73..fe667d88 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -961,4 +961,16 @@ def mask_to_net_prefix(mask): return ipv4_mask_to_net_prefix(mask) +def mask_and_ipv4_to_bcast_addr(mask, ip): + """Calculate the broadcast address from the subnet mask and ip addr. 
+ + Supports ipv4 only.""" + ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) + mask_dec = ipv4_mask_to_net_prefix(mask) + bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) + bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) + for i in range(4)[::-1]]) + return bcast_str + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d1d09757..4bcbf3a4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -11,13 +11,16 @@ from functools import partial import os import os.path import re +from time import time from xml.dom import minidom import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric +from cloudinit.url_helper import readurl, wait_for_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -44,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' +REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" +IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" +IMDS_RETRIES = 5 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -276,19 +282,20 @@ class DataSourceAzure(sources.DataSource): with temporary_hostname(azure_hostname, self.ds_cfg, hostname_command=hostname_command) \ - as previous_hostname: - if (previous_hostname is not None and + as previous_hn: + if (previous_hn is not None and util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] # "Bouncing" the network try: - perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hostname) + return perform_hostname_bounce(hostname=azure_hostname, + cfg=cfg, + prev_hostname=previous_hn) except Exception as e: LOG.warning("Failed publishing hostname: %s", e) util.logexc(LOG, "handling set_hostname failed") + return False def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') @@ -345,15 +352,20 @@ class DataSourceAzure(sources.DataSource): ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] + if os.path.isfile(REPROVISION_MARKER_FILE): + candidates.insert(0, "IMDS") candidates.extend(list_possible_azure_ds_devs()) if ddir: candidates.append(ddir) found = None - + reprovision = False for cdev in candidates: try: - if cdev.startswith("/dev/"): + if cdev == "IMDS": + ret = None + reprovision = True + elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, mtype="udf", sync=False) @@ -370,6 +382,8 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue + if reprovision or self._should_reprovision(ret): + ret = self._reprovision() (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) @@ -428,6 +442,83 @@ class DataSourceAzure(sources.DataSource): LOG.debug("negotiating already done for %s", self.get_instance_id()) + def _poll_imds(self, report_ready=True): + """Poll IMDS for the new provisioning data until we get a valid + response. 
Then return the returned JSON object.""" + url = IMDS_URL + "?api-version=2017-04-02" + headers = {"Metadata": "true"} + LOG.debug("Start polling IMDS") + + def sleep_cb(response, loop_n): + return 1 + + def exception_cb(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return + LOG.warning("Exception during polling. Will try DHCP.", + exc_info=True) + + # If we get an exception while trying to call IMDS, we + # call DHCP and setup the ephemeral network to acquire the new IP. + raise exception + + need_report = report_ready + for i in range(IMDS_RETRIES): + try: + with EphemeralDHCPv4() as lease: + if need_report: + self._report_ready(lease=lease) + need_report = False + wait_for_url([url], max_wait=None, timeout=60, + status_cb=LOG.info, + headers_cb=lambda url: headers, sleep_time=1, + exception_cb=exception_cb, + sleep_time_cb=sleep_cb) + return str(readurl(url, headers=headers)) + except Exception: + LOG.debug("Exception during polling-retrying dhcp" + + " %d more time(s).", (IMDS_RETRIES - i), + exc_info=True) + + def _report_ready(self, lease): + """Tells the fabric provisioning has completed + before we go into our polling loop.""" + try: + get_metadata_from_fabric(None, lease['unknown-245']) + except Exception as exc: + LOG.warning( + "Error communicating with Azure fabric; You may experience." + "connectivity issues.", exc_info=True) + + def _should_reprovision(self, ret): + """Whether or not we should poll IMDS for reprovisioning data. + Also sets a marker file to poll IMDS. + + The marker file is used for the following scenario: the VM boots into + this polling loop, which we expect to be proceeding infinitely until + the VM is picked. If for whatever reason the platform moves us to a + new host (for instance a hardware issue), we need to keep polling. + However, since the VM reports ready to the Fabric, we will not attach + the ISO, thus cloud-init needs to have a way of knowing that it should + jump back into the polling loop in order to retrieve the ovf_env.""" + if not ret: + return False + (md, self.userdata_raw, cfg, files) = ret + path = REPROVISION_MARKER_FILE + if (cfg.get('PreprovisionedVm') is True or + os.path.isfile(path)): + if not os.path.isfile(path): + LOG.info("Creating a marker file to poll imds") + util.write_file(path, "%s: %s\n" % (os.getpid(), time())) + return True + return False + + def _reprovision(self): + """Initiate the reprovisioning workflow.""" + contents = self._poll_imds() + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + def _negotiate(self): """Negotiate with fabric and return data from it. @@ -453,7 +544,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False - + util.del_file(REPROVISION_MARKER_FILE) return fabric_data def activate(self, cfg, is_new_instance): @@ -595,6 +686,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command + # Returns True if the network was bounced, False otherwise. 
command = cfg['command'] interface = cfg['interface'] policy = cfg['policy'] @@ -614,7 +706,8 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): else: LOG.debug( "Skipping network bounce: ifupdown utils aren't present.") - return # Don't bounce as networkd handles hostname DDNS updates + # Don't bounce as networkd handles hostname DDNS updates + return False LOG.debug("pubhname: publishing hostname [%s]", msg) shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. @@ -622,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): get_uptime=True, func=util.subp, kwargs={'args': command, 'shell': shell, 'capture': False, 'env': env}) + return True def crtfile_to_pubkey(fname, data=None): @@ -838,9 +932,35 @@ def read_azure_ovf(contents): if 'ssh_pwauth' not in cfg and password: cfg['ssh_pwauth'] = True + cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom) + return (md, ud, cfg) +def _extract_preprovisioned_vm_setting(dom): + """Read the preprovision flag from the ovf. It should not + exist unless true.""" + platform_settings_section = find_child( + dom.documentElement, + lambda n: n.localName == "PlatformSettingsSection") + if not platform_settings_section or len(platform_settings_section) == 0: + LOG.debug("PlatformSettingsSection not found") + return False + platform_settings = find_child( + platform_settings_section[0], + lambda n: n.localName == "PlatformSettings") + if not platform_settings or len(platform_settings) == 0: + LOG.debug("PlatformSettings not found") + return False + preprovisionedVm = find_child( + platform_settings[0], + lambda n: n.localName == "PreprovisionedVm") + if not preprovisionedVm or len(preprovisionedVm) == 0: + LOG.debug("PreprovisionedVm not found") + return False + return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue) + + def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0f89f34d..e14553b3 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -14,7 +14,7 @@ import time from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import net -from cloudinit.net import dhcp +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util @@ -102,22 +102,13 @@ class DataSourceEc2(sources.DataSource): if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False - dhcp_leases = dhcp.maybe_perform_dhcp_discovery( - self.fallback_interface) - if not dhcp_leases: - # DataSourceEc2Local failed in init-local stage. DataSourceEc2 - # will still run in init-network stage. 
+ try: + with EphemeralDHCPv4(self.fallback_interface): + return util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except NoDHCPLeaseError: return False - dhcp_opts = dhcp_leases[-1] - net_params = {'interface': dhcp_opts.get('interface'), - 'ip': dhcp_opts.get('fixed-address'), - 'prefix_or_mask': dhcp_opts.get('subnet-mask'), - 'broadcast': dhcp_opts.get('broadcast-address'), - 'router': dhcp_opts.get('routers')} - with net.EphemeralIPv4Network(**net_params): - return util.log_time( - logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) else: return self._crawl_metadata() diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6cda5721..90c12df1 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): ' ', '']) - def __init__(self, fallback_lease_file=None): + def __init__(self, fallback_lease_file=None, dhcp_options=None): LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', fallback_lease_file) - self.dhcpoptions = None + self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None self.values = {} @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): @property def endpoint(self): if self._endpoint is None: - self._endpoint = self.find_endpoint(self.lease_file) + self._endpoint = self.find_endpoint(self.lease_file, + self.dhcpoptions) return self._endpoint @staticmethod @@ -292,10 +293,14 @@ class WALinuxAgentShim(object): return _value @staticmethod - def find_endpoint(fallback_lease_file=None): + def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None - LOG.debug('Finding Azure endpoint from networkd...') - value = WALinuxAgentShim._networkd_get_value_from_leases() + if dhcp245 is not None: + value = dhcp245 + LOG.debug("Using Azure Endpoint from dhcp options") + if value is None: + LOG.debug('Finding Azure endpoint from networkd...') + value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook @@ -367,8 +372,9 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) try: return shim.register_with_azure_and_fetch_data() finally: diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index 5d7adf70..c98a1b53 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): if odir is not None: return odir + if needs_exe: + tdir = _EXE_ROOT_TMPDIR + if not os.path.isdir(tdir): + os.makedirs(tdir) + os.chmod(tdir, 0o1777) + return tdir + global _TMPDIR if _TMPDIR: return _TMPDIR - if needs_exe: - tdir = _EXE_ROOT_TMPDIR - elif os.getuid() == 0: + if os.getuid() == 0: tdir = _ROOT_TMPDIR else: tdir = os.environ.get('TMPDIR', '/tmp') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0e0f5b4c..0a5be0b3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, 
status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None): + exception_cb=None, sleep_time_cb=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, for request. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. + sleep_time_cb: call method with 2 arguments (response, loop_n) that + generates the next sleep time. the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, service but is not going to find one. It is possible that the instance data host (169.254.169.254) may be firewalled off Entirely for a sytem, meaning that the connection will block forever unless a timeout is set. + + A value of None for max_wait will retry indefinitely. """ start_time = time.time() @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb = log_status_cb def timeup(max_wait, start_time): - return ((max_wait <= 0 or max_wait is None) or - (time.time() - start_time > max_wait)) + if (max_wait is None): + return False + return ((max_wait <= 0) or (time.time() - start_time > max_wait)) loop_n = 0 + response = None while True: - sleep_time = int(loop_n / 5) + 1 + if sleep_time_cb is not None: + sleep_time = sleep_time_cb(response, loop_n) + else: + sleep_time = int(loop_n / 5) + 1 for url in urls: now = time.time() if loop_n != 0: if timeup(max_wait, start_time): break - if timeout and (now + timeout > (start_time + max_wait)): + if (max_wait is not None and + timeout and (now + timeout > (start_time + max_wait))): # shorten timeout to not run way over max_time timeout = int((start_time + max_wait) - now) @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = e time_taken = int(time.time() - start_time) - status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, - reason) + max_wait_str = "%ss" % max_wait if max_wait else "unlimited" + status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, + time_taken, + max_wait_str, + reason) status_cb(status_msg) if exception_cb: # This can be used to alter the headers that will be sent diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6341e1e8..254e9876 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -5,7 +5,7 @@ from cloudinit.util import b64e, decode_binary, load_file, write_file from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd - +from cloudinit.version import version_string as vs from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, ExitStack, PY26, SkipTest) @@ -16,7 +16,8 @@ import xml.etree.ElementTree as ET import yaml -def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): +def construct_valid_ovf_env(data=None, pubkeys=None, + userdata=None, platform_settings=None): if data is None: data = {'HostName': 'FOOHOST'} if pubkeys is None: @@ -66,10 +67,12 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> kms.core.windows.net false - - - - """ + """ + if platform_settings: + for k, v in platform_settings.items(): + content += 
"<%s>%s\n" % (k, v, k) + content += """ +""" return content @@ -1107,4 +1110,146 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch.object(dsaz, 'get_hostname') +@mock.patch.object(dsaz, 'set_hostname') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_read_azure_ovf_with_true_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag if the proper setting is present.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertTrue(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_with_false_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag to false if the proper setting is false.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "False"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_without_flag(self, *args): + """The read_azure_ovf method should not set the + PreprovisionedVM cfg flag.""" + content = construct_valid_ovf_env() + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _poll_imds method should return the ovf_env.xml.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf") + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', timeout=60.0, + url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _reprovision method should call poll IMDS.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + content = construct_valid_ovf_env(data=odata) + fake_resp.return_value = mock.MagicMock(status_code=200, text=content) + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + md, ud, cfg, d = dsa._reprovision() + self.assertEqual(md['local-hostname'], hostname) + self.assertEqual(cfg['system_info']['default_user']['name'], username) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', timeout=60.0, url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('os.path.isfile') + def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args): + """The _should_reprovision method should return True when the config + flag is present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'PreprovisionedVm': True}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_with_file_existing(self, isfile, *args): + """The _should_reprovision method should return True if the sentinel + exists.""" + isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'preprovisionedvm': False}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_returns_false(self, isfile, *args): + """The _should_reprovision method should return False + if config and sentinel are not present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertFalse(dsa._should_reprovision((None, None, {}, None))) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index f0dc8338..0f7267bb 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -425,7 +425,7 @@ class TestEc2(test_helpers.HttprettyTestCase): self.logs.getvalue()) @httpretty.activate - @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index ddea13d7..ac33e8ef 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2948,4 +2948,16 @@ class TestRenameInterfaces(CiTestCase): mock_subp.assert_has_calls(expected) +class TestNetworkState(CiTestCase): + + def test_bcast_addr(self): + """Test proper execution of mask_and_ipv4_to_bcast_addr.""" + bcast_addr = network_state.mask_and_ipv4_to_bcast_addr +
self.assertEqual("192.168.1.255", + bcast_addr("255.255.255.0", "192.168.1.1")) + self.assertEqual("128.42.7.255", + bcast_addr("255.255.248.0", "128.42.5.4")) + self.assertEqual("10.1.21.255", + bcast_addr("255.255.255.0", "10.1.21.4")) + # vi: ts=4 expandtab -- cgit v1.2.3 From bc84f5023f795c261e32cf0690b2d29e12cfaedd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jan 2018 12:26:34 -0700 Subject: tests: Collect script output as binary, collect systemd journal, fix lxd. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds collection of a gzip-compressed systemd journal on systemd systems. The file can later be reviewed with:   zcat system.journal.gz > system.journal   journalctl --file=system.journal [-o short-monotonic ..] To support this:   * modify test harness infrastructure to not assume content is utf-8.   * fix lxd platform to make '_execute' return bytes rather     than a string. https://github.com/lxc/pylxd/issues/268 Also switched the base collectors to use /bin/sh as others already did. --- tests/cloud_tests/collect.py | 7 +++++ tests/cloud_tests/platforms/lxd/instance.py | 43 +++++++++++++---------------- tests/cloud_tests/testcases.yaml | 27 ++++++++++++++---- tests/cloud_tests/testcases/base.py | 6 ++-- tests/cloud_tests/verify.py | 2 +- 5 files changed, 52 insertions(+), 33 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 33acbb1e..5ea88e50 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -24,6 +24,13 @@ def collect_script(instance, base_dir, script, script_name): (out, err, exit) = instance.run_script( script.encode(), rcs=False, description='collect: {}'.format(script_name)) + if err: + LOG.debug("collect script %s had stderr: %s", script_name, err) + if not isinstance(out, bytes): + raise util.PlatformError( + "Collection of '%s' returned type %s, expected bytes: %s" % + (script_name, type(out), out)) + c_util.write_file(os.path.join(base_dir, script_name), out) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 0d697c05..d2d2a1fd 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -6,6 +6,8 @@ import os import shutil from tempfile import mkdtemp +from cloudinit.util import subp, ProcessExecutionError + from ..instances import Instance @@ -29,6 +31,7 @@ class LXDInstance(Instance): platform, name, properties, config, features) self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name)) self._setup_console_log() + self.name = name @property def pylxd_container(self): @@ -55,33 +58,25 @@ class LXDInstance(Instance): if env is None: env = {} - if stdin is not None: - # pylxd does not support input to execute. - # https://github.com/lxc/pylxd/issues/244 - # - # The solution here is write a tmp file in the container - # and then execute a shell that sets it standard in to - # be from that file, removes it, and calls the comand.
- tmpf = self.tmpfile() - self.write_data(tmpf, stdin) - ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"' - command = (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] + - list(command)) + env_args = [] + if env: + env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()] # ensure instance is running and execute the command self.start() - # execute returns a ContainerExecuteResult, named tuple - # (exit_code, stdout, stderr) - res = self.pylxd_container.execute(command, environment=env) - - # get out, exit and err from pylxd return - if not hasattr(res, 'exit_code'): - # pylxd 2.1.3 and earlier only return out and err, no exit - raise RuntimeError( - "No 'exit_code' in pylxd.container.execute return.\n" - "pylxd > 2.2 is required.") - - return res.stdout, res.stderr, res.exit_code + + # Use cmdline client due to https://github.com/lxc/pylxd/issues/268 + exit_code = 0 + try: + stdout, stderr = subp( + ['lxc', 'exec', self.name, '--'] + env_args + list(command), + data=stdin, decode=False) + except ProcessExecutionError as e: + exit_code = e.exit_code + stdout = e.stdout + stderr = e.stderr + + return stdout, stderr, exit_code def read_data(self, remote_path, decode=False): """Read data from instance filesystem. diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml index 7183e017..8e0fb62f 100644 --- a/tests/cloud_tests/testcases.yaml +++ b/tests/cloud_tests/testcases.yaml @@ -7,22 +7,37 @@ base_test_data: #cloud-config collect_scripts: cloud-init.log: | - #!/bin/bash + #!/bin/sh cat /var/log/cloud-init.log cloud-init-output.log: | - #!/bin/bash + #!/bin/sh cat /var/log/cloud-init-output.log instance-id: | - #!/bin/bash + #!/bin/sh cat /run/cloud-init/.instance-id result.json: | - #!/bin/bash + #!/bin/sh cat /run/cloud-init/result.json status.json: | - #!/bin/bash + #!/bin/sh cat /run/cloud-init/status.json cloud-init-version: | - #!/bin/bash + #!/bin/sh dpkg-query -W -f='${Version}' cloud-init + system.journal.gz: | + #!/bin/sh + [ -d /run/systemd ] || { echo "not systemd."; exit 0; } + fail() { echo "ERROR:" "$@" 1>&2; exit 1; } + journal="" + for d in /run/log/journal /var/log/journal; do + for f in $d/*/system.journal; do + [ -f "$f" ] || continue + [ -z "$journal" ] || + fail "multiple journal found: $f $journal." + journal="$f" + done + done + [ -f "$journal" ] || fail "no journal file found."
+ gzip --to-stdout "$journal" # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 1c5b5405..20e95955 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -30,12 +30,14 @@ class CloudTestCase(unittest.TestCase): raise AssertionError('Key "{}" not in cloud config'.format(name)) return self.cloud_config[name] - def get_data_file(self, name): + def get_data_file(self, name, decode=True): """Get data file failing test if it is not present.""" if name not in self.data: raise AssertionError('File "{}" missing from collect data' .format(name)) - return self.data[name] + if not decode: + return self.data[name] + return self.data[name].decode('utf-8') def get_instance_id(self): """Get recorded instance id.""" diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index fc1efcfc..2a9fd520 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -29,7 +29,7 @@ def verify_data(base_dir, tests): data = {} test_dir = os.path.join(base_dir, test_name) for script_name in os.listdir(test_dir): - with open(os.path.join(test_dir, script_name), 'r') as fp: + with open(os.path.join(test_dir, script_name), 'rb') as fp: data[script_name] = fp.read() # get test suite and launch tests -- cgit v1.2.3 From 5e5dc9731f39e8b1df767fbaf850fbbd31355a1a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jan 2018 12:51:03 -0500 Subject: OVF: Extend well-known labels to include OVFENV. Fujitsu Cloud Service attaches an OVF ISO transport with the label 'OVFENV'. This seems to be a reasonable value as a label. While the fix for bug 1731868 would likely fix cloud-init on Fujitsu cloud, this change will find it faster. LP: #1698669 --- tests/unittests/test_ds_identify.py | 8 +++++--- tools/ds-identify | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index ad6c5cf4..31cc6223 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -338,7 +338,7 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('OVF-vmware-customization') def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): - """OVF is identified when iso9660 cdrom label has ovf-transport.""" + """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) # Unset matching cdrom ovf schema content ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match' @@ -346,10 +346,12 @@ class TestDsIdentify(CiTestCase): ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled") # Add recognized labels - for valid_fs_label in ['ovf-transport', 'OVF-TRANSPORT']: + valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT', + "OVFENV", "ovfenv"] + for valid_ovf_label in valid_ovf_labels: ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([ {'DEVNAME': 'sr0', 'TYPE': 'iso9660', - 'LABEL': valid_fs_label}]) + 'LABEL': valid_ovf_label}]) self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) diff --git a/tools/ds-identify b/tools/ds-identify index 374c3ad1..cd268242 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -664,7 +664,9 @@ is_cdrom_ovf() { esac # fast path known 'OVF' labels - [ "$label" = "OVF-TRANSPORT" -o "$label" = "ovf-transport" ] && return 0 + case "$label" in + OVF-TRANSPORT|ovf-transport|OVFENV|ovfenv) return 0;; + esac # explicitly skip known labels of other types. rd_rdfe is azure.
case "$label" in -- cgit v1.2.3
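A note on the wait_for_url() changes earlier in this series: with max_wait=None the loop now retries indefinitely, so the new sleep_time_cb hook becomes the only thing pacing requests (the default backoff remains int(loop_n / 5) + 1 seconds). Below is a minimal sketch of a caller using the hook against the IMDS reprovision endpoint exercised by the Azure tests above; the constant five-second pacing, the header helper, and the print call are illustrative assumptions, not what DataSourceAzure itself does:

    from cloudinit.url_helper import wait_for_url

    def constant_sleep(response, loop_n):
        # The hook receives the last response and the loop counter;
        # this sketch ignores both and always pauses five seconds.
        return 5

    def imds_headers(url):
        # IMDS requires this header; the tests above assert it is sent.
        return {'Metadata': 'true'}

    # timeout bounds each individual request, while max_wait=None means
    # the overall loop never gives up, per the updated docstring.
    url = wait_for_url(
        urls=['http://169.254.169.254/metadata/reprovisiondata'
              '?api-version=2017-04-02'],
        max_wait=None, timeout=60, headers_cb=imds_headers,
        sleep_time_cb=constant_sleep)
    if url:
        print('metadata service answered at %s' % url)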