147 files changed, 5053 insertions, 1211 deletions
@@ -10,3 +10,4 @@ parts prime stage *.snap +*.cover @@ -46,7 +46,7 @@ reports=no # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=six.moves,pkg_resources,httplib,http.client +ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of @@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. -generated-members=types,http.client,command_handlers +generated-members=types,http.client,command_handlers,m_.* @@ -1,3 +1,88 @@ +17.2: + - ds-identify: failure in NoCloud due to unset variable usage. + (LP: #1737704) + - tests: fix collect_console when not implemented [Joshua Powers] + - ec2: Use instance-identity doc for region and instance-id + [Andrew Jorgensen] + - tests: remove leaked tmp files in config drive tests. + - setup.py: Do not include rendered files in SOURCES.txt + - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] + - tests: move to using tox 1.7.5 + - OVF: improve ds-identify to support finding OVF iso transport. + (LP: #1731868) + - VMware: Support for user provided pre and post-customization scripts + [Maitreyee Saikia] + - citest: In NoCloudKVM provide keys via metadata not userdata. + - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix + complaints. + - Datasources: Formalize DataSource get_data and related properties. + - cli: Add clean and status subcommands + - tests: consolidate platforms into specific dirs + - ec2: Fix sandboxed dhclient background process cleanup. (LP: #1735331) + - tests: NoCloudKVMImage do not modify the original local cache image. + - tests: Enable bionic in integration tests. [Joshua Powers] + - tests: Use apt-get to install a deb so that depends get resolved. + - sysconfig: Correctly render dns and dns search info. + [Ryan McCabe] (LP: #1705804) + - integration test: replace curtin test ppa with cloud-init test ppa. + - EC2: Fix bug using fallback_nic and metadata when restoring from cache. + (LP: #1732917) + - EC2: Kill dhclient process used in sandbox dhclient. (LP: #1732964) + - ntp: fix configuration template rendering for openSUSE and SLES + (LP: #1726572) + - centos: Provide the failed #include url in error messages + - Catch UrlError when #include'ing URLs [Andrew Jorgensen] + - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs. + [Robert Schweikert] (LP: #1731022) + - rh_subscription: Perform null checks for enabled and disabled repos. + [Dave Mulford] + - Improve warning message when a template is not found. + [Robert Schweikert] (LP: #1731035) + - Replace the temporary i9n.brickies.net with i9n.cloud-init.io. + - Azure: don't generate network configuration for SRIOV devices + (LP: #1721579) + - tests: address some minor feedback missed in last merge. + - tests: integration test cleanup and full pass of nocloud-kvm. + - Gentoo: chmod +x on all files in sysvinit/gentoo/ + [ckonstanski] (LP: #1727126) + - EC2: Limit network config to fallback nic, fix local-ipv4 only + instances. (LP: #1728152) + - Gentoo: Use "rc-service" rather than "service". 
+ [Carlos Konstanski] (LP: #1727121)
 + - resizefs: Fix regression when system booted with root=PARTUUID=
 + (LP: #1725067)
 + - tools: make yum package installation more reliable
 + - citest: fix remaining warnings raised by integration tests.
 + - citest: show the actual class name in results.
 + - ntp: fix config module schema to allow empty ntp config (LP: #1724951)
 + - tools: disable fastestmirror if using proxy [Joshua Powers]
 + - schema: Log debug instead of warning when jsonschema is not available.
 + (LP: #1724354)
 + - simpletable: Fix get_string method to return table-formatted string
 + (LP: #1722566)
 + - net: Handle bridge stp values of 0 and convert to boolean type
 + - tools: Give specific --abbrev=8 to "git describe"
 + - network: bridge_stp value not always correct (LP: #1721157)
 + - tests: re-enable tox with nocloud-kvm support [Joshua Powers]
 + - systemd: remove limit on tasks created by cloud-init-final.service.
 + [Robert Schweikert] (LP: #1717969)
 + - suse: Support addition of zypper repos via cloud-config.
 + [Robert Schweikert] (LP: #1718675)
 + - tests: Combine integration configs and testcases [Joshua Powers]
 + - Azure, CloudStack: Support reading dhcp options from systemd-networkd.
 + [Dimitri John Ledkov] (LP: #1718029)
 + - packages/debian/copyright: remove mention of boto and MIT license
 + - systemd: only mention Before=apt-daily.service on debian based distros.
 + [Robert Schweikert]
 + - Add missing simpletable and simpletable tests for failed merge
 + - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen]
 + - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license.
 + [Joshua Powers] (LP: #1718681)
 + - tests: remove dependency on shlex [Joshua Powers]
 + - AltCloud: Trust PATH for udevadm and modprobe.
 + - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) (LP: #1718287)
 + - tests: remove a temp file used in bootcmd tests.
 + 17.1: - doc: document GCE datasource. [Arnd Hannemann] - suse: updates to templates to support openSUSE and SLES. diff --git a/HACKING.rst b/HACKING.rst index 93e3f424..3bb555c2 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -16,6 +16,14 @@ Do these things once When prompted for 'Project contact' or 'Canonical Project Manager' enter 'Scott Moser'. +* Configure git with your email and name for commit messages. + + Your name will appear in commit messages and will also be used in + changelogs or release notes. Give yourself credit!:: + + git config user.name "Your Name" + git config user.email "Your Email" + * Clone the upstream `repository`_ on Launchpad:: git clone https://git.launchpad.net/cloud-init diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 69b9e43e..3ba5903f 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -6,6 +6,8 @@ import argparse import re import sys +from cloudinit.util import json_dumps + from . 
import show @@ -112,7 +114,7 @@ def analyze_show(name, args): def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(dump.json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + '\n') def _get_events(infile): diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index ca4da496..b071aa19 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -2,7 +2,6 @@ import calendar from datetime import datetime -import json import sys from cloudinit import util @@ -132,11 +131,6 @@ def parse_ci_logline(line): return event -def json_dumps(data): - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def dump_events(cisource=None, rawdata=None): events = [] event = None @@ -169,7 +163,7 @@ def main(): else: cisource = sys.stdin - return json_dumps(dump_events(cisource)) + return util.json_dumps(dump_events(cisource)) if __name__ == "__main__": diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py new file mode 100644 index 00000000..de22f7f2 --- /dev/null +++ b/cloudinit/cmd/clean.py @@ -0,0 +1,103 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Define 'clean' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys + +from cloudinit.stages import Init +from cloudinit.util import ( + ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, + is_link, subp) + + +def error(msg): + sys.stderr.write("ERROR: " + msg + "\n") + + +def get_parser(parser=None): + """Build or extend an arg parser for clean utility. + + @param parser: Optional existing ArgumentParser instance representing the + clean subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='clean', + description=('Remove logs and artifacts so cloud-init re-runs on ' + 'a clean system')) + parser.add_argument( + '-l', '--logs', action='store_true', default=False, dest='remove_logs', + help='Remove cloud-init logs.') + parser.add_argument( + '-r', '--reboot', action='store_true', default=False, + help='Reboot system after logs are cleaned so cloud-init re-runs.') + parser.add_argument( + '-s', '--seed', action='store_true', default=False, dest='remove_seed', + help='Remove cloud-init seed directory /var/lib/cloud/seed.') + return parser + + +def remove_artifacts(remove_logs, remove_seed=False):
 + """Helper which removes artifacts dir and optionally log files. + + @param: remove_logs: Boolean. Set True to delete cloud-init log files. + False preserves them. + @param: remove_seed: Boolean. Set True to also delete seed subdir in + paths.cloud_dir. + @returns: 0 on success, 1 otherwise. 
+ """ + init = Init(ds_deps=[]) + init.read_cfg() + if remove_logs: + for log_file in get_config_logfiles(init.cfg): + del_file(log_file) + + if not os.path.isdir(init.paths.cloud_dir): + return 0 # Artifacts dir already cleaned + with chdir(init.paths.cloud_dir): + for path in os.listdir('.'): + if path == 'seed' and not remove_seed: + continue + try: + if os.path.isdir(path) and not is_link(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 + return 0 + + +def handle_clean_args(name, args): + """Handle calls to 'cloud-init clean' as a subcommand.""" + exit_code = remove_artifacts(args.remove_logs, args.remove_seed) + if exit_code == 0 and args.reboot: + cmd = ['shutdown', '-r', 'now'] + try: + subp(cmd, capture=False) + except ProcessExecutionError as e: + error( + 'Could not reboot this system using "{0}": {1}'.format( + cmd, str(e))) + exit_code = 1 + return exit_code + + +def main(): + """Tool to collect and tar all cloud-init related logs.""" + parser = get_parser() + sys.exit(handle_clean_args('clean', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 6fb9d9e7..d2f1b778 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg): LOG.debug("no di_report found in config.") return - dicfg = cfg.get('di_report', {}) + dicfg = cfg['di_report'] + if dicfg is None: + # ds-identify may write 'di_report:\n #comment\n' + # which reads as {'di_report': None} + LOG.debug("di_report was None.") + return + if not isinstance(dicfg, dict): LOG.warning("di_report config not a dictionary: %s", dicfg) return @@ -603,7 +609,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-config', 'modules-final') + modes = ('init', 'init-local', 'modules-init', 'modules-config', + 'modules-final') + if mode not in modes: + raise ValueError( + "Invalid cloud init mode specified '{0}'".format(mode)) status = None if mode == 'init-local': @@ -615,16 +625,18 @@ def status_wrapper(name, args, data_d=None, link_d=None): except Exception: pass + nullstatus = { + 'errors': [], + 'start': None, + 'finished': None, + } if status is None: - nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, - } status = {'v1': {}} for m in modes: status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None + elif mode not in status['v1']: + status['v1'][mode] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode @@ -767,6 +779,12 @@ def main(sysv_args=None): parser_collect_logs = subparsers.add_parser( 'collect-logs', help='Collect and tar all cloud-init debug info') + parser_clean = subparsers.add_parser( + 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + + parser_status = subparsers.add_parser( + 'status', help='Report cloud-init status or wait on completion.') + if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost if sysv_args[0] == 'analyze': @@ -783,6 +801,18 @@ def main(sysv_args=None): logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( action=('collect-logs', handle_collect_logs_args)) + elif sysv_args[0] == 'clean': + from cloudinit.cmd.clean import ( + get_parser as clean_parser, handle_clean_args) + clean_parser(parser_clean) + parser_clean.set_defaults( + action=('clean', 
handle_clean_args)) + elif sysv_args[0] == 'status': + from cloudinit.cmd.status import ( + get_parser as status_parser, handle_status_args) + status_parser(parser_status) + parser_status.set_defaults( + action=('status', handle_status_args)) args = parser.parse_args(args=sysv_args) diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py new file mode 100644 index 00000000..d7aaee9d --- /dev/null +++ b/cloudinit/cmd/status.py @@ -0,0 +1,160 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Define 'status' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys +from time import gmtime, strftime, sleep + +from cloudinit.distros import uses_systemd +from cloudinit.stages import Init +from cloudinit.util import get_cmdline, load_file, load_json + +CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' + +# customer visible status messages +STATUS_ENABLED_NOT_RUN = 'not run' +STATUS_RUNNING = 'running' +STATUS_DONE = 'done' +STATUS_ERROR = 'error' +STATUS_DISABLED = 'disabled' + + +def get_parser(parser=None): + """Build or extend an arg parser for status utility. + + @param parser: Optional existing ArgumentParser instance representing the + status subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='status', + description='Report run status of cloud init') + parser.add_argument( + '-l', '--long', action='store_true', default=False, + help=('Report long format of statuses including run stage name and' + ' error messages')) + parser.add_argument( + '-w', '--wait', action='store_true', default=False, + help='Block waiting on cloud-init to complete') + return parser + + +def handle_status_args(name, args): + """Handle calls to 'cloud-init status' as a subcommand.""" + # Read configured paths + init = Init(ds_deps=[]) + init.read_cfg() + + status, status_detail, time = _get_status_details(init.paths) + if args.wait: + while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): + sys.stdout.write('.') + sys.stdout.flush() + status, status_detail, time = _get_status_details(init.paths) + sleep(0.25) + sys.stdout.write('\n') + if args.long: + print('status: {0}'.format(status)) + if time: + print('time: {0}'.format(time)) + print('detail:\n{0}'.format(status_detail)) + else: + print('status: {0}'.format(status)) + return 1 if status == STATUS_ERROR else 0 + + +def _is_cloudinit_disabled(disable_file, paths): + """Report whether cloud-init is disabled. + + @param disable_file: The path to the cloud-init disable file. + @param paths: An initialized cloudinit.helpers.Paths object. + @returns: A tuple containing (bool, reason) about cloud-init's status and + why. 
+ """ + is_disabled = False + cmdline_parts = get_cmdline().split() + if not uses_systemd(): + reason = 'Cloud-init enabled on sysvinit' + elif 'cloud-init=enabled' in cmdline_parts: + reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + elif os.path.exists(disable_file): + is_disabled = True + reason = 'Cloud-init disabled by {0}'.format(disable_file) + elif 'cloud-init=disabled' in cmdline_parts: + is_disabled = True + reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' + elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + is_disabled = True + reason = 'Cloud-init disabled by cloud-init-generator' + else: + reason = 'Cloud-init enabled by systemd cloud-init-generator' + return (is_disabled, reason) + + +def _get_status_details(paths): + """Return a 3-tuple of status, status_details and time of last event. + + @param paths: An initialized cloudinit.helpers.paths object. + + Values are obtained from parsing paths.run_dir/status.json. + """ + + status = STATUS_ENABLED_NOT_RUN + status_detail = '' + status_v1 = {} + + status_file = os.path.join(paths.run_dir, 'status.json') + + (is_disabled, reason) = _is_cloudinit_disabled( + CLOUDINIT_DISABLED_FILE, paths) + if is_disabled: + status = STATUS_DISABLED + status_detail = reason + if os.path.exists(status_file): + status_v1 = load_json(load_file(status_file)).get('v1', {}) + errors = [] + latest_event = 0 + for key, value in sorted(status_v1.items()): + if key == 'stage': + if value: + status_detail = 'Running in stage: {0}'.format(value) + elif key == 'datasource': + status_detail = value + elif isinstance(value, dict): + errors.extend(value.get('errors', [])) + start = value.get('start') or 0 + finished = value.get('finished') or 0 + if finished == 0 and start != 0: + status = STATUS_RUNNING + event_time = max(start, finished) + if event_time > latest_event: + latest_event = event_time + if errors: + status = STATUS_ERROR + status_detail = '\n'.join(errors) + elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: + status = STATUS_DONE + if latest_event: + time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + else: + time = '' + return status, status_detail, time + + +def main(): + """Tool to report status of cloud-init.""" + parser = get_parser() + sys.exit(handle_status_args('status', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/cloudinit/cmd/tests/__init__.py diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py new file mode 100644 index 00000000..6713af4f --- /dev/null +++ b/cloudinit/cmd/tests/test_clean.py @@ -0,0 +1,176 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.cmd import clean +from cloudinit.util import ensure_dir, sym_link, write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock +from collections import namedtuple +import os +from six import StringIO + +mypaths = namedtuple('MyPaths', 'cloud_dir') + + +class TestClean(CiTestCase): + + def setUp(self): + super(TestClean, self).setUp() + self.new_root = self.tmp_dir() + self.artifact_dir = self.tmp_path('artifacts', self.new_root) + self.log1 = self.tmp_path('cloud-init.log', self.new_root) + self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) + + class FakeInit(object): + cfg = {'def_log_file': self.log1, + 'output': {'all': '|tee -a {0}'.format(self.log2)}} + paths = mypaths(cloud_dir=self.artifact_dir) + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test_remove_artifacts_removes_logs(self): + """remove_artifacts removes logs when remove_logs is True.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + self.assertFalse( + os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=True) + self.assertFalse(os.path.exists(self.log1), 'Unexpected file') + self.assertFalse(os.path.exists(self.log2), 'Unexpected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_preserves_logs(self): + """remove_artifacts leaves logs when remove_logs is False.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertTrue(os.path.exists(self.log1), 'Missing expected file') + self.assertTrue(os.path.exists(self.log2), 'Missing expected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_removes_unlinks_symlinks(self): + """remove_artifacts cleans artifacts dir unlinking any symlinks.""" + dir1 = os.path.join(self.artifact_dir, 'dir1') + ensure_dir(dir1) + symlink = os.path.join(self.artifact_dir, 'mylink') + sym_link(dir1, symlink) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for path in (dir1, symlink): + self.assertFalse( + os.path.exists(path), + 'Unexpected {0} dir'.format(path)) + + def test_remove_artifacts_removes_artifacts_skipping_seed(self): + """remove_artifacts cleans artifacts dir with exception of seed dir.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for expected_dir in dirs[:2]: + self.assertTrue( + os.path.exists(expected_dir), + 'Missing {0} dir'.format(expected_dir)) + for deleted_dir in dirs[2:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_removes_artifacts_removes_seed(self): + """remove_artifacts removes seed dir when remove_seed is True.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + 
os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False, remove_seed=True) + self.assertEqual(0, retcode) + self.assertTrue( + os.path.exists(self.artifact_dir), 'Missing artifact dir') + for deleted_dir in dirs[1:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_returns_one_on_errors(self): + """remove_artifacts returns non-zero on failure and prints an error.""" + ensure_dir(self.artifact_dir) + ensure_dir(os.path.join(self.artifact_dir, 'dir1')) + + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'del_dir': {'side_effect': OSError('oops')}, + 'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(1, retcode) + self.assertEqual( + 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + + def test_handle_clean_args_reboots(self): + """handle_clean_args_reboots when reboot arg is provided.""" + + called_cmds = [] + + def fake_subp(cmd, capture): + called_cmds.append((cmd, capture)) + return '', '' + + myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') + cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'subp': {'side_effect': fake_subp}, + 'Init': {'side_effect': self.init_class}}, + clean.handle_clean_args, name='does not matter', args=cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [(['shutdown', '-r', 'now'], False)], called_cmds) + + def test_status_main(self): + '''clean.main can be run as a standalone script.''' + write_file(self.log1, 'cloud-init-log') + with self.assertRaises(SystemExit) as context_manager: + wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}, + 'sys.argv': {'new': ['clean', '--logs']}}, + clean.main) + + self.assertRaisesCodeEqual(0, context_manager.exception.code) + self.assertFalse( + os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) + + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py new file mode 100644 index 00000000..a7c0a91a --- /dev/null +++ b/cloudinit/cmd/tests/test_status.py @@ -0,0 +1,368 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
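
The status tests below all revolve around the status.json format that ``_get_status_details`` parses above: a top-level ``v1`` map whose ``stage`` and ``datasource`` keys are scalars, and whose per-stage entries carry ``start``, ``finished`` and ``errors``. The following condensed reader is an illustrative sketch of that reduction, not the shipped implementation::

    # Standalone sketch of how status.json reduces to a single status
    # value; mirrors _get_status_details above but is illustrative only.
    import json

    def summarize_status(status_json_text):
        v1 = json.loads(status_json_text).get('v1', {})
        errors = []
        running = False
        latest_event = 0
        for value in v1.values():
            if not isinstance(value, dict):
                continue  # 'stage' and 'datasource' are scalar entries
            errors.extend(value.get('errors', []))
            start = value.get('start') or 0
            finished = value.get('finished') or 0
            if start and not finished:
                running = True
            latest_event = max(latest_event, start, finished)
        if errors:
            return 'error'
        if running:
            return 'running'
        return 'done' if latest_event else 'not run'

    # One unfinished stage reports 'running', as in the tests below.
    assert summarize_status(
        '{"v1": {"init": {"start": 1, "finished": null}}}') == 'running'
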
+ +from collections import namedtuple +import os +from six import StringIO +from textwrap import dedent + +from cloudinit.atomic_helper import write_json +from cloudinit.cmd import status +from cloudinit.util import write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'long wait') + + +class TestStatus(CiTestCase): + + def setUp(self): + super(TestStatus, self).setUp() + self.new_root = self.tmp_dir() + self.status_file = self.tmp_path('status.json', self.new_root) + self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) + self.paths = mypaths(run_dir=self.new_root) + + class FakeInit(object): + paths = self.paths + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test__is_cloudinit_disabled_false_on_sysvinit(self): + '''When not in an environment using systemd, return False.''' + write_file(self.disable_file, '') # Create the ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': False}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse( + is_disabled, 'expected enabled cloud-init on sysvinit') + self.assertEqual('Cloud-init enabled on sysvinit', reason) + + def test__is_cloudinit_disabled_true_on_disable_file(self): + '''When using systemd and disable_file is present return disabled.''' + write_file(self.disable_file, '') # Create observed disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by {0}'.format(self.disable_file), reason) + + def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): + '''Not disabled when using systemd and enabled via commandline.''' + write_file(self.disable_file, '') # Create ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=enabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse(is_disabled, 'expected enabled cloud-init') + self.assertEqual( + 'Cloud-init enabled by kernel command line cloud-init=enabled', + reason) + + def test__is_cloudinit_disabled_true_on_kernel_cmdline(self): + '''When using systemd and disable_file is present return disabled.''' + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=disabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by kernel parameter cloud-init=disabled', + reason) + + def test__is_cloudinit_disabled_true_when_generator_disables(self): + '''When cloud-init-generator doesn't write enabled file return True.''' + enabled_file = os.path.join(self.paths.run_dir, 'enabled') + self.assertFalse(os.path.exists(enabled_file)) + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) + + def 
test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): + '''Report enabled when systemd generator creates the enabled file.''' + enabled_file = os.path.join(self.paths.run_dir, 'enabled') + write_file(enabled_file, '') + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something ignored'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse(is_disabled, 'expected enabled cloud-init') + self.assertEqual( + 'Cloud-init enabled by systemd cloud-init-generator', reason) + + def test_status_returns_not_run(self): + '''When status.json does not exist yet, return 'not run'.''' + self.assertFalse( + os.path.exists(self.status_file), 'Unexpected status.json found') + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: not run\n', m_stdout.getvalue()) + + def test_status_returns_disabled_long_on_presence_of_disable_file(self): + '''When cloudinit is disabled, return disabled reason.''' + + checked_files = [] + + def fakeexists(filepath): + checked_files.append(filepath) + status_file = os.path.join(self.paths.run_dir, 'status.json') + return bool(not filepath == status_file) + + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'os.path.exists': {'side_effect': fakeexists}, + '_is_cloudinit_disabled': (True, 'disabled for some reason'), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [os.path.join(self.paths.run_dir, 'status.json')], + checked_files) + expected = dedent('''\ + status: disabled + detail: + disabled for some reason + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running(self): + '''Report running when status exists with an unfinished stage.''' + write_json(self.status_file, + {'v1': {'init': {'start': 1, 'finished': None}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: running\n', m_stdout.getvalue()) + + def test_status_returns_done(self): + '''Reports done when stage is None and all stages are finished.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'blah': {'finished': 123.456}, + 'init': {'errors': [], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: done\n', m_stdout.getvalue()) + + def 
test_status_returns_done_long(self): + '''Long format of done status includes datasource info.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'start': 124.567, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + expected = dedent('''\ + status: done + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_on_errors(self): + '''Reports error when any stage has errors.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'blah': {'errors': [], 'finished': 123.456}, + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual('status: error\n', m_stdout.getvalue()) + + def test_status_on_errors_long(self): + '''Long format of error status includes all error messages.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'errors': ['error2', 'error3'], + 'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + expected = dedent('''\ + status: error + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + error1 + error2 + error3 + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running_long_format(self): + '''Long format reports the stage in which we are running.''' + write_json( + self.status_file, + {'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + expected = dedent('''\ + status: running + time: Thu, 01 Jan 1970 00:02:04 +0000 + detail: + Running in stage: init + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_wait_blocks_until_done(self): + '''Specifying wait will poll every 1/4 second until done state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 
'init-local': {'start': 123.45, 'finished': 123.46}}} + done_json = { + 'v1': {'stage': None, + 'init': {'start': 124.456, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, done_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) + + def test_status_wait_blocks_until_error(self): + '''Specifying wait will poll every 1/4 second until error state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + error_json = { + 'v1': {'stage': None, + 'init': {'errors': ['error1'], 'start': 124.456, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, error_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) + + def test_status_main(self): + '''status.main can be run as a standalone script.''' + write_json(self.status_file, + {'v1': {'init': {'start': 1, 'finished': None}}}) + with self.assertRaises(SystemExit) as context_manager: + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + wrap_and_call( + 'cloudinit.cmd.status', + {'sys.argv': {'new': ['status']}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.main) + self.assertRaisesCodeEqual(0, context_manager.exception.code) + self.assertEqual('status: running\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 177cbcf7..5b9cbca0 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _): cfg = ocfg.get('apt', {}) if not isinstance(cfg, dict): - raise ValueError("Expected dictionary for 'apt' config, found %s", - type(cfg)) + raise ValueError( + "Expected dictionary for 'apt' config, found {config_type}".format( + config_type=type(cfg))) apply_debconf_selections(cfg, target) apply_apt(cfg, cloud, target) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index c2b83aea..c3e8c484 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -788,7 +788,8 
@@ def mkpart(device, definition): # This prevents you from overwriting the device LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): - raise Exception("Device %s is not a disk device!", device) + raise Exception( + 'Device {device} is not a disk device!'.format(device=device)) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -945,8 +946,9 @@ def mkfs(fs_cfg): # Check that we can create the FS if not (fs_type or fs_cmd): - raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd " - "must be set.", label) + raise Exception( + "No way to create filesystem '{label}'. fs_type or fs_cmd " + "must be set.".format(label=label)) # Create the commands shell = False diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 8f9f1abd..eaf1e940 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args): ls_cloudcfg = cfg.get("landscape", {}) if not isinstance(ls_cloudcfg, (dict)): - raise RuntimeError(("'landscape' key existed in config," - " but not a dictionary type," - " is a %s instead"), - type_utils.obj_name(ls_cloudcfg)) + raise RuntimeError( + "'landscape' key existed in config, but not a dictionary type," + " is a {_type} instead".format( + _type=type_utils.obj_name(ls_cloudcfg))) if not ls_cloudcfg: return diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index f50bcb35..cbd0237d 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args): # TODO drop this when validate_cloudconfig_schema is strict=True if not isinstance(ntp_cfg, (dict)): - raise RuntimeError(("'ntp' key existed in config," - " but not a dictionary type," - " is a %s %instead"), type_utils.obj_name(ntp_cfg)) + raise RuntimeError( + "'ntp' key existed in config, but not a dictionary type," + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) validate_cloudconfig_schema(cfg, schema) if ntp_installable(): @@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None): if not template_fn: template_fn = cloud.get_template_filename('ntp.conf') if not template_fn: - raise RuntimeError(("No template found, " - "not rendering %s"), path) + raise RuntimeError( + 'No template found, not rendering {path}'.format(path=path)) templater.render_to_file(template_fn, path, params) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index eba58b02..4da3a588 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -194,6 +194,7 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): + ret = 1 try: proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 0d282e63..cec22bb7 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema) # Supplement python help() def _resize_btrfs(mount_point, devpth): - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + # If "/" is ro resize will fail. However it should be allowed since resize + # makes everything bigger and subvolumes that are not ro will benefit. 
+ # Use a subvolume that is not ro to trick the resize operation to do the + # "right" thing. The use of ".snapshots" is specific to "snapper"; a generic + # solution would be to walk the subvolumes and find a rw mounted subvolume. + if (not util.mount_is_read_write(mount_point) and + os.path.isdir("%s/.snapshots" % mount_point)): + return ('btrfs', 'filesystem', 'resize', 'max', + '%s/.snapshots' % mount_point) + else: + return ('btrfs', 'filesystem', 'resize', 'max', mount_point) def _resize_ext(mount_point, devpth): diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index a9d21e78..530808ce 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -276,9 +276,8 @@ class SubscriptionManager(object): cmd = ['attach', '--auto'] try: return_out, return_err = self._sub_man_cli(cmd) - except util.ProcessExecutionError: - self.log_warn("Auto-attach failed with: " - "{0}]".format(return_err.strip())) + except util.ProcessExecutionError as e: + self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): if line is not "": diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 50ff9e35..af08788c 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to write config files to may be specified in ``config_dir``, which defaults to ``/etc/rsyslog.d``. -A list of configurations for for rsyslog can be specified under the ``configs`` -key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or -a dictionary. Each config entry contains a configuration string and a file to +A list of configurations for rsyslog can be specified under the ``configs`` key +in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a +dictionary. Each config entry contains a configuration string and a file to write it to. For config entries that are a dictionary, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of -the ``config_filename`` key is used. A file with the selected filename will -be written inside the directory specified by ``config_dir``. +the ``config_filename`` key is used. A file with the selected filename will be +written inside the directory specified by ``config_dir``. The command to use to reload the rsyslog service after the config has been updated can be specified in ``service_reload_command``. 
If this is set to diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index e76b9c09..65f6e777 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None): cmd = command[0] if not util.which(cmd): if required: - raise ValueError("command '%s' not found but required=true", cmd) + raise ValueError( + "command '{cmd}' not found but required=true".format(cmd=cmd)) else: LOG.debug("command '%s' not found for seed_command", cmd) return diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py index fe0cc73e..e82c0811 100644 --- a/cloudinit/config/cc_snap_config.py +++ b/cloudinit/config/cc_snap_config.py @@ -87,7 +87,9 @@ def add_assertions(assertions=None): assertions = [] if not isinstance(assertions, list): - raise ValueError('assertion parameter was not a list: %s', assertions) + raise ValueError( + 'assertion parameter was not a list: {assertions}'.format( + assertions=assertions)) snap_cmd = [SNAPPY_CMD, 'ack'] combined = "\n".join(assertions) @@ -115,7 +117,8 @@ def add_snap_user(cfg=None): cfg = {} if not isinstance(cfg, dict): - raise ValueError('configuration parameter was not a dict: %s', cfg) + raise ValueError( + 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg)) snapuser = cfg.get('email', None) if not snapuser: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d5becd12..55260eae 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -45,6 +45,10 @@ OSFAMILIES = { LOG = logging.getLogger(__name__) +# This is a best guess regex, based on current EC2 AZs on 2017-12-11. +# It could break when Amazon adds new regions and new AZs. +_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') + @six.add_metaclass(abc.ABCMeta) class Distro(object): @@ -102,11 +106,8 @@ class Distro(object): self._apply_hostname(writeable_hostname) def uses_systemd(self): - try: - res = os.lstat('/run/systemd/system') - return stat.S_ISDIR(res.st_mode) - except Exception: - return False + """Wrapper to report whether this distro uses systemd or sysvinit.""" + return uses_systemd() @abc.abstractmethod def package_command(self, cmd, args=None, pkgs=None): @@ -686,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, if not mirror_info: mirror_info = {} - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) - # the region is us-east-1. so region = az[0:-1] - directions_re = '|'.join([ - 'central', 'east', 'north', 'northeast', 'northwest', - 'south', 'southeast', 'southwest', 'west']) - ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) - subst = {} if data_source and data_source.availability_zone: subst['availability_zone'] = data_source.availability_zone - if re.match(ec2_az_re, data_source.availability_zone): + # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) + # the region is us-east-1. 
so region = az[0:-1] + if _EC2_AZ_RE.match(data_source.availability_zone): subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] if data_source and data_source.region: @@ -761,4 +757,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.copy(tz_file, tz_local) return + +def uses_systemd(): + try: + res = os.lstat('/run/systemd/system') + return stat.S_ISDIR(res.st_mode) + except Exception: + return False + + # vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index bad112fe..aa468bca 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -116,6 +116,7 @@ class Distro(distros.Distro): (out, err) = util.subp(['ifconfig', '-a']) ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] + bsddev = 'NOT_FOUND' for line in ifconfigoutput: m = re.match('^\w+', line) if m: @@ -347,15 +348,9 @@ class Distro(distros.Distro): bymac[Distro.get_interface_mac(n)] = { 'name': n, 'up': self.is_up(n), 'downable': None} + nics_with_addresses = set() if check_downable: - nics_with_addresses = set() - ipv6 = self.get_ipv6() - ipv4 = self.get_ipv4() - for bytes_out in (ipv6, ipv4): - for i in ipv6: - nics_with_addresses.update(i) - for i in ipv4: - nics_with_addresses.update(i) + nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) for d in bymac.values(): d['downable'] = (d['up'] is False or diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 723d6bd6..d6c61e4c 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -1,6 +1,8 @@ # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2014 Amazon.com, Inc. or its affiliates. # # Author: Joshua Harlow <harlowja@yahoo-inc.com> +# Author: Andrew Jorgensen <ajorgens@amazon.com> # # This file is part of cloud-init. See LICENSE file for license information. @@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest', return user_data -def get_instance_metadata(api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): - md_url = url_helper.combine_url(metadata_address, api_version) - # Note, 'meta-data' explicitly has trailing /. - # this is required for CloudStack (LP: #1356855) - md_url = url_helper.combine_url(md_url, 'meta-data/') +def _get_instance_metadata(tree, api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) @@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest', md = {} return md except Exception: - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) + util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url) return {} + +def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + # Note, 'meta-data' explicitly has trailing /. 
+ # this is required for CloudStack (LP: #1356855) + return _get_instance_metadata(tree='meta-data/', api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) + + +def get_instance_identity(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + return _get_instance_metadata(tree='dynamic/instance-identity', + api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) # vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a1b0db10..c015e793 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/" DEFAULT_PRIMARY_INTERFACE = 'eth0' -def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')): +def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): """Sorting for Humans: natural sort order. Can be use as the key to sort functions. This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as @@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None): # if eth0 exists use it above anything else, otherwise get the interface # that we can read 'first' (using the sorted defintion of first). - names = list(sorted(potential_interfaces, key=_natural_sort_key)) + names = list(sorted(potential_interfaces, key=natural_sort_key)) if DEFAULT_PRIMARY_INTERFACE in names: names.remove(DEFAULT_PRIMARY_INTERFACE) names.insert(0, DEFAULT_PRIMARY_INTERFACE) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 38b27a52..7b2cc9db 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): prev = names[name]['entry'] if prev.get('mac_address') != entry.get('mac_address'): raise ValueError( - "device '%s' was defined multiple times (%s)" - " but had differing mac addresses: %s -> %s.", - (name, ' '.join(names[name]['files']), - prev.get('mac_address'), entry.get('mac_address'))) + "device '{name}' was defined multiple times ({files})" + " but had differing mac addresses: {old} -> {new}.".format( + name=name, files=' '.join(names[name]['files']), + old=prev.get('mac_address'), + new=entry.get('mac_address'))) prev['subnets'].extend(entry['subnets']) names[name]['files'].append(cfg_file) else: diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 875a4609..087c0c03 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -10,7 +10,9 @@ import os import re import signal -from cloudinit.net import find_fallback_nic, get_devicelist +from cloudinit.net import ( + EphemeralIPv4Network, find_fallback_nic, get_devicelist) +from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util from six import StringIO @@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception): pass +class NoDHCPLeaseError(Exception): + """Raised when unable to get a DHCP lease.""" + pass + + +class EphemeralDHCPv4(object): + def __init__(self, iface=None): + self.iface = iface + self._ephipv4 = None + + def __enter__(self): + try: + leases = maybe_perform_dhcp_discovery(self.iface) + except InvalidDHCPLeaseFileError: + raise NoDHCPLeaseError() + if not leases: + raise NoDHCPLeaseError() + lease = leases[-1] + LOG.debug("Received dhcp lease on %s for %s/%s", + 
lease['interface'], lease['fixed-address'], + lease['subnet-mask']) + nmap = {'interface': 'interface', 'ip': 'fixed-address', + 'prefix_or_mask': 'subnet-mask', + 'broadcast': 'broadcast-address', + 'router': 'routers'} + kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) + if not kwargs['broadcast']: + kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + ephipv4 = EphemeralIPv4Network(**kwargs) + ephipv4.__enter__() + self._ephipv4 = ephipv4 + return lease + + def __exit__(self, excp_type, excp_value, excp_traceback): + if not self._ephipv4: + return + self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) + + def maybe_perform_dhcp_discovery(nic=None): """Perform dhcp discovery if nic valid and dhclient command exists. diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index e9e2cf4e..fe667d88 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -474,8 +474,9 @@ class NetworkStateInterpreter(object): elif bridge_stp in ['off', '0', 0]: bridge_stp = False else: - raise ValueError("Cannot convert bridge_stp value" - "(%s) to boolean", bridge_stp) + raise ValueError( + 'Cannot convert bridge_stp value ({stp}) to' + ' boolean'.format(stp=bridge_stp)) iface.update({'bridge_stp': bridge_stp}) interfaces.update({iface['name']: iface}) @@ -692,7 +693,8 @@ class NetworkStateInterpreter(object): elif cmd_type == "bond": self.handle_bond(v1_cmd) else: - raise ValueError('Unknown command type: %s', cmd_type) + raise ValueError('Unknown command type: {cmd_type}'.format( + cmd_type=cmd_type)) def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" @@ -959,4 +961,16 @@ def mask_to_net_prefix(mask): return ipv4_mask_to_net_prefix(mask) +def mask_and_ipv4_to_bcast_addr(mask, ip): + """Calculate the broadcast address from the subnet mask and ip addr. 
+ + Supports ipv4 only.""" + ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) + mask_dec = ipv4_mask_to_net_prefix(mask) + bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) + bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) + for i in range(4)[::-1]]) + return bcast_str + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 43a7e42c..7ac8288d 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS" class DataSourceAliYun(EC2.DataSourceEc2): + dsname = 'AliYun' metadata_urls = ['http://100.100.100.200'] # The minimum supported metadata_version from the ec2 metadata apis diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index c78ad9eb..e1d0055b 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir): class DataSourceAltCloud(sources.DataSource): + + dsname = 'AltCloud' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource): return 'UNKNOWN' - def get_data(self): + def _get_data(self): ''' Description: User Data is passed to the launching instance which @@ -142,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource): else: cloud_type = self.get_cloud_type() - LOG.debug('cloud_type: ' + str(cloud_type)) + LOG.debug('cloud_type: %s', str(cloud_type)) if 'RHEV' in cloud_type: if self.user_data_rhevm(): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 14367e9c..4bcbf3a4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -11,13 +11,16 @@ from functools import partial import os import os.path import re +from time import time from xml.dom import minidom import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric +from cloudinit.url_helper import readurl, wait_for_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -26,10 +29,16 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] AGENT_START_BUILTIN = "__builtin__" -BOUNCE_COMMAND = [ +BOUNCE_COMMAND_IFUP = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" ] +BOUNCE_COMMAND_FREEBSD = [ + 'sh', '-xc', + ("i=$interface; x=0; ifconfig down $i || x=$?; " + "ifconfig up $i || x=$?; exit $x") +] + # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. 
RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' @@ -38,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' +REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" +IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" +IMDS_RETRIES = 5 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -177,11 +189,6 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") - BOUNCE_COMMAND = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") - ] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -190,7 +197,7 @@ BUILTIN_DS_CONFIG = { 'hostname_bounce': { 'interface': DEFAULT_PRIMARY_NIC, 'policy': True, - 'command': BOUNCE_COMMAND, + 'command': 'builtin', 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, @@ -246,6 +253,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): + + dsname = 'Azure' _negotiated = False def __init__(self, sys_cfg, distro, paths): @@ -273,19 +282,20 @@ class DataSourceAzure(sources.DataSource): with temporary_hostname(azure_hostname, self.ds_cfg, hostname_command=hostname_command) \ - as previous_hostname: - if (previous_hostname is not None and + as previous_hn: + if (previous_hn is not None and util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] # "Bouncing" the network try: - perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hostname) + return perform_hostname_bounce(hostname=azure_hostname, + cfg=cfg, + prev_hostname=previous_hn) except Exception as e: LOG.warning("Failed publishing hostname: %s", e) util.logexc(LOG, "handling set_hostname failed") + return False def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') @@ -330,7 +340,7 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata - def get_data(self): + def _get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we # need to look in the datadir and consider that valid @@ -342,15 +352,20 @@ class DataSourceAzure(sources.DataSource): ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] + if os.path.isfile(REPROVISION_MARKER_FILE): + candidates.insert(0, "IMDS") candidates.extend(list_possible_azure_ds_devs()) if ddir: candidates.append(ddir) found = None - + reprovision = False for cdev in candidates: try: - if cdev.startswith("/dev/"): + if cdev == "IMDS": + ret = None + reprovision = True + elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, mtype="udf", sync=False) @@ -367,6 +382,8 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue + if reprovision or self._should_reprovision(ret): + ret = self._reprovision() (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) @@ -425,6 +442,83 @@ class DataSourceAzure(sources.DataSource): LOG.debug("negotiating already done for %s", self.get_instance_id()) + def _poll_imds(self, report_ready=True): + """Poll IMDS for the new provisioning data until we get a valid + response. 
Then return the returned JSON object."""
+        url = IMDS_URL + "?api-version=2017-04-02"
+        headers = {"Metadata": "true"}
+        LOG.debug("Start polling IMDS")
+
+        def sleep_cb(response, loop_n):
+            return 1
+
+        def exception_cb(msg, exception):
+            if isinstance(exception, UrlError) and exception.code == 404:
+                return
+            LOG.warning("Exception during polling. Will try DHCP.",
+                        exc_info=True)
+
+            # If we get an exception while trying to call IMDS, we
+            # call DHCP and setup the ephemeral network to acquire the new IP.
+            raise exception
+
+        need_report = report_ready
+        for i in range(IMDS_RETRIES):
+            try:
+                with EphemeralDHCPv4() as lease:
+                    if need_report:
+                        self._report_ready(lease=lease)
+                        need_report = False
+                    wait_for_url([url], max_wait=None, timeout=60,
+                                 status_cb=LOG.info,
+                                 headers_cb=lambda url: headers, sleep_time=1,
+                                 exception_cb=exception_cb,
+                                 sleep_time_cb=sleep_cb)
+                    return str(readurl(url, headers=headers))
+            except Exception:
+                LOG.debug("Exception during polling-retrying dhcp" +
+                          " %d more time(s).", (IMDS_RETRIES - i),
+                          exc_info=True)
+
+    def _report_ready(self, lease):
+        """Tells the fabric that provisioning has completed
+           before we go into our polling loop."""
+        try:
+            get_metadata_from_fabric(None, lease['unknown-245'])
+        except Exception as exc:
+            LOG.warning(
+                "Error communicating with Azure fabric; You may experience"
+                " connectivity issues.", exc_info=True)
+
+    def _should_reprovision(self, ret):
+        """Whether or not we should poll IMDS for reprovisioning data.
+        Also sets a marker file to poll IMDS.
+
+        The marker file is used for the following scenario: the VM boots into
+        this polling loop, which we expect to be proceeding infinitely until
+        the VM is picked up. If for whatever reason the platform moves us to a
+        new host (for instance a hardware issue), we need to keep polling.
+        However, since the VM reports ready to the Fabric, we will not attach
+        the ISO, thus cloud-init needs to have a way of knowing that it should
+        jump back into the polling loop in order to retrieve the ovf_env."""
+        if not ret:
+            return False
+        (md, self.userdata_raw, cfg, files) = ret
+        path = REPROVISION_MARKER_FILE
+        if (cfg.get('PreprovisionedVm') is True or
+                os.path.isfile(path)):
+            if not os.path.isfile(path):
+                LOG.info("Creating a marker file to poll imds")
+                util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+            return True
+        return False
+
+    def _reprovision(self):
+        """Initiate the reprovisioning workflow."""
+        contents = self._poll_imds()
+        md, ud, cfg = read_azure_ovf(contents)
+        return (md, ud, cfg, {'ovf-env.xml': contents})
+
     def _negotiate(self):
         """Negotiate with fabric and return data from it.
@@ -450,7 +544,7 @@ class DataSourceAzure(sources.DataSource):
                 "Error communicating with Azure fabric; You may experience."
                 "connectivity issues.", exc_info=True)
             return False
-
+        util.del_file(REPROVISION_MARKER_FILE)
         return fabric_data

     def activate(self, cfg, is_new_instance):
@@ -580,18 +674,19 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
     if os.path.exists(sempath):
         try:
             os.unlink(sempath)
-            LOG.debug(bmsg + " removed.")
+            LOG.debug('%s removed.', bmsg)
         except Exception as e:
             # python3 throws FileNotFoundError, python2 throws OSError
-            LOG.warning(bmsg + ": remove failed! (%s)", e)
+            LOG.warning('%s: remove failed! (%s)', bmsg, e)
     else:
-        LOG.debug(bmsg + " did not exist.")
+        LOG.debug('%s did not exist.', bmsg)
     return
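
# Illustrative sketch (editor addition, not part of the patch): the
# reprovision gate that _should_reprovision() above implements, reduced to
# its essence. The marker path mirrors REPROVISION_MARKER_FILE and 'cfg' is
# the dict parsed from the ovf environment; the helper name is hypothetical.
import os

REPROVISION_MARKER = "/var/lib/cloud/data/poll_imds"  # same path as the patch

def needs_imds_poll(cfg, marker=REPROVISION_MARKER):
    # Poll IMDS when the ovf flags PreprovisionedVm, or when a marker left
    # by a previous boot says polling must resume after a host move.
    return cfg.get('PreprovisionedVm') is True or os.path.isfile(marker)
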
 def perform_hostname_bounce(hostname, cfg, prev_hostname):
     # set the hostname to 'hostname' if it is not already set to that.
     # then, if policy is not off, bounce the interface using command
+    # Returns True if the network was bounced, False otherwise.
     command = cfg['command']
     interface = cfg['interface']
     policy = cfg['policy']
@@ -604,8 +699,15 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
     env['old_hostname'] = prev_hostname

     if command == "builtin":
-        command = BOUNCE_COMMAND
-
+        if util.is_FreeBSD():
+            command = BOUNCE_COMMAND_FREEBSD
+        elif util.which('ifup'):
+            command = BOUNCE_COMMAND_IFUP
+        else:
+            LOG.debug(
+                "Skipping network bounce: ifupdown utils aren't present.")
+            # Don't bounce as networkd handles hostname DDNS updates
+            return False
     LOG.debug("pubhname: publishing hostname [%s]", msg)
     shell = not isinstance(command, (list, tuple))
     # capture=False, see comments in bug 1202758 and bug 1206164.
@@ -613,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
         get_uptime=True, func=util.subp,
         kwargs={'args': command, 'shell': shell, 'capture': False,
                 'env': env})
+    return True


 def crtfile_to_pubkey(fname, data=None):
@@ -829,9 +932,35 @@ def read_azure_ovf(contents):
     if 'ssh_pwauth' not in cfg and password:
         cfg['ssh_pwauth'] = True

+    cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom)
+
     return (md, ud, cfg)


+def _extract_preprovisioned_vm_setting(dom):
+    """Read the preprovision flag from the ovf. It should not
+       exist unless true."""
+    platform_settings_section = find_child(
+        dom.documentElement,
+        lambda n: n.localName == "PlatformSettingsSection")
+    if not platform_settings_section or len(platform_settings_section) == 0:
+        LOG.debug("PlatformSettingsSection not found")
+        return False
+    platform_settings = find_child(
+        platform_settings_section[0],
+        lambda n: n.localName == "PlatformSettings")
+    if not platform_settings or len(platform_settings) == 0:
+        LOG.debug("PlatformSettings not found")
+        return False
+    preprovisionedVm = find_child(
+        platform_settings[0],
+        lambda n: n.localName == "PreprovisionedVm")
+    if not preprovisionedVm or len(preprovisionedVm) == 0:
+        LOG.debug("PreprovisionedVm not found")
+        return False
+    return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue)
+
+
 def encrypt_pass(password, salt_id="$6$"):
     return crypt.crypt(password, salt_id + util.rand_str(strlen=16))

diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index d7fcd45a..699a85b5 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__)


 class DataSourceBigstep(sources.DataSource):
+
+    dsname = 'Bigstep'
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.metadata = {}
         self.vendordata_raw = ""
         self.userdata_raw = ""

-    def get_data(self, apply_filter=False):
+    def _get_data(self, apply_filter=False):
         url = get_url_from_file()
         if url is None:
             return False
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 19df16b1..4eaad475 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource):
     For more information about CloudSigma's Server Context:
     http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
     """
+
+    dsname = 'CloudSigma'
+
     def __init__(self, sys_cfg, distro, paths):
         self.cepko = Cepko()
         self.ssh_public_key = ''
@@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource):
             LOG.warning("failed to
query dmi data for system product name") return False - def get_data(self): + def _get_data(self): """ Metadata is the whole server context and /meta/cloud-config is used as userdata. diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 9dc473fc..0df545fc 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object): class DataSourceCloudStack(sources.DataSource): + + dsname = 'CloudStack' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') @@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource): def get_config_obj(self): return self.cfg - def get_data(self): + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index ef374f3f..b8db6267 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -25,13 +25,16 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } FS_TYPES = ('vfat', 'iso9660') -LABEL_TYPES = ('config-2',) +LABEL_TYPES = ('config-2', 'CONFIG-2') POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))) class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): + + dsname = 'ConfigDrive' + def __init__(self, sys_cfg, distro, paths): super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None @@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self): + def _get_data(self): found = None md = {} results = {} @@ -221,7 +224,7 @@ def find_candidate_devs(probe_optical=True): config drive v2: Disk should be: * either vfat or iso9660 formated - * labeled with 'config-2' + * labeled with 'config-2' or 'CONFIG-2' """ # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5e7e66be..e0ef665e 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -27,6 +27,9 @@ MD_USE_IPV4LL = True class DataSourceDigitalOcean(sources.DataSource): + + dsname = 'DigitalOcean' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.distro = distro @@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource): def _get_sysinfo(self): return do_helper.read_sysinfo() - def get_data(self): + def _get_data(self): (is_do, droplet_id) = self._get_sysinfo() # only proceed if we know we are on DigitalOcean diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 7bbbfb63..e14553b3 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -14,7 +14,7 @@ import time from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import net -from cloudinit.net import dhcp +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util @@ -31,6 +31,7 @@ _unset = "_unset" class 
Platforms(object): + # TODO Rename and move to cloudinit.cloud.CloudNames ALIYUN = "AliYun" AWS = "AWS" BRIGHTBOX = "Brightbox" @@ -45,6 +46,7 @@ class Platforms(object): class DataSourceEc2(sources.DataSource): + dsname = 'Ec2' # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve @@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource): _fallback_interface = None def __init__(self, sys_cfg, distro, paths): - sources.DataSource.__init__(self, sys_cfg, distro, paths) + super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None self.seed_dir = os.path.join(paths.seed_dir, "ec2") - def get_data(self): + def _get_cloud_name(self): + """Return the cloud name as identified during _get_data.""" + return self.cloud_platform + + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] @@ -96,22 +102,13 @@ class DataSourceEc2(sources.DataSource): if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False - dhcp_leases = dhcp.maybe_perform_dhcp_discovery( - self.fallback_interface) - if not dhcp_leases: - # DataSourceEc2Local failed in init-local stage. DataSourceEc2 - # will still run in init-network stage. + try: + with EphemeralDHCPv4(self.fallback_interface): + return util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except NoDHCPLeaseError: return False - dhcp_opts = dhcp_leases[-1] - net_params = {'interface': dhcp_opts.get('interface'), - 'ip': dhcp_opts.get('fixed-address'), - 'prefix_or_mask': dhcp_opts.get('subnet-mask'), - 'broadcast': dhcp_opts.get('broadcast-address'), - 'router': dhcp_opts.get('routers')} - with net.EphemeralIPv4Network(**net_params): - return util.log_time( - logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) else: return self._crawl_metadata() @@ -148,7 +145,12 @@ class DataSourceEc2(sources.DataSource): return self.min_metadata_version def get_instance_id(self): - return self.metadata['instance-id'] + if self.cloud_platform == Platforms.AWS: + # Prefer the ID from the instance identity document, but fall back + return self.identity.get( + 'instanceId', self.metadata['instance-id']) + else: + return self.metadata['instance-id'] def _get_url_settings(self): mcfg = self.ds_cfg @@ -262,19 +264,31 @@ class DataSourceEc2(sources.DataSource): @property def availability_zone(self): try: - return self.metadata['placement']['availability-zone'] + if self.cloud_platform == Platforms.AWS: + return self.identity.get( + 'availabilityZone', + self.metadata['placement']['availability-zone']) + else: + return self.metadata['placement']['availability-zone'] except KeyError: return None @property def region(self): - az = self.availability_zone - if az is not None: - return az[:-1] + if self.cloud_platform == Platforms.AWS: + region = self.identity.get('region') + # Fallback to trimming the availability zone if region is missing + if self.availability_zone and not region: + region = self.availability_zone[:-1] + return region + else: + az = self.availability_zone + if az is not None: + return az[:-1] return None @property - def cloud_platform(self): + def cloud_platform(self): # TODO rename cloud_name if self._cloud_platform is None: self._cloud_platform = identify_platform() return self._cloud_platform @@ -351,6 +365,9 @@ class 
DataSourceEc2(sources.DataSource):
                 api_version, self.metadata_address)
             self.metadata = ec2.get_instance_metadata(
                 api_version, self.metadata_address)
+            if self.cloud_platform == Platforms.AWS:
+                self.identity = ec2.get_instance_identity(
+                    api_version, self.metadata_address).get('document', {})
         except Exception:
             util.logexc(
                 LOG, "Failed reading from metadata address %s",
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index ccae4200..2da34a99 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -2,8 +2,12 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

+import datetime
+import json
+
 from base64 import b64decode

+from cloudinit.distros import ug_util
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper
@@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')


 class GoogleMetadataFetcher(object):
-    headers = {'X-Google-Metadata-Request': 'True'}
+    headers = {'Metadata-Flavor': 'Google'}

     def __init__(self, metadata_address):
         self.metadata_address = metadata_address

-    def get_value(self, path, is_text):
+    def get_value(self, path, is_text, is_recursive=False):
         value = None
         try:
-            resp = url_helper.readurl(url=self.metadata_address + path,
-                                      headers=self.headers)
+            url = self.metadata_address + path
+            if is_recursive:
+                url += '/?recursive=True'
+            resp = url_helper.readurl(url=url, headers=self.headers)
         except url_helper.UrlError as exc:
             msg = "url %s raised exception %s"
             LOG.debug(msg, path, exc)
@@ -35,22 +41,29 @@ class GoogleMetadataFetcher(object):
             if is_text:
                 value = util.decode_binary(resp.contents)
             else:
-                value = resp.contents
+                value = resp.contents.decode('utf-8')
         else:
             LOG.debug("url %s returned code %s", path, resp.code)
         return value


 class DataSourceGCE(sources.DataSource):
+
+    dsname = 'GCE'
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.default_user = None
+        if distro:
+            (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
+            (self.default_user, _user_config) = ug_util.extract_default(users)
         self.metadata = dict()
         self.ds_cfg = util.mergemanydict([
             util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
             BUILTIN_DS_CONFIG])
         self.metadata_address = self.ds_cfg['metadata_url']

-    def get_data(self):
+    def _get_data(self):
         ret = util.log_time(
             LOG.debug, 'Crawl of GCE metadata service',
             read_md, kwargs={'address': self.metadata_address})
@@ -67,17 +80,18 @@ class DataSourceGCE(sources.DataSource):

     @property
     def launch_index(self):
-        # GCE does not provide lauch_index property
+        # GCE does not provide launch_index property.
         return None

     def get_instance_id(self):
         return self.metadata['instance-id']

     def get_public_ssh_keys(self):
-        return self.metadata['public-keys']
+        public_keys_data = self.metadata['public-keys-data']
+        return _parse_public_keys(public_keys_data, self.default_user)
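
# Illustrative sketch (editor addition, not part of the patch): the GCE
# metadata key format that _parse_public_keys() below consumes. Each entry
# is '<user>:<key material>'; 'google-ssh' entries append a JSON blob whose
# optional expireOn timestamp _has_expired() checks. Sample values are
# made up.
sample_keys_data = [
    'cloudinit:ssh-rsa AAAAB3NzaC1yc2E... ci@host',
    'bob:ssh-rsa AAAAB3NzaC1yc2E... google-ssh '
    '{"userName": "bob@example.com", "expireOn": "2017-01-01T00:00:00+0000"}',
]
# _parse_public_keys(sample_keys_data, default_user='bob') returns only the
# first key: the second matches an allowed user but its expireOn has passed.
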
     def get_hostname(self, fqdn=False, resolve_ip=False):
-        # GCE has long FDQN's and has asked for short hostnames
+        # GCE has long FQDNs and has asked for short hostnames.
         return self.metadata['local-hostname'].split('.')[0]

     @property
@@ -89,15 +103,58 @@ class DataSourceGCE(sources.DataSource):
         return self.availability_zone.rsplit('-', 1)[0]


-def _trim_key(public_key):
-    # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
-    # so we have to trim each key to remove the username part
+def _has_expired(public_key):
+    # Check whether an SSH key is expired. Public key input is a single SSH
+    # public key in the GCE specific key format documented here:
+    # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
+    try:
+        # Check for the Google-specific schema identifier.
+        schema, json_str = public_key.split(None, 3)[2:]
+    except (ValueError, AttributeError):
+        return False
+
+    # Do not expire keys if they do not have the expected schema identifier.
+    if schema != 'google-ssh':
+        return False
+
+    try:
+        json_obj = json.loads(json_str)
+    except ValueError:
+        return False
+
+    # Do not expire keys if there is no expiration timestamp.
+    if 'expireOn' not in json_obj:
+        return False
+
+    expire_str = json_obj['expireOn']
+    format_str = '%Y-%m-%dT%H:%M:%S+0000'
     try:
-        index = public_key.index(':')
-        if index > 0:
-            return public_key[(index + 1):]
-    except Exception:
-        return public_key
+        expire_time = datetime.datetime.strptime(expire_str, format_str)
+    except ValueError:
+        return False
+
+    # Expire the key if and only if we have exceeded the expiration timestamp.
+    return datetime.datetime.utcnow() > expire_time
+
+
+def _parse_public_keys(public_keys_data, default_user=None):
+    # Parse the SSH key data for the default user account. Public keys input is
+    # a list containing SSH public keys in the GCE specific key format
+    # documented here:
+    # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat
+    public_keys = []
+    if not public_keys_data:
+        return public_keys
+    for public_key in public_keys_data:
+        if not public_key or not all(ord(c) < 128 for c in public_key):
+            continue
+        split_public_key = public_key.split(':', 1)
+        if len(split_public_key) != 2:
+            continue
+        user, key = split_public_key
+        if user in ('cloudinit', default_user) and not _has_expired(key):
+            public_keys.append(key)
+    return public_keys


 def read_md(address=None, platform_check=True):
@@ -113,31 +170,28 @@ def read_md(address=None, platform_check=True):
         ret['reason'] = "Not running on GCE."
         return ret

-    # if we cannot resolve the metadata server, then no point in trying
+    # If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address): LOG.debug("%s is not resolvable", address) ret['reason'] = 'address "%s" is not resolvable' % address return ret - # url_map: (our-key, path, required, is_text) + # url_map: (our-key, path, required, is_text, is_recursive) url_map = [ - ('instance-id', ('instance/id',), True, True), - ('availability-zone', ('instance/zone',), True, True), - ('local-hostname', ('instance/hostname',), True, True), - ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/ssh-keys'), False, True), - ('user-data', ('instance/attributes/user-data',), False, False), - ('user-data-encoding', ('instance/attributes/user-data-encoding',), - False, True), + ('instance-id', ('instance/id',), True, True, False), + ('availability-zone', ('instance/zone',), True, True, False), + ('local-hostname', ('instance/hostname',), True, True, False), + ('instance-data', ('instance/attributes',), False, False, True), + ('project-data', ('project/attributes',), False, False, True), ] metadata_fetcher = GoogleMetadataFetcher(address) md = {} - # iterate over url_map keys to get metadata items - for (mkey, paths, required, is_text) in url_map: + # Iterate over url_map keys to get metadata items. + for (mkey, paths, required, is_text, is_recursive) in url_map: value = None for path in paths: - new_value = metadata_fetcher.get_value(path, is_text) + new_value = metadata_fetcher.get_value(path, is_text, is_recursive) if new_value is not None: value = new_value if required and value is None: @@ -146,17 +200,23 @@ def read_md(address=None, platform_check=True): return ret md[mkey] = value - if md['public-keys']: - lines = md['public-keys'].splitlines() - md['public-keys'] = [_trim_key(k) for k in lines] + instance_data = json.loads(md['instance-data'] or '{}') + project_data = json.loads(md['project-data'] or '{}') + valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] + block_project = instance_data.get('block-project-ssh-keys', '').lower() + if block_project != 'true' and not instance_data.get('sshKeys'): + valid_keys.append(project_data.get('ssh-keys')) + valid_keys.append(project_data.get('sshKeys')) + public_keys_data = '\n'.join([key for key in valid_keys if key]) + md['public-keys-data'] = public_keys_data.splitlines() if md['availability-zone']: md['availability-zone'] = md['availability-zone'].split('/')[-1] - encoding = md.get('user-data-encoding') + encoding = instance_data.get('user-data-encoding') if encoding: if encoding == 'base64': - md['user-data'] = b64decode(md['user-data']) + md['user-data'] = b64decode(instance_data.get('user-data')) else: LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) @@ -185,20 +245,19 @@ def platform_reports_gce(): return False -# Used to match classes to dependencies +# Used to match classes to dependencies. datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] -# Return a list of data sources that match this set of dependencies +# Return a list of data sources that match this set of dependencies. def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) if __name__ == "__main__": import argparse - import json import sys from base64 import b64encode @@ -214,7 +273,7 @@ if __name__ == "__main__": data = read_md(address=args.endpoint, platform_check=args.platform_check) if 'user-data' in data: # user-data is bytes not string like other things. Handle it specially. - # if it can be represented as utf-8 then do so. 
Otherwise print base64
+        # If it can be represented as utf-8 then do so. Otherwise print base64
         # encoded value in the key user-data-b64.
         try:
             data['user-data'] = data['user-data'].decode()
@@ -222,7 +281,7 @@
             sys.stderr.write("User-data cannot be decoded. "
                              "Writing as base64\n")
             del data['user-data']
-            # b64encode returns a bytes value. decode to get the string.
+            # b64encode returns a bytes value. Decode to get the string.
             data['user-data-b64'] = b64encode(data['user-data']).decode()

     print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 77df5a51..6ac88635 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -8,6 +8,7 @@

 from __future__ import print_function

+import hashlib
 import os
 import time

@@ -39,30 +40,28 @@ class DataSourceMAAS(sources.DataSource):
        hostname
        vendor-data
     """
+
+    dsname = "MAAS"
+    id_hash = None
+    _oauth_helper = None
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.base_url = None
         self.seed_dir = os.path.join(paths.seed_dir, 'maas')
-        self.oauth_helper = self._get_helper()
-
-    def _get_helper(self):
-        mcfg = self.ds_cfg
-        # If we are missing token_key, token_secret or consumer_key
-        # then just do non-authed requests
-        for required in ('token_key', 'token_secret', 'consumer_key'):
-            if required not in mcfg:
-                return url_helper.OauthUrlHelper()
+        self.id_hash = get_id_from_ds_cfg(self.ds_cfg)

-        return url_helper.OauthUrlHelper(
-            consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
-            token_secret=mcfg['token_secret'],
-            consumer_secret=mcfg.get('consumer_secret'))
+    @property
+    def oauth_helper(self):
+        if not self._oauth_helper:
+            self._oauth_helper = get_oauth_helper(self.ds_cfg)
+        return self._oauth_helper

     def __str__(self):
         root = sources.DataSource.__str__(self)
         return "%s [%s]" % (root, self.base_url)

-    def get_data(self):
+    def _get_data(self):
         mcfg = self.ds_cfg

         try:
@@ -144,6 +143,36 @@ class DataSourceMAAS(sources.DataSource):

         return bool(url)

+    def check_instance_id(self, sys_cfg):
+        """locally check if the current system is the same instance.
+
+        MAAS doesn't provide a real instance-id, and if it did, it is
+        still only available over the network. We need to check based
+        only on local resources. So compute a hash based on Oauth tokens."""
+        if self.id_hash is None:
+            return False
+        ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
+        return (self.id_hash == get_id_from_ds_cfg(ncfg))
+
+
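# Illustrative sketch (editor addition, not part of the patch): what the
# get_id_from_ds_cfg() helper below computes. The hash stays stable across
# boots as long as the OAuth credentials in the MAAS datasource config do
# not change; the config values here are made up.
import hashlib

ds_cfg = {'consumer_key': 'ck', 'token_key': 'tk', 'token_secret': 'ts'}
idstr = '\0'.join([ds_cfg.get(f, "") for f in
                   ('consumer_key', 'token_key', 'token_secret')])
print('v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest())
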
+def get_oauth_helper(cfg):
+    """Return an oauth helper instance for values in cfg.
+
+    @raises ValueError from OauthUrlHelper if some required fields have
+    true-ish values but others do not."""
+    keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+    kwargs = dict([(r, cfg.get(r)) for r in keys])
+    return url_helper.OauthUrlHelper(**kwargs)
+
+
+def get_id_from_ds_cfg(ds_cfg):
+    """Given a config, generate a unique identifier for this node."""
+    fields = ('consumer_key', 'token_key', 'token_secret')
+    idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+    # store the encoding version as part of the hash; in the event that it
+    # ever changes, we can still compute older versions.
+    return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+

 def read_maas_seed_dir(seed_d):
     if seed_d.startswith("file://"):
@@ -319,7 +348,7 @@ if __name__ == "__main__":
             sys.stderr.write("Must provide a url or a config with url.\n")
             sys.exit(1)

-        oauth_helper = url_helper.OauthUrlHelper(**creds)
+        oauth_helper = get_oauth_helper(creds)

         def geturl(url):
             # the retry is to ensure that oauth timestamp gets fixed
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index e641244d..5d3a8ddb 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__)


 class DataSourceNoCloud(sources.DataSource):
+
+    dsname = "NoCloud"
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed = None
@@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)

-    def get_data(self):
+    def _get_data(self):
         defaults = {
             "instance-id": "nocloud",
             "dsmode": self.dsmode,
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 906bb278..e63a7e39 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__)


 class DataSourceNone(sources.DataSource):
+
+    dsname = "None"
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
         self.metadata = {}
         self.userdata_raw = ''

-    def get_data(self):
+    def _get_data(self):
         # If the datasource config has any provided 'fallback'
         # userdata or metadata, use it...
         if 'userdata_raw' in self.ds_cfg:
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index ccebf11a..6e62f984 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -21,6 +21,8 @@ from cloudinit import util

 from cloudinit.sources.helpers.vmware.imc.config \
     import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script \
+    import PreCustomScript, PostCustomScript
 from cloudinit.sources.helpers.vmware.imc.config_file \
     import ConfigFile
 from cloudinit.sources.helpers.vmware.imc.config_nic \
@@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \
 from cloudinit.sources.helpers.vmware.imc.guestcust_error \
     import GuestCustErrorEnum
 from cloudinit.sources.helpers.vmware.imc.guestcust_event \
-    import GuestCustEventEnum
+    import GuestCustEventEnum as GuestCustEvent
 from cloudinit.sources.helpers.vmware.imc.guestcust_state \
     import GuestCustStateEnum
 from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
@@ -43,6 +45,9 @@ LOG = logging.getLogger(__name__)


 class DataSourceOVF(sources.DataSource):
+
+    dsname = "OVF"
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed = None
@@ -60,7 +65,7 @@ class DataSourceOVF(sources.DataSource):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)

-    def get_data(self):
+    def _get_data(self):
         found = []
         md = {}
         ud = ""
@@ -124,17 +129,31 @@ class DataSourceOVF(sources.DataSource):
                 self._vmware_cust_conf = Config(cf)
                 (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
                 self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
-                markerid = self._vmware_cust_conf.marker_id
-                markerexists =
check_marker_exists(markerid) + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) + product_marker = self._vmware_cust_conf.marker_id + hasmarkerfile = check_marker_exists( + product_marker, os.path.join(self.paths.cloud_dir, 'data')) + special_customization = product_marker and not hasmarkerfile + customscript = self._vmware_cust_conf.custom_script_name except Exception as e: - LOG.debug("Error parsing the customization Config File") - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - raise e - finally: - util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) + _raise_error_status( + "Error parsing the customization Config File", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if special_customization: + if customscript: + try: + precust = PreCustomScript(customscript, imcdirpath) + precust.execute() + except Exception as e: + _raise_error_status( + "Error executing pre-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( @@ -143,13 +162,13 @@ class DataSourceOVF(sources.DataSource): True, self.distro.osfamily) except Exception as e: - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) - raise e + _raise_error_status( + "Error preparing Network Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, + vmwareImcConfigFilePath) - if markerid and not markerexists: + if special_customization: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password @@ -161,27 +180,41 @@ class DataSourceOVF(sources.DataSource): else: LOG.debug("Changing password is not needed") except Exception as e: - LOG.debug("Error applying Password Configuration: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False - if markerid: - LOG.debug("Handle marker creation") + _raise_error_status( + "Error applying Password Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if customscript: + try: + postcust = PostCustomScript(customscript, imcdirpath) + postcust.execute() + except Exception as e: + _raise_error_status( + "Error executing post-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if product_marker: try: - setup_marker_files(markerid) + setup_marker_files( + product_marker, + os.path.join(self.paths.cloud_dir, 'data')) except Exception as e: - LOG.debug("Error creating marker files: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False + _raise_error_status( + "Error creating marker files", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. 
+            util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
             enable_nics(self._vmware_nics_to_enable)
             set_customization_status(
                 GuestCustStateEnum.GUESTCUST_STATE_DONE,
@@ -536,31 +569,52 @@ def get_datasource_list(depends):


 # To check if marker file exists
-def check_marker_exists(markerid):
+def check_marker_exists(markerid, marker_dir):
     """
     Check the existence of a marker file.
     Presence of marker file determines whether a certain code path is to be
     executed. It is needed for partial guest customization in VMware.
+    @param markerid: a unique string representing a particular product
+                     marker.
+    @param marker_dir: The directory in which markers exist.
     """
     if not markerid:
         return False
-    markerfile = "/.markerfile-" + markerid
+    markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
     if os.path.exists(markerfile):
         return True
     return False


 # Create a marker file
-def setup_marker_files(markerid):
+def setup_marker_files(markerid, marker_dir):
     """
     Create a new marker file.
     Marker files are unique to a full customization workflow in VMware
     environment.
+    @param markerid: a unique string representing a particular product
+                     marker.
+    @param marker_dir: The directory in which markers exist.
+
     """
-    if not markerid:
-        return
-    markerfile = "/.markerfile-" + markerid
-    util.del_file("/.markerfile-*.txt")
+    LOG.debug("Handle marker creation")
+    markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
+    for fname in os.listdir(marker_dir):
+        if fname.startswith(".markerfile"):
+            util.del_file(os.path.join(marker_dir, fname))
     open(markerfile, 'w').close()


+def _raise_error_status(prefix, error, event, config_file):
+    """
+    Raise error and send customization status to the underlying VMware
+    Virtualization Platform. Also, cleanup the imc directory.
+    """
+    LOG.debug('%s: %s', prefix, error)
+    set_customization_status(
+        GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+        event)
+    util.del_dir(os.path.dirname(config_file))
+    raise error
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 5fdac192..ce47b6bd 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -12,6 +12,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
+import collections import os import pwd import re @@ -19,6 +20,7 @@ import string from cloudinit import log as logging from cloudinit import net +from cloudinit.net import eni from cloudinit import sources from cloudinit import util @@ -31,6 +33,9 @@ CONTEXT_DISK_FILES = ["context.sh"] class DataSourceOpenNebula(sources.DataSource): + + dsname = "OpenNebula" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -40,7 +45,7 @@ class DataSourceOpenNebula(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) - def get_data(self): + def _get_data(self): defaults = {"instance-id": DEFAULT_IID} results = None seed = None @@ -86,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource): return False self.seed = seed - self.network_eni = results.get("network_config") + self.network_eni = results.get('network-interfaces') self.metadata = md self.userdata_raw = results.get('userdata') return True + @property + def network_config(self): + if self.network_eni is not None: + return eni.convert_eni_data(self.network_eni) + else: + return None + def get_hostname(self, fqdn=False, resolve_ip=None): if resolve_ip is None: if self.dsmode == sources.DSMODE_NETWORK: @@ -113,58 +125,53 @@ class OpenNebulaNetwork(object): self.context = context if system_nics_by_mac is None: system_nics_by_mac = get_physical_nics_by_mac() - self.ifaces = system_nics_by_mac + self.ifaces = collections.OrderedDict( + [k for k in sorted(system_nics_by_mac.items(), + key=lambda k: net.natural_sort_key(k[1]))]) + + # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC. + # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} + self.context_devname = {} + for k, v in context.items(): + m = re.match(r'^(.+)_MAC$', k) + if m: + self.context_devname[v.lower()] = m.group(1) def mac2ip(self, mac): - components = mac.split(':')[2:] - return [str(int(c, 16)) for c in components] + return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) - def get_ip(self, dev, components): - var_name = dev.upper() + '_IP' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components) + def mac2network(self, mac): + return self.mac2ip(mac).rpartition(".")[0] + ".0" - def get_mask(self, dev): - var_name = dev.upper() + '_MASK' - if var_name in self.context: - return self.context[var_name] - else: - return '255.255.255.0' + def get_dns(self, dev): + return self.get_field(dev, "dns", "").split() - def get_network(self, dev, components): - var_name = dev.upper() + '_NETWORK' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components[:-1]) + '.0' + def get_domain(self, dev): + return self.get_field(dev, "domain") + + def get_ip(self, dev, mac): + return self.get_field(dev, "ip", self.mac2ip(mac)) def get_gateway(self, dev): - var_name = dev.upper() + '_GATEWAY' - if var_name in self.context: - return self.context[var_name] - else: - return None + return self.get_field(dev, "gateway") - def get_dns(self, dev): - var_name = dev.upper() + '_DNS' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_mask(self, dev): + return self.get_field(dev, "mask", "255.255.255.0") - def get_domain(self, dev): - var_name = dev.upper() + '_DOMAIN' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_network(self, dev, mac): + return self.get_field(dev, 
"network", self.mac2network(mac)) + + def get_field(self, dev, name, default=None): + """return the field name in context for device dev. + + context stores <dev>_<NAME> (example: eth0_DOMAIN). + an empty string for value will return default.""" + val = self.context.get('_'.join((dev, name,)).upper()) + # allow empty string to return the default. + return default if val in (None, "") else val def gen_conf(self): - global_dns = [] - if 'DNS' in self.context: - global_dns.append(self.context['DNS']) + global_dns = self.context.get('DNS', "").split() conf = [] conf.append('auto lo') @@ -172,29 +179,31 @@ class OpenNebulaNetwork(object): conf.append('') for mac, dev in self.ifaces.items(): - ip_components = self.mac2ip(mac) + mac = mac.lower() + + # c_dev stores name in context 'ETHX' for this device. + # dev stores the current system name. + c_dev = self.context_devname.get(mac, dev) conf.append('auto ' + dev) conf.append('iface ' + dev + ' inet static') - conf.append(' address ' + self.get_ip(dev, ip_components)) - conf.append(' network ' + self.get_network(dev, ip_components)) - conf.append(' netmask ' + self.get_mask(dev)) + conf.append(' #hwaddress %s' % mac) + conf.append(' address ' + self.get_ip(c_dev, mac)) + conf.append(' network ' + self.get_network(c_dev, mac)) + conf.append(' netmask ' + self.get_mask(c_dev)) - gateway = self.get_gateway(dev) + gateway = self.get_gateway(c_dev) if gateway: conf.append(' gateway ' + gateway) - domain = self.get_domain(dev) + domain = self.get_domain(c_dev) if domain: conf.append(' dns-search ' + domain) # add global DNS servers to all interfaces - dns = self.get_dns(dev) + dns = self.get_dns(c_dev) if global_dns or dns: - all_dns = global_dns - if dns: - all_dns.append(dns) - conf.append(' dns-nameservers ' + ' '.join(all_dns)) + conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) conf.append('') @@ -329,8 +338,9 @@ def read_context_disk_dir(source_dir, asuser=None): try: pwd.getpwnam(asuser) except KeyError as e: - raise BrokenContextDiskDir("configured user '%s' " - "does not exist", asuser) + raise BrokenContextDiskDir( + "configured user '{user}' does not exist".format( + user=asuser)) try: path = os.path.join(source_dir, 'context.sh') content = util.load_file(path) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b64a7f24..e55a7638 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -24,6 +24,9 @@ DEFAULT_METADATA = { class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + + dsname = "OpenStack" + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None @@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def get_data(self): + def _get_data(self): try: if not self.wait_for_metadata_service(): return False diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 3a8a8e8f..b0b19c93 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): + dsname = "Scaleway" + def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) @@ -184,7 +186,7 @@ class 
DataSourceScaleway(sources.DataSource):
         self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
         self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))

-    def get_data(self):
+    def _get_data(self):
         if not on_scaleway():
             return False

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 6c6902fd..86bfa5d8 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db"


 class DataSourceSmartOS(sources.DataSource):
+
+    dsname = "Joyent"
+
     _unset = "_unset"
     smartos_type = _unset
     md_client = _unset
@@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource):
             os.rename('/'.join([svc_path, 'provisioning']),
                       '/'.join([svc_path, 'provision_success']))

-    def get_data(self):
+    def _get_data(self):
         self._init()

         md = {}
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9a43fbee..a05ca2f6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -10,9 +10,11 @@

 import abc
 import copy
+import json
 import os

 import six

+from cloudinit.atomic_helper import write_json
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM"
 DEP_NETWORK = "NETWORK"
 DS_PREFIX = 'DataSource'

+# File in which instance meta-data, user-data and vendor-data is written
+INSTANCE_JSON_FILE = 'instance-data.json'
+
+# Key which can provide a cloud's official product name to cloud-init
+METADATA_CLOUD_NAME_KEY = 'cloud-name'
+
 LOG = logging.getLogger(__name__)


@@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception):
     pass


+def process_base64_metadata(metadata, key_path=''):
+    """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
+    md_copy = copy.deepcopy(metadata)
+    md_copy['base64-encoded-keys'] = []
+    for key, val in metadata.items():
+        if key_path:
+            sub_key_path = key_path + '/' + key
+        else:
+            sub_key_path = key
+        if isinstance(val, str) and val.startswith('ci-b64:'):
+            md_copy['base64-encoded-keys'].append(sub_key_path)
+            md_copy[key] = val.replace('ci-b64:', '')
+        if isinstance(val, dict):
+            return_val = process_base64_metadata(val, sub_key_path)
+            md_copy['base64-encoded-keys'].extend(
+                return_val.pop('base64-encoded-keys'))
+            md_copy[key] = return_val
+    return md_copy
+
+
 @six.add_metaclass(abc.ABCMeta)
 class DataSource(object):

     dsmode = DSMODE_NETWORK
     default_locale = 'en_US.UTF-8'

+    # Datasource name needs to be set by subclasses to determine which
+    # cloud-config datasource key is loaded
+    dsname = '_undef'
+
+    # Cached cloud_name as determined by _get_cloud_name
+    _cloud_name = None
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         self.sys_cfg = sys_cfg
         self.distro = distro
@@ -56,17 +91,8 @@ class DataSource(object):
         self.vendordata = None
         self.vendordata_raw = None

-        # find the datasource config name.
-        # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] - name = type_utils.obj_name(self) - if name.startswith(DS_PREFIX): - name = name[len(DS_PREFIX):] - if name.endswith('Net'): - name = name[0:-3] - - self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, - ("datasource", name), {}) + self.ds_cfg = util.get_cfg_by_path( + self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: self.ds_cfg = {} @@ -78,6 +104,51 @@ class DataSource(object): def __str__(self): return type_utils.obj_name(self) + def _get_standardized_metadata(self): + """Return a dictionary of standardized metadata keys.""" + return {'v1': { + 'local-hostname': self.get_hostname(), + 'instance-id': self.get_instance_id(), + 'cloud-name': self.cloud_name, + 'region': self.region, + 'availability-zone': self.availability_zone}} + + def get_data(self): + """Datasources implement _get_data to setup metadata and userdata_raw. + + Minimally, the datasource should return a boolean True on success. + """ + return_value = self._get_data() + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + if not return_value: + return return_value + + instance_data = { + 'ds': { + 'meta-data': self.metadata, + 'user-data': self.get_userdata_raw(), + 'vendor-data': self.get_vendordata_raw()}} + instance_data.update( + self._get_standardized_metadata()) + try: + # Process content base64encoding unserializable values + content = util.json_dumps(instance_data) + # Strip base64: prefix and return base64-encoded-keys + processed_data = process_base64_metadata(json.loads(content)) + except TypeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + except UnicodeDecodeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + write_json(json_file, processed_data, mode=0o600) + return return_value + + def _get_data(self): + raise NotImplementedError( + 'Subclasses of DataSource must implement _get_data which' + ' sets self.metadata, vendordata_raw and userdata_raw.') + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -91,6 +162,34 @@ class DataSource(object): return self.vendordata @property + def cloud_name(self): + """Return lowercase cloud name as determined by the datasource. + + Datasource can determine or define its own cloud product name in + metadata. + """ + if self._cloud_name: + return self._cloud_name + if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): + cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) + if isinstance(cloud_name, six.string_types): + self._cloud_name = cloud_name.lower() + LOG.debug( + 'Ignoring metadata provided key %s: non-string type %s', + METADATA_CLOUD_NAME_KEY, type(cloud_name)) + else: + self._cloud_name = self._get_cloud_name().lower() + return self._cloud_name + + def _get_cloud_name(self): + """Return the datasource name as it frequently matches cloud name. + + Should be overridden in subclasses which can run on multiple + cloud names, such as DatasourceEc2. 
+ """ + return self.dsname + + @property def launch_index(self): if not self.metadata: return None @@ -161,8 +260,11 @@ class DataSource(object): @property def availability_zone(self): - return self.metadata.get('availability-zone', - self.metadata.get('availability_zone')) + top_level_az = self.metadata.get( + 'availability-zone', self.metadata.get('availability_zone')) + if top_level_az: + return top_level_az + return self.metadata.get('placement', {}).get('availability-zone') @property def region(self): @@ -346,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): # Return an ordered list of classes that match (if any) def list_sources(cfg_list, depends, pkg_list): src_list = [] - LOG.debug(("Looking for for data source in: %s," + LOG.debug(("Looking for data source in: %s," " via packages %s that matches dependencies %s"), cfg_list, pkg_list, depends) for ds_name in cfg_list: @@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list): ret_list.append(cls) return ret_list + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 959b1bda..90c12df1 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): ' </Container>', '</Health>']) - def __init__(self, fallback_lease_file=None): + def __init__(self, fallback_lease_file=None, dhcp_options=None): LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', fallback_lease_file) - self.dhcpoptions = None + self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None self.values = {} @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): @property def endpoint(self): if self._endpoint is None: - self._endpoint = self.find_endpoint(self.lease_file) + self._endpoint = self.find_endpoint(self.lease_file, + self.dhcpoptions) return self._endpoint @staticmethod @@ -274,7 +275,8 @@ class WALinuxAgentShim(object): name = os.path.basename(hook_file).replace('.json', '') dhcp_options[name] = json.loads(util.load_file((hook_file))) except ValueError: - raise ValueError("%s is not valid JSON data", hook_file) + raise ValueError( + '{_file} is not valid JSON data'.format(_file=hook_file)) return dhcp_options @staticmethod @@ -291,10 +293,14 @@ class WALinuxAgentShim(object): return _value @staticmethod - def find_endpoint(fallback_lease_file=None): + def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None - LOG.debug('Finding Azure endpoint from networkd...') - value = WALinuxAgentShim._networkd_get_value_from_leases() + if dhcp245 is not None: + value = dhcp245 + LOG.debug("Using Azure Endpoint from dhcp options") + if value is None: + LOG.debug('Finding Azure endpoint from networkd...') + value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json # a dhclient exit hook that calls cloud-init-dhclient-hook @@ -366,8 +372,9 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) try: return shim.register_with_azure_and_fetch_data() finally: diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py 
index 49d441db..2eaeff34 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -100,4 +100,8 @@ class Config(object):
         """Returns marker id."""
         return self._configFile.get(Config.MARKERID, None)

+    @property
+    def custom_script_name(self):
+        """Return the name of custom (pre/post) script."""
+        return self._configFile.get(Config.CUSTOM_SCRIPT, None)
 # vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
new file mode 100644
index 00000000..a7d4ad91
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2017 Canonical Ltd.
+# Copyright (C) 2017 VMware Inc.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import stat
+from textwrap import dedent
+
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class CustomScriptNotFound(Exception):
+    pass
+
+
+class CustomScriptConstant(object):
+    RC_LOCAL = "/etc/rc.local"
+    POST_CUST_TMP_DIR = "/root/.customization"
+    POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh"
+    POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR,
+                                        POST_CUST_RUN_SCRIPT_NAME)
+    POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+
+
+class RunCustomScript(object):
+    def __init__(self, scriptname, directory):
+        self.scriptname = scriptname
+        self.directory = directory
+        self.scriptpath = os.path.join(directory, scriptname)
+
+    def prepare_script(self):
+        if not os.path.exists(self.scriptpath):
+            raise CustomScriptNotFound("Script %s not found! "
+                                       "Cannot execute custom script!"
+                                       % self.scriptpath)
+        # Strip any CR characters from the decoded script
+        util.load_file(self.scriptpath).replace("\r", "")
+        st = os.stat(self.scriptpath)
+        os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC)
+
+
+class PreCustomScript(RunCustomScript):
+    def execute(self):
+        """Executing custom script with precustomization argument."""
+        LOG.debug("Executing pre-customization script")
+        self.prepare_script()
+        util.subp(["/bin/sh", self.scriptpath, "precustomization"])
+
+
+class PostCustomScript(RunCustomScript):
+    def __init__(self, scriptname, directory):
+        super(PostCustomScript, self).__init__(scriptname, directory)
+        # Determine when to run custom script. When postreboot is True,
+        # the user uploaded script will run as part of rc.local after
+        # the machine reboots. This is determined by presence of rclocal.
+        # When postreboot is False, script will run as part of cloud-init.
+        self.postreboot = False
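
# Illustrative sketch (editor addition, not part of the patch): the rc.local
# rewrite performed by _install_post_reboot_agent() below, assuming a POSIX
# rc.local that ends in 'exit 0'. The hook is appended once; re-registration
# is skipped via the search string checked by has_previous_agent().
from textwrap import dedent

def register_agent(rclocal_text,
                   run_script='/root/.customization/post-customize-guest.sh'):
    marker = '# Run post-reboot guest customization'
    if marker in rclocal_text:
        return rclocal_text  # already registered
    hook = dedent("""
        %s
        /bin/sh %s
        exit 0
        """) % (marker, run_script)
    # Drop the old terminating 'exit 0' so the hook's own one ends the file.
    return rclocal_text.replace('exit 0\n', '') + hook
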
+ """ + LOG.debug("Installing post-reboot customization from %s to %s", + self.directory, rclocal) + if not self.has_previous_agent(rclocal): + LOG.info("Adding post-reboot customization agent to rc.local") + new_content = dedent(""" + # Run post-reboot guest customization + /bin/sh %s + exit 0 + """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT + existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') + st = os.stat(rclocal) + # "x" flag should be set + mode = st.st_mode | stat.S_IEXEC + util.write_file(rclocal, existing_rclocal + new_content, mode) + + else: + # We don't need to update rclocal file everytime a customization + # is requested. It just needs to be done for the first time. + LOG.info("Post-reboot guest customization agent is already " + "registered in rc.local") + LOG.debug("Installing post-reboot customization agent finished: %s", + self.postreboot) + + def has_previous_agent(self, rclocal): + searchstring = "# Run post-reboot guest customization" + if searchstring in open(rclocal).read(): + return True + return False + + def find_rc_local(self): + """ + Determine if rc local is present. + """ + rclocal = "" + if os.path.exists(CustomScriptConstant.RC_LOCAL): + LOG.debug("rc.local detected.") + # resolving in case of symlink + rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) + LOG.debug("rc.local resolved to %s", rclocal) + else: + LOG.warning("Can't find rc.local, post-customization " + "will be run before reboot") + return rclocal + + def install_agent(self): + rclocal = self.find_rc_local() + if rclocal: + self._install_post_reboot_agent(rclocal) + self.postreboot = True + + def execute(self): + """ + This method executes post-customization script before or after reboot + based on the presence of rc local. + """ + self.prepare_script() + self.install_agent() + if not self.postreboot: + LOG.warning("Executing post-customization script inline") + util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) + else: + LOG.debug("Scheduling custom script to run post reboot") + if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): + os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) + # Script "post-customize-guest.sh" and user uploaded script are + # are present in the same directory and needs to copied to a temp + # directory to be executed post reboot. User uploaded script is + # saved as customize.sh in the temp directory. + # post-customize-guest.sh excutes customize.sh after reboot. 
+ LOG.debug("Copying post-customization script") + util.copy(self.scriptpath, + CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") + LOG.debug("Copying script to run post-customization script") + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), + CustomScriptConstant.POST_CUST_RUN_SCRIPT) + LOG.info("Creating post-reboot pending marker") + util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2fb07c59..2d8900e2 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -161,7 +161,7 @@ class NicConfigurator(object): if nic.primary and v4.gateways: self.ipv4PrimaryGateway = v4.gateways[0] subnet.update({'gateway': self.ipv4PrimaryGateway}) - return [subnet] + return ([subnet], route_list) # Add routes if there is no primary nic if not self._primaryNic: diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/cloudinit/sources/tests/__init__.py diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py new file mode 100644 index 00000000..af151154 --- /dev/null +++ b/cloudinit/sources/tests/test_init.py @@ -0,0 +1,202 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import os +import six +import stat + +from cloudinit.helpers import Paths +from cloudinit.sources import ( + INSTANCE_JSON_FILE, DataSource) +from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.user_data import UserDataProcessor +from cloudinit import util + + +class DataSourceTestSubclassNet(DataSource): + + dsname = 'MyTestSubclass' + + def __init__(self, sys_cfg, distro, paths, custom_userdata=None): + super(DataSourceTestSubclassNet, self).__init__( + sys_cfg, distro, paths) + self._custom_userdata = custom_userdata + + def _get_cloud_name(self): + return 'SubclassCloudName' + + def _get_data(self): + self.metadata = {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'} + if self._custom_userdata: + self.userdata_raw = self._custom_userdata + else: + self.userdata_raw = 'userdata_raw' + self.vendordata_raw = 'vendordata_raw' + return True + + +class InvalidDataSourceTestSubclassNet(DataSource): + pass + + +class TestDataSource(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDataSource, self).setUp() + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} + self.distro = 'distrotest' # generally should be a Distro object + self.paths = Paths({}) + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) + + def test_datasource_init(self): + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" + self.assertEqual(self.paths, self.datasource.paths) + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) + self.assertEqual(self.distro, self.datasource.distro) + self.assertIsNone(self.datasource.userdata) + self.assertEqual({}, self.datasource.metadata) + self.assertIsNone(self.datasource.userdata_raw) + self.assertIsNone(self.datasource.vendordata) + self.assertIsNone(self.datasource.vendordata_raw) + self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) + + def test_datasource_init_gets_ds_cfg_using_dsname(self): + 
"""Init uses DataSource.dsname for sourcing ds_cfg.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + paths = Paths({}) + datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) + self.assertEqual({'key2': False}, datasource.ds_cfg) + + def test_str_is_classname(self): + """The string representation of the datasource is the classname.""" + self.assertEqual('DataSource', str(self.datasource)) + self.assertEqual( + 'DataSourceTestSubclassNet', + str(DataSourceTestSubclassNet('', '', self.paths))) + + def test__get_data_unimplemented(self): + """Raise an error when _get_data is not implemented.""" + with self.assertRaises(NotImplementedError) as context_manager: + self.datasource.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + datasource2 = InvalidDataSourceTestSubclassNet( + self.sys_cfg, self.distro, self.paths) + with self.assertRaises(NotImplementedError) as context_manager: + datasource2.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + + def test_get_data_calls_subclass__get_data(self): + """Datasource.get_data uses the subclass' version of _get_data.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + self.assertEqual( + {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + datasource.metadata) + self.assertEqual('userdata_raw', datasource.userdata_raw) + self.assertEqual('vendordata_raw', datasource.vendordata_raw) + + def test_get_data_write_json_instance_data(self): + """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected = { + 'base64-encoded-keys': [], + 'v1': { + 'availability-zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'instance-id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'ds': { + 'meta-data': {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'user-data': 'userdata_raw', + 'vendor-data': 'vendordata_raw'}} + self.assertEqual(expected, util.load_json(content)) + file_stat = os.stat(json_file) + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_handles_redacted_unserializable_content(self): + """get_data warns unserializable content in INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected_userdata = { + 'key1': 'val1', + 'key2': { + 'key2.1': "Warning: redacted unserializable type <class" + " 'cloudinit.helpers.Paths'>"}} + instance_json = util.load_json(content) + self.assertEqual( + expected_userdata, instance_json['ds']['user-data']) + + @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") + def test_get_data_base64encodes_unserializable_bytes(self): + """On py3, get_data base64encodes any 
unserializable content.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual( + ['ds/user-data/key2/key2.1'], + instance_json['base64-encoded-keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") + def test_get_data_handles_bytes_values(self): + """On py2 get_data handles bytes values without having to b64encode.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual([], instance_json['base64-encoded-keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") + def test_non_utf8_encoding_logs_warning(self): + """When non-utf-8 values exist in py2 instance-data is not written.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + self.assertFalse(os.path.exists(json_file)) + self.assertIn( + "WARNING: Error persisting instance-data.json: 'utf8' codec can't" + " decode byte 0xaa in position 2: invalid start byte", + self.logs.getvalue()) diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index 5d7adf70..c98a1b53 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): if odir is not None: return odir + if needs_exe: + tdir = _EXE_ROOT_TMPDIR + if not os.path.isdir(tdir): + os.makedirs(tdir) + os.chmod(tdir, 0o1777) + return tdir + global _TMPDIR if _TMPDIR: return _TMPDIR - if needs_exe: - tdir = _EXE_ROOT_TMPDIR - elif os.getuid() == 0: + if os.getuid() == 0: tdir = _ROOT_TMPDIR else: tdir = os.environ.get('TMPDIR', '/tmp') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 6f88a5b7..0080c729 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -3,7 +3,6 @@ from __future__ import print_function import functools -import json import logging import os import shutil @@ -20,6 +19,11 @@ try: except ImportError: from contextlib2 import ExitStack +try: + from configparser import ConfigParser +except ImportError: + from ConfigParser import ConfigParser + from cloudinit import helpers as ch from cloudinit import util @@ -114,6 +118,16 @@ class TestCase(unittest2.TestCase): self.addCleanup(m.stop) setattr(self, attr, p) + # prefer python3 read_file over readfp but allow fallback + def parse_and_read(self, contents): + parser = ConfigParser() + if hasattr(parser, 'read_file'): + parser.read_file(contents) + elif hasattr(parser, 'readfp'): + # pylint: disable=W1505 + parser.readfp(contents) + return parser + class CiTestCase(TestCase): 
"""This is the preferred test case base class unless user @@ -159,6 +173,18 @@ class CiTestCase(TestCase): dir = self.tmp_dir() return os.path.normpath(os.path.abspath(os.path.join(dir, path))) + def assertRaisesCodeEqual(self, expected, found): + """Handle centos6 having different context manager for assertRaises. + with assertRaises(Exception) as e: + raise Exception("BOO") + + centos6 will have e.exception as an integer. + anything nwere will have it as something with a '.code'""" + if isinstance(found, int): + self.assertEqual(expected, found) + else: + self.assertEqual(expected, found.code) + class ResourceUsingTestCase(CiTestCase): @@ -337,12 +363,6 @@ def dir2dict(startdir, prefix=None): return flist -def json_dumps(data): - # print data in nicely formatted json. - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def wrap_and_call(prefix, mocks, func, *args, **kwargs): """ call func(args, **kwargs) with mocks applied, then unapplies mocks @@ -402,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'): mock.Mock.assert_not_called = __mock_assert_not_called +# older unittest2.TestCase (centos6) do not have assertRaisesRegex +# And setting assertRaisesRegex to assertRaisesRegexp causes +# https://github.com/PyCQA/pylint/issues/1653 . So the workaround. +if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): + def _tricky(*args, **kwargs): + return unittest2.TestCase.assertRaisesRegexp + unittest2.TestCase.assertRaisesRegex = _tricky + # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py new file mode 100644 index 00000000..ba6bf699 --- /dev/null +++ b/cloudinit/tests/test_util.py @@ -0,0 +1,46 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests for cloudinit.util""" + +import logging + +import cloudinit.util as util + +from cloudinit.tests.helpers import CiTestCase, mock + +LOG = logging.getLogger(__name__) + +MOUNT_INFO = [ + '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', + '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' +] + + +class TestUtil(CiTestCase): + + def test_parse_mount_info_no_opts_no_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_no_opts_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_with_opts(self): + result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) + self.assertEqual( + ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), + result + ) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_rw(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, True) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_ro(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, False) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0e0f5b4c..0a5be0b3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None): + exception_cb=None, sleep_time_cb=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, for request. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. + sleep_time_cb: call method with 2 arguments (response, loop_n) that + generates the next sleep time. the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, service but is not going to find one. It is possible that the instance data host (169.254.169.254) may be firewalled off Entirely for a sytem, meaning that the connection will block forever unless a timeout is set. + + A value of None for max_wait will retry indefinitely. 
""" start_time = time.time() @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb = log_status_cb def timeup(max_wait, start_time): - return ((max_wait <= 0 or max_wait is None) or - (time.time() - start_time > max_wait)) + if (max_wait is None): + return False + return ((max_wait <= 0) or (time.time() - start_time > max_wait)) loop_n = 0 + response = None while True: - sleep_time = int(loop_n / 5) + 1 + if sleep_time_cb is not None: + sleep_time = sleep_time_cb(response, loop_n) + else: + sleep_time = int(loop_n / 5) + 1 for url in urls: now = time.time() if loop_n != 0: if timeup(max_wait, start_time): break - if timeout and (now + timeout > (start_time + max_wait)): + if (max_wait is not None and + timeout and (now + timeout > (start_time + max_wait))): # shorten timeout to not run way over max_time timeout = int((start_time + max_wait) - now) @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = e time_taken = int(time.time() - start_time) - status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, - reason) + max_wait_str = "%ss" % max_wait if max_wait else "unlimited" + status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, + time_taken, + max_wait_str, + reason) status_cb(status_msg) if exception_cb: # This can be used to alter the headers that will be sent diff --git a/cloudinit/util.py b/cloudinit/util.py index 6c014ba5..338fb971 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -253,12 +253,18 @@ class ProcessExecutionError(IOError): self.exit_code = exit_code if not stderr: - self.stderr = self.empty_attr + if stderr is None: + self.stderr = self.empty_attr + else: + self.stderr = stderr else: self.stderr = self._indent_text(stderr) if not stdout: - self.stdout = self.empty_attr + if stdout is None: + self.stdout = self.empty_attr + else: + self.stdout = stdout else: self.stdout = self._indent_text(stdout) @@ -533,15 +539,6 @@ def multi_log(text, console=True, stderr=True, log.log(log_level, text) -def load_json(text, root_types=(dict,)): - decoded = json.loads(decode_binary(text)) - if not isinstance(decoded, tuple(root_types)): - expected_types = ", ".join([str(t) for t in root_types]) - raise TypeError("(%s) root types expected, got %s instead" - % (expected_types, type(decoded))) - return decoded - - def is_ipv4(instr): """determine if input string is a ipv4 address. return boolean.""" toks = instr.split('.') @@ -900,17 +897,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): "of length %s with allowed root types %s", len(blob), allowed) converted = safeyaml.load(blob) - if not isinstance(converted, allowed): + if converted is None: + LOG.debug("loaded blob returned None, returning default.") + converted = default + elif not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... raise TypeError(("Yaml load allows %s root types," " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): - if len(blob) == 0: - LOG.debug("load_yaml given empty string, returning default") - else: - logexc(LOG, "Failed loading yaml blob") + logexc(LOG, "Failed loading yaml blob") return loaded @@ -1398,6 +1395,32 @@ def get_output_cfg(cfg, mode): return ret +def get_config_logfiles(cfg): + """Return a list of log file paths from the configuration dictionary. + + @param cfg: The cloud-init merged configuration dictionary. 
+ """ + logs = [] + if not cfg or not isinstance(cfg, dict): + return logs + default_log = cfg.get('def_log_file') + if default_log: + logs.append(default_log) + for fmt in get_output_cfg(cfg, None): + if not fmt: + continue + match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt) + if not match: + continue + target = match.group('target') + parts = target.split() + if len(parts) == 1: + logs.append(target) + elif ['tee', '-a'] == parts[:2]: + logs.append(parts[2]) + return list(set(logs)) + + def logexc(log, msg, *args): # Setting this here allows this to change # levels easily (not always error level) @@ -1454,7 +1477,31 @@ def ensure_dirs(dirlist, mode=0o755): ensure_dir(d, mode) +def load_json(text, root_types=(dict,)): + decoded = json.loads(decode_binary(text)) + if not isinstance(decoded, tuple(root_types)): + expected_types = ", ".join([str(t) for t in root_types]) + raise TypeError("(%s) root types expected, got %s instead" + % (expected_types, type(decoded))) + return decoded + + +def json_serialize_default(_obj): + """Handler for types which aren't json serializable.""" + try: + return 'ci-b64:{0}'.format(b64e(_obj)) + except AttributeError: + return 'Warning: redacted unserializable type {0}'.format(type(_obj)) + + +def json_dumps(data): + """Return data in nicely formatted json.""" + return json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': '), default=json_serialize_default) + + def yaml_dumps(obj, explicit_start=True, explicit_end=True): + """Return data in nicely formatted yaml.""" return yaml.safe_dump(obj, line_break="\n", indent=4, @@ -1540,6 +1587,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): mtypes = list(mtype) elif mtype is None: mtypes = None + else: + raise TypeError( + 'Unsupported type provided for mtype parameter: {_type}'.format( + _type=type(mtype))) # clean up 'mtype' input a bit based on platform. platsys = platform.system().lower() @@ -1788,58 +1839,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, env = env.copy() env.update(update_env) - try: - if target_path(target) != "/": - args = ['chroot', target] + list(args) + if target_path(target) != "/": + args = ['chroot', target] + list(args) - if not logstring: - LOG.debug(("Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)"), args, rcs, shell, capture) - else: - LOG.debug(("Running hidden command to protect sensitive " - "input/output logstring: %s"), logstring) - - stdin = None - stdout = None - stderr = None - if capture: - stdout = subprocess.PIPE - stderr = subprocess.PIPE - if data is None: - # using devnull assures any reads get null, rather - # than possibly waiting on input. - devnull_fp = open(os.devnull) - stdin = devnull_fp - else: - stdin = subprocess.PIPE - if not isinstance(data, bytes): - data = data.encode() + if not logstring: + LOG.debug(("Running command %s with allowed return codes %s" + " (shell=%s, capture=%s)"), args, rcs, shell, capture) + else: + LOG.debug(("Running hidden command to protect sensitive " + "input/output logstring: %s"), logstring) + + stdin = None + stdout = None + stderr = None + if capture: + stdout = subprocess.PIPE + stderr = subprocess.PIPE + if data is None: + # using devnull assures any reads get null, rather + # than possibly waiting on input. 
+ devnull_fp = open(os.devnull) + stdin = devnull_fp + else: + stdin = subprocess.PIPE + if not isinstance(data, bytes): + data = data.encode() + try: sp = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) - - # Just ensure blank instead of none. - if not out and capture: - out = b'' - if not err and capture: - err = b'' - if decode: - def ldecode(data, m='utf-8'): - if not isinstance(data, bytes): - return data - return data.decode(m, decode) - - out = ldecode(out) - err = ldecode(err) except OSError as e: - raise ProcessExecutionError(cmd=args, reason=e, - errno=e.errno) + raise ProcessExecutionError( + cmd=args, reason=e, errno=e.errno, + stdout="-" if decode else b"-", + stderr="-" if decode else b"-") finally: if devnull_fp: devnull_fp.close() + # Just ensure blank instead of none. + if not out and capture: + out = b'' + if not err and capture: + err = b'' + if decode: + def ldecode(data, m='utf-8'): + if not isinstance(data, bytes): + return data + return data.decode(m, decode) + + out = ldecode(out) + err = ldecode(err) + rc = sp.returncode if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, @@ -2010,7 +2063,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def parse_mount_info(path, mountinfo_lines, log=LOG): +def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): """Return the mount information for PATH given the lines from /proc/$$/mountinfo.""" @@ -2072,11 +2125,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): match_mount_point = mount_point match_mount_point_elements = mount_point_elements + mount_options = parts[5] - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) + if get_mnt_opts: + if devpth and fs_type and match_mount_point and mount_options: + return (devpth, fs_type, match_mount_point, mount_options) else: - return None + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + + return None def parse_mtab(path): @@ -2146,7 +2204,7 @@ def parse_mount(path): return None -def get_mount_info(path, log=LOG): +def get_mount_info(path, log=LOG, get_mnt_opts=False): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. 
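
The get_mnt_opts flag added to parse_mount_info() and get_mount_info()
above extends the returned tuple with the mount options field from
mountinfo. A minimal usage sketch (the probed path and the printout are
illustrative only, not part of this change):

    from cloudinit import util

    # (devpth, fs_type, mount_point) by default; with get_mnt_opts=True
    # the tuple gains a fourth element: the comma-separated mount options.
    info = util.get_mount_info('/', get_mnt_opts=True)
    if info:
        opts = info[-1].split(',')
        print('read-write' if 'rw' in opts else 'read-only')
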
@@ -2178,7 +2236,7 @@
     mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
     if os.path.exists(mountinfo_path):
         lines = load_file(mountinfo_path).splitlines()
-        return parse_mount_info(path, lines, log)
+        return parse_mount_info(path, lines, log, get_mnt_opts)
     elif os.path.exists("/etc/mtab"):
         return parse_mtab(path)
     else:
@@ -2286,7 +2344,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
         missing.append(f)
 
     if len(missing):
-        raise ValueError("Missing required files: %s", ','.join(missing))
+        raise ValueError(
+            'Missing required files: {files}'.format(files=','.join(missing)))
 
     return ret
 
@@ -2563,4 +2622,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
     return need
 
 
+def mount_is_read_write(mount_point):
+    """Check whether the given mount point is mounted rw."""
+    result = get_mount_info(mount_point, get_mnt_opts=True)
+    mount_opts = result[-1].split(',')
+    return mount_opts[0] == 'rw'
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3255f399..be6262d6 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
-__VERSION__ = "17.1"
+__VERSION__ = "17.2"
 
 FEATURES = [
     # supports network config version 1
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index 859409a6..f2976fdf 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -1,3 +1,5 @@
+.. _boot_stages:
+
 ***********
 Boot Stages
 ***********
@@ -74,7 +76,7 @@ Network
  * **systemd service**: ``cloud-init.service``
  * **runs**: After local stage and configured networking is up.
  * **blocks**: As much of remaining boot as possible.
- * **modules**: ``init_modules``
+ * **modules**: ``cloud_init_modules`` in **/etc/cloud/cloud.cfg**
 
 This stage requires all configured networking to be online, as it will fully
 process any user-data that is found. Here, processing means:
@@ -104,7 +106,7 @@ Config
  * **systemd service**: ``cloud-config.service``
  * **runs**: After network stage.
  * **blocks**: None.
- * **modules**: ``config_modules``
+ * **modules**: ``cloud_config_modules`` in **/etc/cloud/cloud.cfg**
 
 This stage runs config modules only. Modules that do not really have an
 effect on other stages of boot are run here.
@@ -115,7 +117,7 @@ Final
  * **systemd service**: ``cloud-final.service``
  * **runs**: As final part of boot (traditional "rc.local")
  * **blocks**: None.
- * **modules**: ``final_modules``
+ * **modules**: ``cloud_final_modules`` in **/etc/cloud/cloud.cfg**
 
 This stage runs as late in boot as possible. Any scripts that a user is
 accustomed to running after logging into a system should run correctly here.
@@ -125,4 +127,9 @@ Things that run here include
  * configuration management plugins (puppet, chef, salt-minion)
  * user-scripts (including ``runcmd``).
 
+For scripts external to cloud-init that need to wait until cloud-init is
+finished, the ``cloud-init status`` subcommand can block until cloud-init
+is done, without the need to write your own systemd unit dependency
+chains. See :ref:`cli_status` for more info.
+
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
index 31eaba53..ae3a0c74 100644
--- a/doc/rtd/topics/capabilities.rst
+++ b/doc/rtd/topics/capabilities.rst
@@ -1,3 +1,5 @@
+.. _capabilities:
+
 ************
 Capabilities
 ************
@@ -39,17 +41,19 @@ Currently defined feature names include:
      see :ref:`network_config_v2` documentation for examples.
 
-CLI Interface :
+CLI Interface
+=============
 
-``cloud-init features`` will print out each feature supported.  If cloud-init
-does not have the features subcommand, it also does not support any features
-described in this document.
+The command line documentation is accessible on any system with
+cloud-init installed:
 
 .. code-block:: bash
 
   % cloud-init --help
-  usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
-                    {init,modules,query,single,dhclient-hook,features} ...
+  usage: cloud-init [-h] [--version] [--file FILES]
+                    [--debug] [--force]
+                    {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
+                    ...
 
   optional arguments:
    -h, --help            show this help message and exit
@@ -61,7 +65,7 @@ described in this document.
                          your own risk)
 
   Subcommands:
-    {init,modules,single,dhclient-hook,features,analyze,devel}
+    {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
      init                initializes cloud-init and performs initial modules
      modules             activates modules using a given configuration key
      single              run a single module
@@ -69,11 +73,153 @@ described in this document.
      features            list defined features
      analyze             Devel tool: Analyze cloud-init logs and data
      devel               Run development tools
+     collect-logs        Collect and tar all cloud-init debug info
+     clean               Remove logs and artifacts so cloud-init can re-run.
+     status              Report cloud-init status or wait on completion.
+
+CLI Subcommand details
+======================
+
+.. _cli_features:
+
+cloud-init features
+-------------------
+Print out each feature supported. If cloud-init does not have the
+features subcommand, it also does not support any features described in
+this document.
+
+.. code-block:: bash
 
   % cloud-init features
   NETWORK_CONFIG_V1
   NETWORK_CONFIG_V2
 
+.. _cli_status:
+
+cloud-init status
+-----------------
+Report whether cloud-init is running, done, disabled or errored. Exits
+non-zero if an error is detected in cloud-init.
+
+ * **--long**: Detailed status information.
+ * **--wait**: Block until cloud-init completes.
+
+.. code-block:: bash
+
+  % cloud-init status --long
+  status: done
+  time: Wed, 17 Jan 2018 20:41:59 +0000
+  detail:
+  DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
+
+  # Cloud-init running still short versus long options
+  % cloud-init status
+  status: running
+  % cloud-init status --long
+  status: running
+  time: Fri, 26 Jan 2018 21:39:43 +0000
+  detail:
+  Running in stage: init-local
+
+.. _cli_collect_logs:
+
+cloud-init collect-logs
+-----------------------
+Collect and tar cloud-init-generated logs, data files and system
+information for triage. This subcommand is integrated with apport.
+
+**Note**: Ubuntu users can file bugs with `ubuntu-bug cloud-init` to
+automatically attach these logs to a bug report.
+
+Logs collected are:
+
+ * /var/log/cloud-init*log
+ * /run/cloud-init
+ * cloud-init package version
+ * dmesg output
+ * journalctl output
+ * /var/lib/cloud/instance/user-data.txt
+
+.. _cli_analyze:
+
+cloud-init analyze
+------------------
+Get detailed reports of where cloud-init spends most of its time. See
+:ref:`boot_time_analysis` for more info.
+
+ * **blame** Report ordered by most costly operations.
+ * **dump** Machine-readable JSON dump of all cloud-init tracked events.
+ * **show** Show a time-ordered report of the cost of operations during
+   each boot stage.
+
+.. _cli_devel:
+
+cloud-init devel
+----------------
+Collection of development tools under active development. These tools will
+likely be promoted to top-level subcommands when stable.
+
+ * ``cloud-init devel schema``: A **#cloud-config** format and schema
+   validator. It accepts a cloud-config yaml file and annotates potential
+   schema errors locally without the need for deployment. Schema
+   validation is work in progress and supports a subset of cloud-config
+   modules.
+
+.. _cli_clean:
+
+cloud-init clean
+----------------
+Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the
+machine so that cloud-init re-runs all stages as it did on first boot.
+
+ * **--logs**: Optionally remove /var/log/cloud-init*log files.
+ * **--reboot**: Reboot the system after removing artifacts.
+
+.. _cli_init:
+
+cloud-init init
+---------------
+Generally run by OS init systems to execute cloud-init's stages
+*init* and *init-local*. See :ref:`boot_stages` for more info.
+Can be run on the commandline, but is generally gated to run only once
+due to semaphores in **/var/lib/cloud/instance/sem/** and
+**/var/lib/cloud/sem**.
+
+ * **--local**: Run *init-local* stage instead of *init*.
+
+.. _cli_modules:
+
+cloud-init modules
+------------------
+Generally run by OS init systems to execute *modules:config* and
+*modules:final* boot stages. This executes cloud config :ref:`modules`
+configured to run in the init, config and final stages. The modules are
+declared to run in various boot stages in the file
+**/etc/cloud/cloud.cfg** under keys **cloud_init_modules**,
+**cloud_config_modules** and **cloud_final_modules**. Can be run on the
+commandline, but each module is gated to run only once due to semaphores
+in ``/var/lib/cloud/``.
+
+ * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
+   *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
+
+.. _cli_single:
+
+cloud-init single
+-----------------
+Attempt to run a single named cloud config module. The following example
+re-runs the cc_set_hostname module ignoring the module default frequency
+of once-per-instance:
+
+ * **--name**: The cloud-config module name to run
+ * **--frequency**: Optionally override the declared module frequency
+   with one of (always|once-per-instance|once)
+
+.. code-block:: bash
+
+  % cloud-init single --name set_hostname --frequency always
+
+**Note**: Mileage may vary trying to re-run each cloud-config module, as
+some are not idempotent.
 
 .. _Cloud-init: https://launchpad.net/cloud-init
 .. vi: textwidth=78
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index 4e43dd57..c2b47edc 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -7,6 +7,7 @@ Overview
 This topic will discuss general approaches for test and debug of cloud-init on
 deployed instances.
 
+.. _boot_time_analysis:
 
 Boot Time Analysis - cloud-init analyze
 ======================================
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index cdb0f419..7b146751 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -1,3 +1,5 @@
+.. _modules:
+
 *******
 Modules
 *******
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index ce3a1bde..2f8ab54c 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -349,7 +349,7 @@ For any network device (one of the Config Types) users can define a list of
 entries will create interface alias allowing a single interface to use
 different ip configurations.
 
-Valid keys for for ``subnets`` include the following:
+Valid keys for ``subnets`` include the following:
 
 - ``type``: Specify the subnet type.
 - ``control``: Specify manual, auto or hotplug.  Indicates how the interface
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index d668e3f4..bf04bb3c 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -118,19 +118,19 @@ TreeRun and TreeCollect
 
 If working on a cloud-init feature or resolving a bug, it may be useful to
 run the current copy of cloud-init in the integration testing environment.
-The integration testing suite can automatically build a deb based on the 
+The integration testing suite can automatically build a deb based on the
 current working tree of cloud-init and run the test suite using this deb.
 
 The ``tree_run`` and ``tree_collect`` commands take the same arguments as
-the ``run`` and ``collect`` commands. These commands will build a deb and 
-write it into a temporary file, then start the test suite and pass that deb 
+the ``run`` and ``collect`` commands. These commands will build a deb and
+write it into a temporary file, then start the test suite and pass that deb
 in. To build a deb only, and not run the test suite, the ``bddeb`` command
 can be used.
 
 Note that code in the cloud-init working tree that has not been committed
 when the cloud-init deb is built will still be included. To build a
 cloud-init deb from or use the ``tree_run`` command using a copy of
-cloud-init located in a different directory, use the option ``--cloud-init 
+cloud-init located in a different directory, use the option ``--cloud-init
 /path/to/cloud-init``.
 
 .. code-block:: bash
@@ -383,7 +383,7 @@ Development Checklist
 * Valid unit tests validating output collected
 * Passes pylint & pep8 checks
 * Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test: 
+* Tested by running the test:
 
 .. code-block:: bash
 
@@ -392,6 +392,32 @@ Development Checklist
         --test modules/your_test.yaml \
         [--deb <build of cloud-init>]
 
+
+Platforms
+=========
+
+EC2
+---
+To run on the EC2 platform it is required that the user has an AWS credentials
+configuration file specifying their access keys and a default region. These
+configuration files are the standard that the AWS cli and other AWS tools
+utilize for interacting directly with AWS itself and are normally generated
+when running ``aws configure``:
+
+.. code-block:: bash
+
+    $ cat $HOME/.aws/credentials
+    [default]
+    aws_access_key_id = <KEY HERE>
+    aws_secret_access_key = <KEY HERE>
+
+.. code-block:: bash
+
+    $ cat $HOME/.aws/config
+    [default]
+    region = us-west-2
+
+
 Architecture
 ============
@@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the
 result of merging that dictionary from the default config and that
 dictionary from the overrides.
 
-Merging is done using the function 
+Merging is done using the function
 ``tests.cloud_tests.config.merge_config``, which can be examined for more
 detail on config merging behavior.
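
To sanity-check that the credentials and region files described above are
in place, a short boto3 snippet can be run (boto3 is the AWS SDK pinned in
integration-requirements.txt below; this sketch only reads local
configuration and makes no AWS API calls):

    import boto3

    # botocore resolves ~/.aws/credentials and ~/.aws/config automatically
    session = boto3.session.Session()
    print('default region:', session.region_name)
    print('credentials found:', session.get_credentials() is not None)
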
diff --git a/integration-requirements.txt b/integration-requirements.txt
new file mode 100644
index 00000000..45baac6a
--- /dev/null
+++ b/integration-requirements.txt
@@ -0,0 +1,20 @@
+# PyPI requirements for cloud-init integration testing
+# https://cloudinit.readthedocs.io/en/latest/topics/tests.html
+#
+# Note: Changes to these requirements may require updates to
+# the packages/pkg-deps.json file as well.
+#
+
+# ec2 backend
+boto3==1.5.9
+
+# ssh communication
+paramiko==2.4.0
+
+# lxd backend
+# 01/10/2018: enables use of lxd as snap support
+git+https://github.com/lxc/pylxd.git@0722955260a6557e6d2ffde1896bfe0707bbca27
+
+
+# finds latest image information
+bzr+lp:simplestreams
diff --git a/setup.py b/setup.py
@@ -18,11 +18,14 @@ import tempfile
 
 import setuptools
 from setuptools.command.install import install
+from setuptools.command.egg_info import egg_info
 
 from distutils.errors import DistutilsArgError
 
 import subprocess
 
+RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
+
 
 def is_f(p):
     return os.path.isfile(p)
@@ -107,7 +110,7 @@ def render_tmpl(template):
         return template
 
     topdir = os.path.dirname(sys.argv[0])
-    tmpd = tempfile.mkdtemp(dir=topdir)
+    tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX)
     atexit.register(shutil.rmtree, tmpd)
     bname = os.path.basename(template).rstrip(tmpl_ext)
     fpath = os.path.join(tmpd, bname)
@@ -156,6 +159,25 @@ elif os.path.isfile('/etc/redhat-release'):
     USR_LIB_EXEC = "usr/libexec"
 
 
+class MyEggInfo(egg_info):
+    """This makes sure to not include the rendered files in SOURCES.txt."""
+
+    def find_sources(self):
+        ret = egg_info.find_sources(self)
+        # update the self.filelist.
+        self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*",
+                                      is_regex=True)
+        # but since mfname is already written we have to update it also.
+        mfname = os.path.join(self.egg_info, "SOURCES.txt")
+        if os.path.exists(mfname):
+            with open(mfname) as fp:
+                files = [f for f in fp
+                         if not f.startswith(RENDERED_TMPD_PREFIX)]
+            with open(mfname, "w") as fp:
+                fp.write(''.join(files))
+        return ret
+
+
 # TODO: Is there a better way to do this??
 class InitsysInstallData(install):
     init_system = None
@@ -229,6 +251,7 @@ if os.uname()[0] != 'FreeBSD':
 # adding on the right init system configuration files
 cmdclass = {
     'install': InitsysInstallData,
+    'egg_info': MyEggInfo,
 }
 
 requirements = read_requires()
diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl
index bf6b2961..ff9c644d 100644
--- a/systemd/cloud-init-local.service.tmpl
+++ b/systemd/cloud-init-local.service.tmpl
@@ -13,12 +13,6 @@ Before=shutdown.target
 Before=sysinit.target
 Conflicts=shutdown.target
 {% endif %}
-{% if variant in ["suse"] %}
-# Other distros use Before=sysinit.target. There is not a clearly identified
-# reason for usage of basic.target instead.
-Before=basic.target
-Conflicts=shutdown.target
-{% endif %}
 RequiresMountsFor=/var/lib/cloud
 
 [Service]
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
index 98c1d6c7..dd436989 100644
--- a/tests/cloud_tests/__init__.py
+++ b/tests/cloud_tests/__init__.py
@@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
 TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')
 TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
 
+# This domain contains reverse lookups for hostnames that are used.
+# The primary reason is so sudo will return quickly when it attempts
+# to look up the hostname. i9n is just short for 'integration'.
+# see also bug 1730744 for why we had to do this.
+CI_DOMAIN = "i9n.cloud-init.io" + def _initialize_logging(): """Configure logging for cloud_tests.""" diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index fba8a0c7..a6d5069f 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -8,7 +8,7 @@ import tempfile from cloudinit import util as c_util from tests.cloud_tests import (config, LOG) -from tests.cloud_tests import (platforms, images, snapshots, instances) +from tests.cloud_tests import platforms from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] @@ -84,18 +84,18 @@ def setup_build(args): # set up image LOG.info('acquiring image for os: %s', args.build_os) img_conf = config.load_os_config(platform.platform_name, args.build_os) - image_call = partial(images.get_image, platform, img_conf) + image_call = partial(platforms.get_image, platform, img_conf) with PlatformComponent(image_call) as image: # set up snapshot - snapshot_call = partial(snapshots.get_snapshot, image) + snapshot_call = partial(platforms.get_snapshot, image) with PlatformComponent(snapshot_call) as snapshot: # create instance with cloud-config to set it up LOG.info('creating instance to build deb in') empty_cloud_config = "#cloud-config\n{}" instance_call = partial( - instances.get_instance, snapshot, empty_cloud_config, + platforms.get_instance, snapshot, empty_cloud_config, use_desc='build cloud-init deb') with PlatformComponent(instance_call) as instance: diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 71ee7645..5ea88e50 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -8,7 +8,7 @@ import os from cloudinit import util as c_util from tests.cloud_tests import (config, LOG, setup_image, util) from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) -from tests.cloud_tests import (platforms, images, snapshots, instances) +from tests.cloud_tests import platforms def collect_script(instance, base_dir, script, script_name): @@ -24,16 +24,29 @@ def collect_script(instance, base_dir, script, script_name): (out, err, exit) = instance.run_script( script.encode(), rcs=False, description='collect: {}'.format(script_name)) + if err: + LOG.debug("collect script %s had stderr: %s", script_name, err) + if not isinstance(out, bytes): + raise util.PlatformError( + "Collection of '%s' returned type %s, expected bytes: %s" % + (script_name, type(out), out)) + c_util.write_file(os.path.join(base_dir, script_name), out) def collect_console(instance, base_dir): - LOG.debug('getting console log') + """Collect instance console log. 
+ + @param instance: instance to get console log for + @param base_dir: directory to write console log to + """ + logfile = os.path.join(base_dir, 'console.log') + LOG.debug('getting console log for %s to %s', instance, logfile) try: data = instance.console_log() - except NotImplementedError as e: - data = 'Not Implemented: %s' % e - with open(os.path.join(base_dir, 'console.log'), "wb") as fp: + except NotImplementedError: + data = b'instance.console_log: not implemented' + with open(logfile, "wb") as fp: fp.write(data) @@ -64,9 +77,9 @@ def collect_test_data(args, snapshot, os_name, test_name): # skip the testcase with a warning req_features = test_config.get('required_features', []) if any(feature not in snapshot.features for feature in req_features): - LOG.warn('test config %s requires features not supported by image, ' - 'skipping.\nrequired features: %s\nsupported features: %s', - test_name, req_features, snapshot.features) + LOG.warning('test config %s requires features not supported by image, ' + 'skipping.\nrequired features: %s\nsupported features: %s', + test_name, req_features, snapshot.features) return ({}, 0) # if there are user data overrides required for this test case, apply them @@ -77,7 +90,7 @@ def collect_test_data(args, snapshot, os_name, test_name): # create test instance component = PlatformComponent( - partial(instances.get_instance, snapshot, user_data, + partial(platforms.get_instance, snapshot, user_data, block=True, start=False, use_desc=test_name)) LOG.info('collecting test data for test: %s', test_name) @@ -89,12 +102,11 @@ def collect_test_data(args, snapshot, os_name, test_name): test_output_dir, script, script_name)) for script_name, script in test_scripts.items()] - console_log = partial( - run_single, 'collect console', - partial(collect_console, instance, test_output_dir)) - res = run_stage('collect for test: {}'.format(test_name), - [start_call] + collect_calls + [console_log]) + [start_call] + collect_calls) + + instance.shutdown() + collect_console(instance, test_output_dir) return res @@ -108,7 +120,7 @@ def collect_snapshot(args, image, os_name): """ res = ({}, 1) - component = PlatformComponent(partial(snapshots.get_snapshot, image)) + component = PlatformComponent(partial(platforms.get_snapshot, image)) LOG.debug('creating snapshot for %s', os_name) with component as snapshot: @@ -136,7 +148,7 @@ def collect_image(args, platform, os_name): feature_overrides=args.feature_override) LOG.debug('os config: %s', os_config) component = PlatformComponent( - partial(images.get_image, platform, os_config)) + partial(platforms.get_image, platform, os_config)) LOG.info('acquiring image for os: %s', os_name) with component as image: diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py index 52fc2bda..8bd569fd 100644 --- a/tests/cloud_tests/config.py +++ b/tests/cloud_tests/config.py @@ -92,7 +92,7 @@ def load_platform_config(platform_name, require_enabled=False): def load_os_config(platform_name, os_name, require_enabled=False, - feature_overrides={}): + feature_overrides=None): """Load configuration for os. 
@param platform_name: platform name to load os config for @@ -101,6 +101,8 @@ def load_os_config(platform_name, os_name, require_enabled=False, @param feature_overrides: feature flag overrides to merge with features @return_value: config dict """ + if feature_overrides is None: + feature_overrides = {} main_conf = c_util.read_conf(RELEASES_CONF) default = main_conf['default_release_config'] image = main_conf['releases'][os_name] diff --git a/tests/cloud_tests/images/__init__.py b/tests/cloud_tests/images/__init__.py deleted file mode 100644 index 106c59f3..00000000 --- a/tests/cloud_tests/images/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Main init.""" - - -def get_image(platform, config): - """Get image from platform object using os_name.""" - return platform.get_image(config) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/instances/__init__.py b/tests/cloud_tests/instances/__init__.py deleted file mode 100644 index fc2e9cbc..00000000 --- a/tests/cloud_tests/instances/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Main init.""" - - -def get_instance(snapshot, *args, **kwargs): - """Get instance from snapshot.""" - return snapshot.launch(*args, **kwargs) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml index fa4f845e..448aa98d 100644 --- a/tests/cloud_tests/platforms.yaml +++ b/tests/cloud_tests/platforms.yaml @@ -6,8 +6,13 @@ default_platform_config: get_image_timeout: 300 # maximum time to create instance (before waiting for cloud-init) create_instance_timeout: 60 - + private_key: cloud_init_rsa + public_key: cloud_init_rsa.pub platforms: + ec2: + enabled: true + instance-type: t2.micro + tag: cii lxd: enabled: true # overrides for image templates @@ -61,9 +66,5 @@ platforms: {{ config_get("user.vendor-data", properties.default) }} nocloud-kvm: enabled: true - private_key: id_rsa - public_key: id_rsa.pub - ec2: {} - azure: {} # vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py index 3490fe87..a01e51ac 100644 --- a/tests/cloud_tests/platforms/__init__.py +++ b/tests/cloud_tests/platforms/__init__.py @@ -2,15 +2,27 @@ """Main init.""" -from tests.cloud_tests.platforms import lxd -from tests.cloud_tests.platforms import nocloudkvm +from .ec2 import platform as ec2 +from .lxd import platform as lxd +from .nocloudkvm import platform as nocloudkvm PLATFORMS = { + 'ec2': ec2.EC2Platform, 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform, 'lxd': lxd.LXDPlatform, } +def get_image(platform, config): + """Get image from platform object using os_name.""" + return platform.get_image(config) + + +def get_instance(snapshot, *args, **kwargs): + """Get instance from snapshot.""" + return snapshot.launch(*args, **kwargs) + + def get_platform(platform_name, config): """Get the platform object for 'platform_name' and init.""" platform_cls = PLATFORMS.get(platform_name) @@ -18,4 +30,10 @@ def get_platform(platform_name, config): raise ValueError('invalid platform name: {}'.format(platform_name)) return platform_cls(config) + +def get_snapshot(image): + """Get snapshot from image.""" + return image.snapshot() + + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/base.py b/tests/cloud_tests/platforms/base.py deleted file mode 100644 index 28975368..00000000 --- a/tests/cloud_tests/platforms/base.py +++ /dev/null @@ 
-1,27 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Base platform class.""" - - -class Platform(object): - """Base class for platforms.""" - - platform_name = None - - def __init__(self, config): - """Set up platform.""" - self.config = config - - def get_image(self, img_conf): - """Get image using specified image configuration. - - @param img_conf: configuration for image - @return_value: cloud_tests.images instance - """ - raise NotImplementedError - - def destroy(self): - """Clean up platform data.""" - pass - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py new file mode 100644 index 00000000..7bedf59d --- /dev/null +++ b/tests/cloud_tests/platforms/ec2/image.py @@ -0,0 +1,99 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""EC2 Image Base Class.""" + +from ..images import Image +from .snapshot import EC2Snapshot +from tests.cloud_tests import LOG + + +class EC2Image(Image): + """EC2 backed image.""" + + platform_name = 'ec2' + + def __init__(self, platform, config, image_ami): + """Set up image. + + @param platform: platform object + @param config: image configuration + @param image_ami: string of image ami ID + """ + super(EC2Image, self).__init__(platform, config) + self._img_instance = None + self.image_ami = image_ami + + @property + def _instance(self): + """Internal use only, returns a running instance""" + if not self._img_instance: + self._img_instance = self.platform.create_instance( + self.properties, self.config, self.features, + self.image_ami, user_data=None) + self._img_instance.start(wait=True, wait_for_cloud_init=True) + return self._img_instance + + def destroy(self): + """Delete the instance used to create a custom image.""" + if self._img_instance: + LOG.debug('terminating backing instance %s', + self._img_instance.instance.instance_id) + self._img_instance.instance.terminate() + self._img_instance.instance.wait_until_terminated() + + super(EC2Image, self).destroy() + + def _execute(self, *args, **kwargs): + """Execute command in image, modifying image.""" + self._instance.start(wait=True) + return self._instance._execute(*args, **kwargs) + + def push_file(self, local_path, remote_path): + """Copy file at 'local_path' to instance at 'remote_path'.""" + self._instance.start(wait=True) + return self._instance.push_file(local_path, remote_path) + + def run_script(self, *args, **kwargs): + """Run script in image, modifying image. + + @return_value: script output + """ + self._instance.start(wait=True) + return self._instance.run_script(*args, **kwargs) + + def snapshot(self): + """Create snapshot of image, block until done. + + Will return base image_ami if no instance has been booted, otherwise + will run the clean script, shutdown the instance, create a custom + AMI, and use that AMI once available. 
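+        @return_value: cloud_tests EC2Snapshot for the resulting AMI.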
+ """ + if not self._img_instance: + return EC2Snapshot(self.platform, self.properties, self.config, + self.features, self.image_ami, + delete_on_destroy=False) + + if self.config.get('boot_clean_script'): + self._img_instance.run_script(self.config.get('boot_clean_script')) + + self._img_instance.shutdown(wait=True) + + LOG.debug('creating custom ami from instance %s', + self._img_instance.instance.instance_id) + response = self.platform.ec2_client.create_image( + Name='%s-%s' % (self.platform.tag, self.image_ami), + InstanceId=self._img_instance.instance.instance_id + ) + image_ami_edited = response['ImageId'] + + # Create image and wait until it is in the 'available' state + image = self.platform.ec2_resource.Image(image_ami_edited) + image.wait_until_exists() + waiter = self.platform.ec2_client.get_waiter('image_available') + waiter.wait(ImageIds=[image.id]) + image.reload() + + return EC2Snapshot(self.platform, self.properties, self.config, + self.features, image_ami_edited) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py new file mode 100644 index 00000000..ab6037b1 --- /dev/null +++ b/tests/cloud_tests/platforms/ec2/instance.py @@ -0,0 +1,132 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base EC2 instance.""" +import os + +import botocore + +from ..instances import Instance +from tests.cloud_tests import LOG, util + + +class EC2Instance(Instance): + """EC2 backed instance.""" + + platform_name = "ec2" + _ssh_client = None + + def __init__(self, platform, properties, config, features, + image_ami, user_data=None): + """Set up instance. + + @param platform: platform object + @param properties: dictionary of properties + @param config: dictionary of configuration values + @param features: dictionary of supported feature flags + @param image_ami: AWS AMI ID for image to use + @param user_data: test user-data to pass to instance + """ + super(EC2Instance, self).__init__( + platform, image_ami, properties, config, features) + + self.image_ami = image_ami + self.instance = None + self.user_data = user_data + self.ssh_ip = None + self.ssh_port = 22 + self.ssh_key_file = os.path.join( + platform.config['data_dir'], platform.config['private_key']) + self.ssh_pubkey_file = os.path.join( + platform.config['data_dir'], platform.config['public_key']) + + def console_log(self): + """Collect console log from instance. + + The console log is buffered and not always present, therefore + may return empty string. + """ + try: + # OutputBytes comes from platform._decode_console_output_as_bytes + response = self.instance.console_output() + return response['OutputBytes'] + except KeyError: + if 'Output' in response: + msg = ("'OutputBytes' did not exist in console_output() but " + "'Output' did: %s..." 
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
new file mode 100644
index 00000000..ab6037b1
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/instance.py
@@ -0,0 +1,132 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base EC2 instance."""
+import os
+
+import botocore
+
+from ..instances import Instance
+from tests.cloud_tests import LOG, util
+
+
+class EC2Instance(Instance):
+    """EC2 backed instance."""
+
+    platform_name = "ec2"
+    _ssh_client = None
+
+    def __init__(self, platform, properties, config, features,
+                 image_ami, user_data=None):
+        """Set up instance.
+
+        @param platform: platform object
+        @param properties: dictionary of properties
+        @param config: dictionary of configuration values
+        @param features: dictionary of supported feature flags
+        @param image_ami: AWS AMI ID for image to use
+        @param user_data: test user-data to pass to instance
+        """
+        super(EC2Instance, self).__init__(
+            platform, image_ami, properties, config, features)
+
+        self.image_ami = image_ami
+        self.instance = None
+        self.user_data = user_data
+        self.ssh_ip = None
+        self.ssh_port = 22
+        self.ssh_key_file = os.path.join(
+            platform.config['data_dir'], platform.config['private_key'])
+        self.ssh_pubkey_file = os.path.join(
+            platform.config['data_dir'], platform.config['public_key'])
+
+    def console_log(self):
+        """Collect console log from instance.
+
+        The console log is buffered and not always present, so this
+        may return an empty string.
+        """
+        try:
+            # OutputBytes comes from platform._decode_console_output_as_bytes
+            response = self.instance.console_output()
+            return response['OutputBytes']
+        except KeyError:
+            if 'Output' in response:
+                msg = ("'OutputBytes' did not exist in console_output() but "
+                       "'Output' did: %s..." % response['Output'][0:128])
+                raise util.PlatformError('console_log', msg)
+            return ('No Console Output [%s]' % self.instance).encode()
+
+    def destroy(self):
+        """Clean up instance."""
+        if self.instance:
+            LOG.debug('destroying instance %s', self.instance.id)
+            self.instance.terminate()
+            self.instance.wait_until_terminated()
+
+        self._ssh_close()
+
+        super(EC2Instance, self).destroy()
+
+    def _execute(self, command, stdin=None, env=None):
+        """Execute command on instance."""
+        env_args = []
+        if env:
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
+
+        return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
+
+    def start(self, wait=True, wait_for_cloud_init=False):
+        """Start instance on EC2 with the platform's VPC."""
+        if self.instance:
+            if self.instance.state['Name'] == 'running':
+                return
+
+            LOG.debug('starting instance %s', self.instance.id)
+            self.instance.start()
+        else:
+            LOG.debug('launching instance')
+
+            args = {
+                'ImageId': self.image_ami,
+                'InstanceType': self.platform.instance_type,
+                'KeyName': self.platform.key_name,
+                'MaxCount': 1,
+                'MinCount': 1,
+                'SecurityGroupIds': [self.platform.security_group.id],
+                'SubnetId': self.platform.subnet.id,
+                'TagSpecifications': [{
+                    'ResourceType': 'instance',
+                    'Tags': [{
+                        'Key': 'Name', 'Value': self.platform.tag
+                    }]
+                }],
+            }
+
+            if self.user_data:
+                args['UserData'] = self.user_data
+
+            try:
+                instances = self.platform.ec2_resource.create_instances(**args)
+            except botocore.exceptions.ClientError as error:
+                error_msg = error.response['Error']['Message']
+                raise util.PlatformError('start', error_msg)
+
+            self.instance = instances[0]
+
+        LOG.debug('instance id: %s', self.instance.id)
+        if wait:
+            self.instance.wait_until_running()
+            self.instance.reload()
+            self.ssh_ip = self.instance.public_ip_address
+            self._wait_for_system(wait_for_cloud_init)
+
+    def shutdown(self, wait=True):
+        """Shut down the instance."""
+        LOG.debug('stopping instance %s', self.instance.id)
+        self.instance.stop()
+
+        if wait:
+            self.instance.wait_until_stopped()
+            self.instance.reload()
+
+# vi: ts=4 expandtab
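The env-prefix construction corrected in _execute() above is easiest to see in isolation; a minimal sketch of the intended behavior (wrap_with_env is a hypothetical helper name, not part of the diff):

    def wrap_with_env(command, env=None):
        """Prefix a remote command with 'sudo env KEY=VAL ...'."""
        env_args = []
        if env:
            env_args = ['env'] + ['%s=%s' % (k, v) for k, v in env.items()]
        return ['sudo'] + env_args + list(command)

    # wrap_with_env(['id'], {'LANG': 'C'}) -> ['sudo', 'env', 'LANG=C', 'id']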
+ +"""Base EC2 platform.""" +from datetime import datetime +import os + +import boto3 +import botocore +from botocore import session, handlers +import base64 + +from ..platforms import Platform +from .image import EC2Image +from .instance import EC2Instance +from tests.cloud_tests import LOG + + +class EC2Platform(Platform): + """EC2 test platform.""" + + platform_name = 'ec2' + ipv4_cidr = '192.168.1.0/20' + + def __init__(self, config): + """Set up platform.""" + super(EC2Platform, self).__init__(config) + # Used for unique VPC, SSH key, and custom AMI generation naming + self.tag = '%s-%s' % ( + config['tag'], datetime.now().strftime('%Y%m%d%H%M%S')) + self.instance_type = config['instance-type'] + + try: + b3session = get_session() + self.ec2_client = b3session.client('ec2') + self.ec2_resource = b3session.resource('ec2') + self.ec2_region = b3session.region_name + self.key_name = self._upload_public_key(config) + except botocore.exceptions.NoRegionError: + raise RuntimeError( + 'Please configure default region in $HOME/.aws/config') + except botocore.exceptions.NoCredentialsError: + raise RuntimeError( + 'Please configure ec2 credentials in $HOME/.aws/credentials') + + self.vpc = self._create_vpc() + self.internet_gateway = self._create_internet_gateway() + self.subnet = self._create_subnet() + self.routing_table = self._create_routing_table() + self.security_group = self._create_security_group() + + def create_instance(self, properties, config, features, + image_ami, user_data=None): + """Create an instance + + @param src_img_path: image path to launch from + @param properties: image properties + @param config: image configuration + @param features: image features + @param image_ami: string of image ami ID + @param user_data: test user-data to pass to instance + @return_value: cloud_tests.instances instance + """ + return EC2Instance(self, properties, config, features, + image_ami, user_data) + + def destroy(self): + """Delete SSH keys, terminate all instances, and delete VPC.""" + for instance in self.vpc.instances.all(): + LOG.debug('waiting for instance %s termination', instance.id) + instance.terminate() + instance.wait_until_terminated() + + if self.key_name: + LOG.debug('deleting SSH key %s', self.key_name) + self.ec2_client.delete_key_pair(KeyName=self.key_name) + + if self.security_group: + LOG.debug('deleting security group %s', self.security_group.id) + self.security_group.delete() + + if self.subnet: + LOG.debug('deleting subnet %s', self.subnet.id) + self.subnet.delete() + + if self.routing_table: + LOG.debug('deleting routing table %s', self.routing_table.id) + self.routing_table.delete() + + if self.internet_gateway: + LOG.debug('deleting internet gateway %s', self.internet_gateway.id) + self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id) + self.internet_gateway.delete() + + if self.vpc: + LOG.debug('deleting vpc %s', self.vpc.id) + self.vpc.delete() + + def get_image(self, img_conf): + """Get image using specified image configuration. + + Hard coded for 'amd64' based images. 
+
+    def get_image(self, img_conf):
+        """Get image using specified image configuration.
+
+        Hard coded for 'amd64' based images.
+
+        @param img_conf: configuration for image
+        @return_value: cloud_tests.images instance
+        """
+        if img_conf['root-store'] == 'ebs':
+            root_store = 'ssd'
+        elif img_conf['root-store'] == 'instance-store':
+            root_store = 'instance'
+        else:
+            raise RuntimeError('Unknown root-store type: %s' %
+                               (img_conf['root-store']))
+
+        filters = [
+            'arch=%s' % 'amd64',
+            'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
+            'region=%s' % self.ec2_region,
+            'release=%s' % img_conf['release'],
+            'root_store=%s' % root_store,
+            'virt=hvm',
+        ]
+
+        LOG.debug('finding image using streams')
+        image = self._query_streams(img_conf, filters)
+
+        try:
+            image_ami = image['id']
+        except KeyError:
+            raise RuntimeError('No images found for %s!' % img_conf['release'])
+
+        LOG.debug('found image: %s', image_ami)
+        image = EC2Image(self, img_conf, image_ami)
+        return image
+
+    def _create_internet_gateway(self):
+        """Create Internet Gateway and assign to VPC."""
+        LOG.debug('creating internet gateway')
+        internet_gateway = self.ec2_resource.create_internet_gateway()
+        internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
+        self._tag_resource(internet_gateway)
+
+        return internet_gateway
+
+    def _create_routing_table(self):
+        """Update default routing table with internet gateway.
+
+        This sets up internet access for the VPC via the internet gateway
+        by adding IPv4 and IPv6 default routes.
+        """
+        LOG.debug('creating routing table')
+        route_table = self.vpc.create_route_table()
+        route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
+                                 GatewayId=self.internet_gateway.id)
+        route_table.create_route(DestinationIpv6CidrBlock='::/0',
+                                 GatewayId=self.internet_gateway.id)
+        route_table.associate_with_subnet(SubnetId=self.subnet.id)
+        self._tag_resource(route_table)
+
+        return route_table
+
+    def _create_security_group(self):
+        """Create a security group in the VPC and enable all ingress."""
+        LOG.debug('creating security group')
+        security_group = self.vpc.create_security_group(
+            GroupName=self.tag, Description='integration test security group')
+        security_group.authorize_ingress(
+            IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
+        self._tag_resource(security_group)
+
+        return security_group
+
+    def _create_subnet(self):
+        """Generate IPv4 and IPv6 subnets for use."""
+        ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][
+            'Ipv6CidrBlock'][:-2] + '64'
+
+        LOG.debug('creating subnet with following ranges:')
+        LOG.debug('ipv4: %s', self.ipv4_cidr)
+        LOG.debug('ipv6: %s', ipv6_cidr)
+        subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr,
+                                        Ipv6CidrBlock=ipv6_cidr)
+        modify_subnet = subnet.meta.client.modify_subnet_attribute
+        modify_subnet(SubnetId=subnet.id,
+                      MapPublicIpOnLaunch={'Value': True})
+        self._tag_resource(subnet)
+
+        return subnet
+
+    def _create_vpc(self):
+        """Set up an AWS EC2 VPC and wait until it is available."""
+        LOG.debug('creating new vpc')
+        try:
+            vpc = self.ec2_resource.create_vpc(
+                CidrBlock=self.ipv4_cidr,
+                AmazonProvidedIpv6CidrBlock=True)
+        except botocore.exceptions.ClientError as e:
+            raise RuntimeError(e)
+
+        vpc.wait_until_available()
+        self._tag_resource(vpc)
+
+        return vpc
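The _create_* helpers above rely on strict ordering: the VPC must exist before the gateway attaches, and the gateway must be attached before default routes reference it. A condensed sketch of that ordering, assuming a default boto3 session and omitting IPv6, tagging, and cleanup:

    import boto3

    def provision_network(ipv4_cidr):
        ec2 = boto3.resource('ec2')
        vpc = ec2.create_vpc(CidrBlock=ipv4_cidr)
        vpc.wait_until_available()
        gateway = ec2.create_internet_gateway()
        gateway.attach_to_vpc(VpcId=vpc.id)  # attach before adding routes
        subnet = vpc.create_subnet(CidrBlock=ipv4_cidr)
        route_table = vpc.create_route_table()
        route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
                                 GatewayId=gateway.id)
        route_table.associate_with_subnet(SubnetId=subnet.id)
        return vpc, subnet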
+
+    def _tag_resource(self, resource):
+        """Tag a resource with the platform tag.
+
+        This makes resources specific to this test run much easier
+        to find and delete.
+
+        @param resource: resource to tag
+        """
+        tag = {
+            'Key': 'Name',
+            'Value': self.tag
+        }
+        resource.create_tags(Tags=[tag])
+
+    def _upload_public_key(self, config):
+        """Upload the SSH public key, using the platform tag as its name.
+
+        @param config: platform config
+        @return: string of ssh key name
+        """
+        key_file = os.path.join(config['data_dir'], config['public_key'])
+        with open(key_file, 'r') as fp:
+            public_key = fp.read().strip('\n')
+
+        LOG.debug('uploading SSH key %s', self.tag)
+        self.ec2_client.import_key_pair(KeyName=self.tag,
+                                        PublicKeyMaterial=public_key)
+
+        return self.tag
+
+
+def _decode_console_output_as_bytes(parsed, **kwargs):
+    """Provide console output as bytes in OutputBytes.
+
+    For this to be useful, the session has to have had the
+    decode_console_output handler unregistered already.
+
+    https://github.com/boto/botocore/issues/1351"""
+    if 'Output' not in parsed:
+        return
+    orig = parsed['Output']
+    handlers.decode_console_output(parsed, **kwargs)
+    parsed['OutputBytes'] = base64.b64decode(orig)
+
+
+def get_session():
+    mysess = session.get_session()
+    mysess.unregister('after-call.ec2.GetConsoleOutput',
+                      handlers.decode_console_output)
+    mysess.register('after-call.ec2.GetConsoleOutput',
+                    _decode_console_output_as_bytes)
+    return boto3.Session(botocore_session=mysess)
+
+
+# vi: ts=4 expandtab
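Taken together, the EC2 platform, image, instance, and snapshot classes are driven roughly as below. The config keys and values are illustrative; the real ones come from releases.yaml and the test runner, and the mirror_url/keyring entries are deliberately elided.

    config = {'tag': 'citest', 'instance-type': 't2.micro',
              'data_dir': '/tmp/citest', 'private_key': 'id_rsa',
              'public_key': 'id_rsa.pub'}
    platform = EC2Platform(config)
    try:
        image = platform.get_image({'root-store': 'ebs', 'release': 'xenial',
                                    'mirror_url': '...', 'keyring': '...'})
        snapshot = image.snapshot()
        instance = snapshot.launch(user_data='#cloud-config\n{}')
        out, err, rc = instance.execute(['cloud-init', 'status'])
    finally:
        platform.destroy()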
diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py
new file mode 100644
index 00000000..2c48cb54
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/snapshot.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base EC2 snapshot."""
+
+from ..snapshots import Snapshot
+from tests.cloud_tests import LOG
+
+
+class EC2Snapshot(Snapshot):
+    """EC2 image copy backed snapshot."""
+
+    platform_name = 'ec2'
+
+    def __init__(self, platform, properties, config, features, image_ami,
+                 delete_on_destroy=True):
+        """Set up snapshot.
+
+        @param platform: platform object
+        @param properties: image properties
+        @param config: image config
+        @param features: supported feature flags
+        @param image_ami: string of image ami ID
+        @param delete_on_destroy: boolean to delete on destroy
+        """
+        super(EC2Snapshot, self).__init__(
+            platform, properties, config, features)
+
+        self.image_ami = image_ami
+        self.delete_on_destroy = delete_on_destroy
+
+    def destroy(self):
+        """Deregister the backing AMI."""
+        if self.delete_on_destroy:
+            image = self.platform.ec2_resource.Image(self.image_ami)
+            snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
+
+            LOG.debug('removing custom ami %s', self.image_ami)
+            self.platform.ec2_client.deregister_image(ImageId=self.image_ami)
+
+            LOG.debug('removing custom snapshot %s', snapshot_id)
+            self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)
+
+    def launch(self, user_data, meta_data=None, block=True, start=True,
+               use_desc=None):
+        """Launch instance.
+
+        @param user_data: user-data for the instance
+        @param meta_data: meta_data for the instance
+        @param block: wait until instance is created
+        @param start: start instance and wait until fully started
+        @param use_desc: string of test name
+        @return_value: an Instance
+        """
+        if meta_data is not None:
+            raise ValueError("metadata not supported on EC2")
+
+        instance = self.platform.create_instance(
+            self.properties, self.config, self.features,
+            self.image_ami, user_data)
+
+        if start:
+            instance.start()
+
+        return instance
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/images/base.py b/tests/cloud_tests/platforms/images.py
index d503108a..557a5cf6 100644
--- a/tests/cloud_tests/images/base.py
+++ b/tests/cloud_tests/platforms/images.py
@@ -26,7 +26,8 @@ class Image(TargetBase):
     @property
     def properties(self):
         """{} containing: 'arch', 'os', 'version', 'release'."""
-        raise NotImplementedError
+        return {k: self.config[k]
+                for k in ('arch', 'os', 'release', 'version')}
 
     @property
     def features(self):
diff --git a/tests/cloud_tests/instances/base.py b/tests/cloud_tests/platforms/instances.py
index 8c59d62c..3bad021f 100644
--- a/tests/cloud_tests/instances/base.py
+++ b/tests/cloud_tests/platforms/instances.py
@@ -1,14 +1,21 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """Base instance."""
+import time
+
+import paramiko
+from paramiko.ssh_exception import (
+    BadHostKeyException, AuthenticationException, SSHException)
 
 from ..util import TargetBase
+from tests.cloud_tests import LOG, util
 
 
 class Instance(TargetBase):
     """Base instance object."""
 
     platform_name = None
+    _ssh_client = None
 
     def __init__(self, platform, name, properties, config, features):
         """Set up instance.
@@ -26,6 +33,11 @@ class Instance(TargetBase):
         self.features = features
         self._tmp_count = 0
 
+        self.ssh_ip = None
+        self.ssh_port = None
+        self.ssh_key_file = None
+        self.ssh_username = 'ubuntu'
+
     def console_log(self):
         """Instance console.
@@ -47,7 +59,63 @@ class Instance(TargetBase):
 
     def destroy(self):
         """Clean up instance."""
-        pass
+        self._ssh_close()
+
+    def _ssh(self, command, stdin=None):
+        """Run a command via SSH."""
+        client = self._ssh_connect()
+
+        cmd = util.shell_pack(command)
+        fp_in, fp_out, fp_err = client.exec_command(cmd)
+        channel = fp_in.channel
+
+        if stdin is not None:
+            fp_in.write(stdin)
+            fp_in.close()
+
+        channel.shutdown_write()
+        rc = channel.recv_exit_status()
+
+        return (fp_out.read(), fp_err.read(), rc)
+
+    def _ssh_close(self):
+        if self._ssh_client:
+            try:
+                self._ssh_client.close()
+            except SSHException:
+                LOG.warning('Failed to close SSH connection.')
+            self._ssh_client = None
+
+    def _ssh_connect(self):
+        """Connect via SSH."""
+        if self._ssh_client:
+            return self._ssh_client
+
+        if not self.ssh_ip or not self.ssh_port:
+            raise ValueError('ssh_ip and ssh_port must be set to connect')
+
+        client = paramiko.SSHClient()
+        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
+
+        retries = 30
+        while retries:
+            try:
+                client.connect(username=self.ssh_username,
+                               hostname=self.ssh_ip, port=self.ssh_port,
+                               pkey=private_key, banner_timeout=30)
+                self._ssh_client = client
+                return client
+            except (ConnectionRefusedError, AuthenticationException,
+                    BadHostKeyException, ConnectionResetError, SSHException,
+                    OSError):
+                retries -= 1
+                time.sleep(10)
+
+        ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % (
+            self.ssh_username, self.ssh_ip, self.ssh_port
+        )
+        raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
 
     def _wait_for_system(self, wait_for_cloud_init):
         """Wait until system has fully booted and cloud-init has finished.
diff --git a/tests/cloud_tests/images/lxd.py b/tests/cloud_tests/platforms/lxd/image.py
index 5caeba41..b5de1f52 100644
--- a/tests/cloud_tests/images/lxd.py
+++ b/tests/cloud_tests/platforms/lxd/image.py
@@ -6,13 +6,13 @@ import os
 import shutil
 import tempfile
 
+from ..images import Image
+from .snapshot import LXDSnapshot
 from cloudinit import util as c_util
-from tests.cloud_tests.images import base
-from tests.cloud_tests.snapshots import lxd as lxd_snapshot
 from tests.cloud_tests import util
 
 
-class LXDImage(base.Image):
+class LXDImage(Image):
     """LXD backed image."""
 
     platform_name = "lxd"
@@ -182,9 +182,8 @@ class LXDImage(base.Image):
             instance.run_script(self.config.get('boot_clean_script'))
         # freeze current instance and return snapshot
         instance.freeze()
-        return lxd_snapshot.LXDSnapshot(
-            self.platform, self.properties, self.config,
-            self.features, instance)
+        return LXDSnapshot(self.platform, self.properties, self.config,
+                           self.features, instance)
 
     def destroy(self):
         """Clean up data associated with image."""
diff --git a/tests/cloud_tests/instances/lxd.py b/tests/cloud_tests/platforms/lxd/instance.py
index 3b035d86..d2d2a1fd 100644
--- a/tests/cloud_tests/instances/lxd.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -2,14 +2,16 @@
 
 """Base LXD instance."""
-from . import base
-
 import os
 import shutil
 from tempfile import mkdtemp
 
+from cloudinit.util import subp, ProcessExecutionError
+
+from ..instances import Instance
 
-class LXDInstance(base.Instance):
+
+class LXDInstance(Instance):
     """LXD container backed instance."""
 
     platform_name = "lxd"
@@ -29,6 +31,7 @@ class LXDInstance(base.Instance):
             platform, name, properties, config, features)
         self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
         self._setup_console_log()
+        self.name = name
 
     @property
     def pylxd_container(self):
@@ -55,33 +58,25 @@ class LXDInstance(base.Instance):
         if env is None:
             env = {}
 
-        if stdin is not None:
-            # pylxd does not support input to execute.
-            # https://github.com/lxc/pylxd/issues/244
-            #
-            # The solution here is write a tmp file in the container
-            # and then execute a shell that sets it standard in to
-            # be from that file, removes it, and calls the comand.
-            tmpf = self.tmpfile()
-            self.write_data(tmpf, stdin)
-            ncmd = 'exec <"{tmpf}"; rm -f "{tmpf}"; exec "$@"'
-            command = (['sh', '-c', ncmd.format(tmpf=tmpf), 'stdinhack'] +
-                       list(command))
+        env_args = []
+        if env:
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
 
         # ensure instance is running and execute the command
         self.start()
-        # execute returns a ContainerExecuteResult, named tuple
-        # (exit_code, stdout, stderr)
-        res = self.pylxd_container.execute(command, environment=env)
-
-        # get out, exit and err from pylxd return
-        if not hasattr(res, 'exit_code'):
-            # pylxd 2.1.3 and earlier only return out and err, no exit
-            raise RuntimeError(
-                "No 'exit_code' in pylxd.container.execute return.\n"
-                "pylxd > 2.2 is required.")
-
-        return res.stdout, res.stderr, res.exit_code
+
+        # Use cmdline client due to https://github.com/lxc/pylxd/issues/268
+        exit_code = 0
+        try:
+            stdout, stderr = subp(
+                ['lxc', 'exec', self.name, '--'] + env_args + list(command),
+                data=stdin, decode=False)
+        except ProcessExecutionError as e:
+            exit_code = e.exit_code
+            stdout = e.stdout
+            stderr = e.stderr
+
+        return stdout, stderr, exit_code
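The pattern adopted above, shelling out to the lxc CLI because pylxd's execute cannot stream stdin (https://github.com/lxc/pylxd/issues/268), stands alone as a short sketch; lxc_exec is a hypothetical name and cloud-init's util.subp semantics are assumed:

    from cloudinit.util import ProcessExecutionError, subp

    def lxc_exec(container_name, command, stdin=None):
        """Run a command in a container, returning (stdout, stderr, rc)."""
        try:
            stdout, stderr = subp(
                ['lxc', 'exec', container_name, '--'] + list(command),
                data=stdin, decode=False)
            return stdout, stderr, 0
        except ProcessExecutionError as e:
            # a nonzero exit folds into the same triple instead of raising
            return e.stdout, e.stderr, e.exit_code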
 
     def read_data(self, remote_path, decode=False):
         """Read data from instance filesystem.
diff --git a/tests/cloud_tests/platforms/lxd.py b/tests/cloud_tests/platforms/lxd/platform.py
index ead0955b..6a016929 100644
--- a/tests/cloud_tests/platforms/lxd.py
+++ b/tests/cloud_tests/platforms/lxd/platform.py
@@ -4,15 +4,15 @@
 
 from pylxd import (Client, exceptions)
 
-from tests.cloud_tests.images import lxd as lxd_image
-from tests.cloud_tests.instances import lxd as lxd_instance
-from tests.cloud_tests.platforms import base
+from ..platforms import Platform
+from .image import LXDImage
+from .instance import LXDInstance
 from tests.cloud_tests import util
 
 DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443"
 
 
-class LXDPlatform(base.Platform):
+class LXDPlatform(Platform):
     """LXD test platform."""
 
     platform_name = 'lxd'
@@ -33,7 +33,7 @@ class LXDPlatform(base.Platform):
         pylxd_image = self.client.images.create_from_simplestreams(
             img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
             img_conf['alias'])
-        image = lxd_image.LXDImage(self, img_conf, pylxd_image)
+        image = LXDImage(self, img_conf, pylxd_image)
         if img_conf.get('override_templates', False):
             image.update_templates(self.config.get('template_overrides', {}),
                                    self.config.get('template_files', {}))
@@ -69,8 +69,8 @@ class LXDPlatform(base.Platform):
             'source': ({'type': 'image', 'fingerprint': image} if image else
                        {'type': 'copy', 'source': container})
         }, wait=block)
-        return lxd_instance.LXDInstance(self, container.name, properties,
-                                        config, features, container)
+        return LXDInstance(self, container.name, properties, config, features,
+                           container)
 
     def container_exists(self, container_name):
         """Check if container with name 'container_name' exists.
diff --git a/tests/cloud_tests/snapshots/lxd.py b/tests/cloud_tests/platforms/lxd/snapshot.py
index 39c55c5e..b524644f 100644
--- a/tests/cloud_tests/snapshots/lxd.py
+++ b/tests/cloud_tests/platforms/lxd/snapshot.py
@@ -2,10 +2,10 @@
 
 """Base LXD snapshot."""
 
-from tests.cloud_tests.snapshots import base
+from ..snapshots import Snapshot
 
 
-class LXDSnapshot(base.Snapshot):
+class LXDSnapshot(Snapshot):
     """LXD image copy backed snapshot."""
 
     platform_name = "lxd"
diff --git a/tests/cloud_tests/images/nocloudkvm.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
index 8678b07f..bc2b6e75 100644
--- a/tests/cloud_tests/images/nocloudkvm.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/image.py
@@ -8,11 +8,11 @@ import os
 import shutil
 import tempfile
 
-from tests.cloud_tests.images import base
-from tests.cloud_tests.snapshots import nocloudkvm as nocloud_kvm_snapshot
+from ..images import Image
+from .snapshot import NoCloudKVMSnapshot
 
 
-class NoCloudKVMImage(base.Image):
+class NoCloudKVMImage(Image):
     """NoCloud KVM backed image."""
 
     platform_name = "nocloud-kvm"
@@ -35,16 +35,6 @@ class NoCloudKVMImage(base.Image):
 
         super(NoCloudKVMImage, self).__init__(platform, config)
 
-    @property
-    def properties(self):
-        """Dictionary containing: 'arch', 'os', 'version', 'release'."""
-        return {
-            'arch': self.config['arch'],
-            'os': self.config['family'],
-            'release': self.config['release'],
-            'version': self.config['version'],
-        }
-
     def _execute(self, command, stdin=None, env=None):
         """Execute command in image, modifying image."""
         return self.mount_image_callback(command, stdin=stdin, env=env)
@@ -71,9 +61,8 @@ class NoCloudKVMImage(base.Image):
         if not self._img_path:
             raise RuntimeError()
 
-        return nocloud_kvm_snapshot.NoCloudKVMSnapshot(
-            self.platform, self.properties, self.config,
-            self.features, self._img_path)
+        return NoCloudKVMSnapshot(self.platform, self.properties, self.config,
+                                  self.features, self._img_path)
 
     def destroy(self):
         """Unset path to signal image is no longer used.
diff --git a/tests/cloud_tests/instances/nocloudkvm.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
index bc06a79e..932dc0fa 100644
--- a/tests/cloud_tests/instances/nocloudkvm.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -2,15 +2,17 @@
 
 """Base NoCloud KVM instance."""
 
+import copy
 import os
-import paramiko
 import socket
 import subprocess
 import time
+import uuid
 
+from ..instances import Instance
+from cloudinit.atomic_helper import write_json
 from cloudinit import util as c_util
-from tests.cloud_tests.instances import base
-from tests.cloud_tests import util
+from tests.cloud_tests import LOG, util
 
 # This domain contains reverse lookups for hostnames that are used.
 # The primary reason is so sudo will return quickly when it attempts
@@ -19,11 +21,10 @@ from tests.cloud_tests import util
 CI_DOMAIN = "i9n.cloud-init.io"
 
 
-class NoCloudKVMInstance(base.Instance):
+class NoCloudKVMInstance(Instance):
    """NoCloud KVM backed instance."""
 
     platform_name = "nocloud-kvm"
-    _ssh_client = None
 
     def __init__(self, platform, name, image_path, properties, config,
                  features, user_data, meta_data):
@@ -36,18 +37,72 @@ class NoCloudKVMInstance(base.Instance):
         @param config: dictionary of configuration values
         @param features: dictionary of supported feature flags
         """
+        super(NoCloudKVMInstance, self).__init__(
+            platform, name, properties, config, features
+        )
+
         self.user_data = user_data
-        self.meta_data = meta_data
-        self.ssh_key_file = os.path.join(platform.config['data_dir'],
-                                         platform.config['private_key'])
+        if meta_data:
+            meta_data = copy.deepcopy(meta_data)
+        else:
+            meta_data = {}
+
+        if 'instance-id' in meta_data:
+            iid = meta_data['instance-id']
+        else:
+            iid = str(uuid.uuid1())
+            meta_data['instance-id'] = iid
+
+        self.instance_id = iid
+        self.ssh_key_file = os.path.join(
+            platform.config['data_dir'], platform.config['private_key'])
+        self.ssh_pubkey_file = os.path.join(
+            platform.config['data_dir'], platform.config['public_key'])
+
+        self.ssh_pubkey = None
+        if self.ssh_pubkey_file:
+            with open(self.ssh_pubkey_file, "r") as fp:
+                self.ssh_pubkey = fp.read().rstrip('\n')
+
+            if not meta_data.get('public-keys'):
+                meta_data['public-keys'] = []
+            meta_data['public-keys'].append(self.ssh_pubkey)
+
+        self.ssh_ip = '127.0.0.1'
         self.ssh_port = None
         self.pid = None
         self.pid_file = None
         self.console_file = None
         self.disk = image_path
+        self.meta_data = meta_data
 
-        super(NoCloudKVMInstance, self).__init__(
-            platform, name, properties, config, features)
+    def shutdown(self, wait=True):
+        """Shut down the instance."""
+
+        if self.pid:
+            # This relies on _execute which uses sudo over ssh.  The ssh
+            # connection would get killed before sudo exited, so ignore
+            # errors.
+            cmd = ['shutdown', 'now']
+            try:
+                self._execute(cmd)
+            except util.InTargetExecuteError:
+                pass
+            self._ssh_close()
+
+            if wait:
+                LOG.debug("Executed shutdown, waiting on pid %s to end",
+                          self.pid)
+                time_for_shutdown = 120
+                give_up_at = time.time() + time_for_shutdown
+                pid_file_path = '/proc/%s' % self.pid
+                msg = ("pid %s did not exit in %s seconds after shutdown."
+                       % (self.pid, time_for_shutdown))
+                while True:
+                    if not os.path.exists(pid_file_path):
+                        break
+                    if time.time() > give_up_at:
+                        raise util.PlatformError("shutdown", msg)
+        self.pid = None
 
     def destroy(self):
         """Clean up instance."""
@@ -61,9 +116,7 @@ class NoCloudKVMInstance(base.Instance):
             os.remove(self.pid_file)
             self.pid = None
 
-        if self._ssh_client:
-            self._ssh_client.close()
-            self._ssh_client = None
+        self._ssh_close()
 
         super(NoCloudKVMInstance, self).destroy()
 
@@ -72,17 +125,21 @@ class NoCloudKVMInstance(base.Instance):
         if env:
             env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
 
-        return self.ssh(['sudo'] + env_args + list(command), stdin=stdin)
+        return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
 
     def generate_seed(self, tmpdir):
         """Generate nocloud seed from user-data and meta-data."""
         seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name)
         user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name)
+        meta_data_file = os.path.join(tmpdir, '%s_meta_data' % self.name)
 
         with open(user_data_file, "w") as ud_file:
            ud_file.write(self.user_data)
 
-        c_util.subp(['cloud-localds', seed_file, user_data_file])
+        # meta-data can be yaml, but more easily pretty printed with json
+        write_json(meta_data_file, self.meta_data)
+        c_util.subp(['cloud-localds', seed_file, user_data_file,
+                     meta_data_file])
 
         return seed_file
 
@@ -94,50 +151,6 @@ class NoCloudKVMInstance(base.Instance):
         s.close()
         return num
 
-    def ssh(self, command, stdin=None):
-        """Run a command via SSH."""
-        client = self._ssh_connect()
-
-        cmd = util.shell_pack(command)
-        try:
-            fp_in, fp_out, fp_err = client.exec_command(cmd)
-            channel = fp_in.channel
-            if stdin is not None:
-                fp_in.write(stdin)
-                fp_in.close()
-
-            channel.shutdown_write()
-            rc = channel.recv_exit_status()
-            return (fp_out.read(), fp_err.read(), rc)
-        except paramiko.SSHException as e:
-            raise util.InTargetExecuteError(
-                b'', b'', -1, command, self.name, reason=e)
-
-    def _ssh_connect(self, hostname='localhost', username='ubuntu',
-                     banner_timeout=120, retry_attempts=30):
-        """Connect via SSH."""
-        if self._ssh_client:
-            return self._ssh_client
-
-        private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
-        client = paramiko.SSHClient()
-        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        while retry_attempts:
-            try:
-                client.connect(hostname=hostname, username=username,
-                               port=self.ssh_port, pkey=private_key,
-                               banner_timeout=banner_timeout)
-                self._ssh_client = client
-                return client
-            except (paramiko.SSHException, TypeError):
-                time.sleep(1)
-                retry_attempts = retry_attempts - 1
-
-        error_desc = 'Failed command to: %s@%s:%s' % (username, hostname,
-                                                      self.ssh_port)
-        raise util.InTargetExecuteError('', '', -1, 'ssh connect',
-                                        self.name, error_desc)
-
     def start(self, wait=True, wait_for_cloud_init=False):
         """Start instance."""
         tmpdir = self.platform.config['data_dir']
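generate_seed() above delegates to cloud-localds; the inputs it produces look like this standalone sketch (file names are illustrative, and JSON is used for meta-data since JSON is valid YAML):

    import json
    import subprocess

    def make_seed(seed_img, user_data, meta_data):
        with open('user-data', 'w') as fp:
            fp.write(user_data)
        with open('meta-data', 'w') as fp:
            json.dump(meta_data, fp, indent=1)
        subprocess.check_call(
            ['cloud-localds', seed_img, 'user-data', 'meta-data'])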
diff --git a/tests/cloud_tests/platforms/nocloudkvm.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
index 76cd83ad..a7e6f5de 100644
--- a/tests/cloud_tests/platforms/nocloudkvm.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -9,18 +9,22 @@ from simplestreams import mirrors
 from simplestreams import objectstores
 from simplestreams import util as s_util
 
+from ..platforms import Platform
+from .image import NoCloudKVMImage
+from .instance import NoCloudKVMInstance
 from cloudinit import util as c_util
-from tests.cloud_tests.images import nocloudkvm as nocloud_kvm_image
-from tests.cloud_tests.instances import nocloudkvm as nocloud_kvm_instance
-from tests.cloud_tests.platforms import base
 from tests.cloud_tests import util
 
 
-class NoCloudKVMPlatform(base.Platform):
+class NoCloudKVMPlatform(Platform):
     """NoCloud KVM test platform."""
 
     platform_name = 'nocloud-kvm'
 
+    def __init__(self, config):
+        """Set up platform."""
+        super(NoCloudKVMPlatform, self).__init__(config)
+
     def get_image(self, img_conf):
         """Get image using specified image configuration.
@@ -62,7 +66,7 @@ class NoCloudKVMPlatform(base.Platform):
                 "Multiple images found in '%s': %s" % (search_d,
                                                        ' '.join(images)))
 
-        image = nocloud_kvm_image.NoCloudKVMImage(self, img_conf, images[0])
+        image = NoCloudKVMImage(self, img_conf, images[0])
         return image
 
     def create_instance(self, properties, config, features,
@@ -83,9 +87,7 @@ class NoCloudKVMPlatform(base.Platform):
         c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
                      '-b', src_img_path, img_path])
 
-        return nocloud_kvm_instance.NoCloudKVMInstance(self, name, img_path,
-                                                       properties, config,
-                                                       features, user_data,
-                                                       meta_data)
+        return NoCloudKVMInstance(self, name, img_path, properties, config,
+                                  features, user_data, meta_data)
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/nocloudkvm.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
index 21e908da..2dae3590 100644
--- a/tests/cloud_tests/snapshots/nocloudkvm.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
@@ -5,10 +5,10 @@ import os
 import shutil
 import tempfile
 
-from tests.cloud_tests.snapshots import base
+from ..snapshots import Snapshot
 
 
-class NoCloudKVMSnapshot(base.Snapshot):
+class NoCloudKVMSnapshot(Snapshot):
     """NoCloud KVM image copy backed snapshot."""
 
     platform_name = "nocloud-kvm"
@@ -41,10 +41,6 @@ class NoCloudKVMSnapshot(base.Snapshot):
         @param use_desc: description of snapshot instance use
         @return_value: an Instance
         """
-        key_file = os.path.join(self.platform.config['data_dir'],
-                                self.platform.config['public_key'])
-        user_data = self.inject_ssh_key(user_data, key_file)
-
         instance = self.platform.create_instance(
             self.properties, self.config, self.features, self._image_path,
             image_desc=str(self), use_desc=use_desc,
@@ -55,22 +51,6 @@ class NoCloudKVMSnapshot(base.Snapshot):
 
         return instance
 
-    def inject_ssh_key(self, user_data, key_file):
-        """Inject the authorized key into the user_data."""
-        with open(key_file) as f:
-            value = f.read()
-
-        key = 'ssh_authorized_keys:'
-        value = '    - %s' % value.strip()
-        user_data = user_data.split('\n')
-        if key in user_data:
-            user_data.insert(user_data.index(key) + 1, '%s' % value)
-        else:
-            user_data.insert(-1, '%s' % key)
-            user_data.insert(-1, '%s' % value)
-
-        return '\n'.join(user_data)
-
     def destroy(self):
         """Clean up snapshot data."""
         shutil.rmtree(self._workd)
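With inject_ssh_key removed above, the public key now travels in NoCloud meta-data rather than being spliced into the user-data text. The instance-side logic from NoCloudKVMInstance.__init__, distilled (build_meta_data is a hypothetical helper name):

    import copy
    import uuid

    def build_meta_data(meta_data, ssh_pubkey):
        md = copy.deepcopy(meta_data) if meta_data else {}
        md.setdefault('instance-id', str(uuid.uuid1()))
        if not md.get('public-keys'):
            md['public-keys'] = []
        md['public-keys'].append(ssh_pubkey)
        return md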
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
new file mode 100644
index 00000000..1542b3be
--- /dev/null
+++ b/tests/cloud_tests/platforms/platforms.py
@@ -0,0 +1,96 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base platform class."""
+import os
+
+from simplestreams import filters, mirrors
+from simplestreams import util as s_util
+
+from cloudinit import util as c_util
+
+
+class Platform(object):
+    """Base class for platforms."""
+
+    platform_name = None
+
+    def __init__(self, config):
+        """Set up platform."""
+        self.config = config
+        self._generate_ssh_keys(config['data_dir'])
+
+    def get_image(self, img_conf):
+        """Get image using specified image configuration.
+
+        @param img_conf: configuration for image
+        @return_value: cloud_tests.images instance
+        """
+        raise NotImplementedError
+
+    def destroy(self):
+        """Clean up platform data."""
+        pass
+
+    def _generate_ssh_keys(self, data_dir):
+        """Generate SSH keys to be used with image."""
+        filename = os.path.join(data_dir, self.config['private_key'])
+
+        if os.path.exists(filename):
+            c_util.del_file(filename)
+
+        c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096',
+                     '-f', filename, '-P', '',
+                     '-C', 'ubuntu@cloud_test'],
+                    capture=True)
+
+    @staticmethod
+    def _query_streams(img_conf, img_filter):
+        """Query streams for latest image given a specific filter.
+
+        @param img_conf: configuration for image
+        @param img_filter: array of filters as strings in 'key=value' format
+        @return: dictionary with latest image information or empty
+        """
+        def policy(content, path):
+            return s_util.read_signed(content, keyring=img_conf['keyring'])
+
+        (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
+        smirror = mirrors.UrlMirrorReader(url, policy=policy)
+
+        config = {'max_items': 1, 'filters': filters.get_filters(img_filter)}
+        tmirror = FilterMirror(config)
+        tmirror.sync(smirror, path)
+
+        try:
+            return tmirror.json_entries[0]
+        except IndexError:
+            raise RuntimeError('no images found with filter: %s' % img_filter)
+
+
+class FilterMirror(mirrors.BasicMirrorWriter):
+    """Taken from sstream-query to return query result as json array."""
+
+    def __init__(self, config=None):
+        super(FilterMirror, self).__init__(config=config)
+        if config is None:
+            config = {}
+        self.config = config
+        self.filters = config.get('filters', [])
+        self.json_entries = []
+
+    def load_products(self, path=None, content_id=None):
+        return {'content_id': content_id, 'products': {}}
+
+    def filter_item(self, data, src, target, pedigree):
+        return filters.filter_item(self.filters, data, src, pedigree)
+
+    def insert_item(self, data, src, target, pedigree, contentsource):
+        # src and target are top level products:1.0
+        # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]]
+        # contentsource is a ContentSource if 'path' exists in data or None
+        data = s_util.products_exdata(src, pedigree)
+        if 'path' in data:
+            data.update({'item_url': contentsource.url})
+        self.json_entries.append(data)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/base.py b/tests/cloud_tests/platforms/snapshots.py
index 94328982..94328982 100644
--- a/tests/cloud_tests/snapshots/base.py
+++ b/tests/cloud_tests/platforms/snapshots.py
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index e5933802..d8bc170f 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -27,10 +27,14 @@ default_release_config:
     # features groups and additional feature settings
     feature_groups: []
     features: {}
-  nocloud-kvm:
     mirror_url: https://cloud-images.ubuntu.com/daily
-    mirror_dir: '/srv/citest/nocloud-kvm'
+    mirror_dir: '/srv/citest/images'
     keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+  ec2:
+    # Choose from: [ebs, instance-store]
+    root-store: ebs
+    boot_timeout: 300
+  nocloud-kvm:
     setup_overrides: null
     override_templates: false
 # lxd specific default configuration options
@@ -128,7 +132,7 @@ releases:
       enabled: true
       release: bionic
       version: 18.04
-      family: ubuntu
+      os: ubuntu
      feature_groups:
        - base
        - debian_base
@@ -144,7 +148,7 @@ releases:
      enabled: true
      release: artful
      version: 17.10
-      family: ubuntu
+      os: ubuntu
      feature_groups:
        - base
        - debian_base
@@ -154,29 +158,13 @@ releases:
      alias: artful
      setup_overrides: null
      override_templates: false
-  zesty:
-    # EOL: Jan 2018
-    default:
-      enabled: true
-      release: zesty
-      version: 17.04
-      family: ubuntu
-      feature_groups:
-        - base
-        - debian_base
-        - ubuntu_specific
-    lxd:
-      sstreams_server: https://cloud-images.ubuntu.com/daily
-      alias: zesty
-      setup_overrides: null
-      override_templates: false
   xenial:
     # EOL: Apr 2021
     default:
      enabled: true
      release: xenial
      version: 16.04
-      family: ubuntu
+      os: ubuntu
      feature_groups:
        - base
        - debian_base
@@ -192,7 +180,7 @@ releases:
      enabled: true
      release: trusty
      version: 14.04
-      family: ubuntu
+      os: ubuntu
      feature_groups:
        - base
        - debian_base
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
index 179f40db..6d242115 100644
--- a/tests/cloud_tests/setup_image.py
+++ b/tests/cloud_tests/setup_image.py
@@ -5,7 +5,6 @@
 from functools import partial
 import os
 
-from cloudinit import util as c_util
 from tests.cloud_tests import LOG
 from tests.cloud_tests import stage, util
 
@@ -192,20 +191,6 @@ def enable_repo(args, image):
     image.execute(cmd, description=msg)
 
 
-def generate_ssh_keys(data_dir):
-    """Generate SSH keys to be used with image."""
-    LOG.info('generating SSH keys')
-    filename = os.path.join(data_dir, 'id_rsa')
-
-    if os.path.exists(filename):
-        c_util.del_file(filename)
-
-    c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096',
-                 '-f', filename, '-P', '',
-                 '-C', 'ubuntu@cloud_test'],
-                capture=True)
-
-
 def setup_image(args, image):
     """Set up image as specified in args.
 
@@ -239,9 +224,6 @@ def setup_image(args, image):
     LOG.info('setting up %s', image)
     res = stage.run_stage(
         'set up for {}'.format(image), calls, continue_after_error=False)
-    LOG.debug('after setup complete, installed cloud-init version is: %s',
-              installed_package_version(image, 'cloud-init'))
-    generate_ssh_keys(args.data_dir)
 
     return res
 
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/snapshots/__init__.py b/tests/cloud_tests/snapshots/__init__.py
deleted file mode 100644
index 93a54f5e..00000000
--- a/tests/cloud_tests/snapshots/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-
-def get_snapshot(image):
-    """Get snapshot from image."""
-    return image.snapshot()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
index 7183e017..8e0fb62f 100644
--- a/tests/cloud_tests/testcases.yaml
+++ b/tests/cloud_tests/testcases.yaml
@@ -7,22 +7,37 @@ base_test_data:
     #cloud-config
  collect_scripts:
    cloud-init.log: |
-      #!/bin/bash
+      #!/bin/sh
      cat /var/log/cloud-init.log
    cloud-init-output.log: |
-      #!/bin/bash
+      #!/bin/sh
      cat /var/log/cloud-init-output.log
    instance-id: |
-      #!/bin/bash
+      #!/bin/sh
      cat /run/cloud-init/.instance-id
    result.json: |
-      #!/bin/bash
+      #!/bin/sh
      cat /run/cloud-init/result.json
    status.json: |
-      #!/bin/bash
+      #!/bin/sh
      cat /run/cloud-init/status.json
    cloud-init-version: |
-      #!/bin/bash
+      #!/bin/sh
      dpkg-query -W -f='${Version}' cloud-init
+    system.journal.gz: |
+      #!/bin/sh
+      [ -d /run/systemd ] || { echo "not systemd."; exit 0; }
+      fail() { echo "ERROR:" "$@" 1>&2; exit 1; }
+      journal=""
+      for d in /run/log/journal /var/log/journal; do
+          for f in $d/*/system.journal; do
+              [ -f "$f" ] || continue
+              [ -z "$journal" ] ||
+                  fail "multiple journal found: $f $journal."
+              journal="$f"
+          done
+      done
+      [ -f "$journal" ] || fail "no journal file found."
+      gzip --to-stdout "$journal"
 
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 1706f59b..20e95955 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -12,7 +12,8 @@ from cloudinit import util as c_util
 class CloudTestCase(unittest.TestCase):
     """Base test class for verifiers."""
 
-    data = None
+    # data gets populated in get_suite.setUpClass
+    data = {}
     conf = None
     _cloud_config = None
 
@@ -29,12 +30,14 @@ class CloudTestCase(unittest.TestCase):
             raise AssertionError('Key "{}" not in cloud config'.format(name))
         return self.cloud_config[name]
 
-    def get_data_file(self, name):
+    def get_data_file(self, name, decode=True):
         """Get data file failing test if it is not present."""
         if name not in self.data:
             raise AssertionError('File "{}" missing from collect data'
                                  .format(name))
-        return self.data[name]
+        if not decode:
+            return self.data[name]
+        return self.data[name].decode('utf-8')
 
     def get_instance_id(self):
         """Get recorded instance id."""
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
index 129d2264..cf84e056 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
@@ -10,6 +10,11 @@ class TestAptconfigureSourcesList(base.CloudTestCase):
     def test_sources_list(self):
         """Test sources.list includes sources."""
         out = self.get_data_file('sources.list')
+
+        # Verify we have 6 entries
+        self.assertEqual(6, len(out.rstrip().split('\n')))
+
+        # Verify the keys generated the list correctly
         self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
                               '[a-z].* main restricted')
         self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
index 143cb080..87e470c1 100644
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
@@ -7,6 +7,12 @@ required_features:
 cloud_config: |
   #cloud-config
   apt:
+    primary:
+      - arches: [default]
+        uri: http://archive.ubuntu.com/ubuntu
+    security:
+      - arches: [default]
+        uri: http://security.ubuntu.com/ubuntu
     sources_list: |
       deb $MIRROR $RELEASE main restricted
       deb-src $MIRROR $RELEASE main restricted
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
index 3a93faa2..d490b228 100644
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
@@ -26,6 +26,6 @@ collect_scripts:
       grep '^pool' /etc/ntp.conf
     ntpq_servers: |
       #!/bin/sh
-      ntpq -p -w
+      ntpq -p -w -n
 
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
index d59d45a8..6b13b70e 100644
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
@@ -22,6 +22,6 @@ collect_scripts:
       grep '^server' /etc/ntp.conf
     ntpq_servers: |
       #!/bin/sh
-      ntpq -p -w
+      ntpq -p -w -n
 
# vi: ts=4 expandtab
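Collectors now store artifacts as bytes (note verify.py opening files 'rb' below) while get_data_file decodes on access by default; binary artifacts such as system.journal.gz opt out. An illustrative verifier, assuming the usual CloudTestCase base (MyExample is hypothetical):

    from tests.cloud_tests.testcases import base

    class MyExample(base.CloudTestCase):  # hypothetical test case
        def test_journal_collected(self):
            journal = self.get_data_file('system.journal.gz', decode=False)
            self.assertTrue(journal.startswith(b'\x1f\x8b'))  # gzip magic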
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
index eb6f0650..a405b30b 100644
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
@@ -1,7 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.instances.nocloudkvm import CI_DOMAIN
+from tests.cloud_tests import CI_DOMAIN
 from tests.cloud_tests.testcases import base
 
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index c5cd6974..6ff285e7 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -262,7 +262,7 @@ def shell_safe(cmd):
     out = subprocess.check_output(
         ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd))
     # out contains ' -- <data>\n'. drop the ' -- ' and the '\n'
-    return out[4:-1].decode()
+    return out.decode()[4:-1]
 
 
 def shell_pack(cmd):
@@ -321,9 +321,9 @@ class TargetBase(object):
             rcs = (0,)
 
         if description:
-            LOG.debug('Executing "%s"', description)
+            LOG.debug('executing "%s"', description)
         else:
-            LOG.debug("Executing command: %s", shell_quote(command))
+            LOG.debug("executing command: %s", shell_quote(command))
 
         out, err, rc = self._execute(command=command, stdin=stdin, env=env)
 
@@ -447,6 +447,19 @@ class InTargetExecuteError(c_util.ProcessExecutionError):
             reason=reason)
 
 
+class PlatformError(IOError):
+    """Error type for platform errors."""
+
+    default_desc = 'unexpected error in platform.'
+
+    def __init__(self, operation, description=None):
+        """Init error and parent error class."""
+        description = description if description else self.default_desc
+
+        message = '%s: %s' % (operation, description)
+        IOError.__init__(self, message)
+
+
 class TempDir(object):
     """Configurable temporary directory like tempfile.TemporaryDirectory."""
 
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index fc1efcfc..2a9fd520 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -29,7 +29,7 @@ def verify_data(base_dir, tests):
         data = {}
         test_dir = os.path.join(base_dir, test_name)
         for script_name in os.listdir(test_dir):
-            with open(os.path.join(test_dir, script_name), 'r') as fp:
+            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                 data[script_name] = fp.read()
 
         # get test suite and launch tests
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index fccbbd23..0c0f427a 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -1,16 +1,21 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+from collections import namedtuple
+import os
 import six
 
+from cloudinit.cmd import main as cli
 from cloudinit.tests import helpers as test_helpers
+from cloudinit.util import load_file, load_json
 
-from cloudinit.cmd import main as cli
 
 mock = test_helpers.mock
 
 
 class TestCLI(test_helpers.FilesystemMockingTestCase):
 
+    with_logs = True
+
     def setUp(self):
         super(TestCLI, self).setUp()
         self.stderr = six.StringIO()
@@ -24,6 +29,76 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
         except SystemExit as e:
             return e.code
 
+    def test_status_wrapper_errors_on_invalid_name(self):
+        """status_wrapper will error when the name parameter is not valid.
+
+        Valid name values are only init and modules.
+ """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('init1', myargs, data_d, link_d) + self.assertEqual('unknown name: init1', str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_errors_on_invalid_modes(self): + """status_wrapper will error if a parameter combination is invalid.""" + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('modules', myargs, data_d, link_d) + self.assertEqual( + "Invalid cloud init mode specified 'modules-bogusmode'", + str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_init_local_writes_fresh_status_info(self): + """When running in init-local mode, status_wrapper writes status.json. + + Old status and results artifacts are also removed. + """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + status_link = self.tmp_path('status.json', link_d) + # Write old artifacts which will be removed or updated. + for _dir in data_d, link_d: + test_helpers.populate_dir( + _dir, {'status.json': 'old', 'result.json': 'old'}) + + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(name, args): + # Return an error to watch status capture them + return 'SomeDatasource', ['an error'] + + myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode') + cli.status_wrapper('init', myargs, data_d, link_d) + # No errors reported in status + status_v1 = load_json(load_file(status_link))['v1'] + self.assertEqual(['an error'], status_v1['init-local']['errors']) + self.assertEqual('SomeDatasource', status_v1['datasource']) + self.assertFalse( + os.path.exists(self.tmp_path('result.json', data_d)), + 'unexpected result.json found') + self.assertFalse( + os.path.exists(self.tmp_path('result.json', link_d)), + 'unexpected result.json link found') + def test_no_arguments_shows_usage(self): exit_code = self._call_main() self.assertIn('usage: cloud-init', self.stderr.getvalue()) @@ -45,8 +120,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): """All known subparsers are represented in the cloud-int help doc.""" self._call_main() error = self.stderr.getvalue() - expected_subcommands = ['analyze', 'init', 'modules', 'single', - 'dhclient-hook', 'features', 'devel'] + expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook', + 'features', 'init', 'modules', 'single'] for subcommand in expected_subcommands: self.assertIn(subcommand, error) @@ -76,9 +151,11 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ - 'usage: cloud-init analyze', 'usage: cloud-init collect-logs', - 'usage: cloud-init devel'] - conditional_subcommands = ['analyze', 'collect-logs', 'devel'] + 'usage: cloud-init analyze', 'usage: cloud-init clean', + 'usage: cloud-init collect-logs', 'usage: cloud-init 
+
     def test_no_arguments_shows_usage(self):
         exit_code = self._call_main()
         self.assertIn('usage: cloud-init', self.stderr.getvalue())
@@ -45,8 +120,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
         """All known subparsers are represented in the cloud-init help doc."""
         self._call_main()
         error = self.stderr.getvalue()
-        expected_subcommands = ['analyze', 'init', 'modules', 'single',
-                                'dhclient-hook', 'features', 'devel']
+        expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook',
+                                'features', 'init', 'modules', 'single']
         for subcommand in expected_subcommands:
             self.assertIn(subcommand, error)
 
@@ -76,9 +151,11 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
         self.patchStdoutAndStderr(stdout=stdout)
 
         expected_errors = [
-            'usage: cloud-init analyze', 'usage: cloud-init collect-logs',
-            'usage: cloud-init devel']
-        conditional_subcommands = ['analyze', 'collect-logs', 'devel']
+            'usage: cloud-init analyze', 'usage: cloud-init clean',
+            'usage: cloud-init collect-logs', 'usage: cloud-init devel',
+            'usage: cloud-init status']
+        conditional_subcommands = [
+            'analyze', 'clean', 'collect-logs', 'devel', 'status']
         # The cloud-init entrypoint calls main without passing sys_argv
         for subcommand in conditional_subcommands:
             with mock.patch('sys.argv', ['cloud-init', subcommand, '-h']):
@@ -106,6 +183,22 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
         self._call_main(['cloud-init', 'collect-logs', '-h'])
         self.assertIn('usage: cloud-init collect-log', stdout.getvalue())
 
+    def test_clean_subcommand_parser(self):
+        """The subcommand cloud-init clean calls the subparser."""
+        # Provide -h param to clean to avoid having to mock behavior.
+        stdout = six.StringIO()
+        self.patchStdoutAndStderr(stdout=stdout)
+        self._call_main(['cloud-init', 'clean', '-h'])
+        self.assertIn('usage: cloud-init clean', stdout.getvalue())
+
+    def test_status_subcommand_parser(self):
+        """The subcommand cloud-init status calls the subparser."""
+        # Provide -h param to status to avoid having to mock behavior.
+        stdout = six.StringIO()
+        self.patchStdoutAndStderr(stdout=stdout)
+        self._call_main(['cloud-init', 'status', '-h'])
+        self.assertIn('usage: cloud-init status', stdout.getvalue())
+
     def test_devel_subcommand_parser(self):
         """The subcommand cloud-init devel calls the correct subparser."""
         self._call_main(['cloud-init', 'devel'])
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index ee88520d..2a1095b9 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -35,6 +35,7 @@ class CepkoMock(Cepko):
 # touched the underlying Cepko class methods.
 class CepkoResultTests(test_helpers.TestCase):
     def setUp(self):
+        self.c = Cepko()
         raise test_helpers.SkipTest('This test is completely useless')
 
     def test_getitem(self):
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index 82ee9714..4fa9616b 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -47,6 +47,9 @@ def register_mock_metaserver(base_url, data):
         elif isinstance(body, list):
             register(base_url.rstrip('/'), '\n'.join(body) + '\n')
         elif isinstance(body, dict):
+            if not body:
+                register(base_url.rstrip('/') + '/', 'not found',
+                         status_code=404)
             vals = []
             for k, v in body.items():
                 if isinstance(v, (str, list)):
@@ -67,7 +70,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
         super(TestAliYunDatasource, self).setUp()
         cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
         distro = {}
-        paths = helpers.Paths({})
+        paths = helpers.Paths({'run_dir': self.tmp_dir()})
         self.ds = ay.DataSourceAliYun(cfg, distro, paths)
         self.metadata_address = self.ds.metadata_urls[0]
 
@@ -91,9 +94,22 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
                             self.metadata_address,
                             self.ds.min_metadata_version, 'user-data')
 
+    # EC2 provides an instance-identity document which must return 404 here
+    # for this test to pass.
+    @property
+    def default_identity(self):
+        return {}
+
+    @property
+    def identity_url(self):
+        return os.path.join(self.metadata_address,
+                            self.ds.min_metadata_version,
+                            'dynamic', 'instance-identity')
+
     def regist_default_server(self):
         register_mock_metaserver(self.metadata_url, self.default_metadata)
         register_mock_metaserver(self.userdata_url, self.default_userdata)
+        register_mock_metaserver(self.identity_url, self.default_identity)
 
     def _test_get_data(self):
         self.assertEqual(self.ds.metadata, self.default_metadata)
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index a4dfb540..3253f3ad 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -18,7 +18,7 @@ import tempfile
 
 from cloudinit import helpers
 from cloudinit import util
-from cloudinit.tests.helpers import TestCase
+from cloudinit.tests.helpers import CiTestCase
 
 import cloudinit.sources.DataSourceAltCloud as dsac
 
@@ -97,7 +97,7 @@ def _dmi_data(expected):
     return _data
 
 
-class TestGetCloudType(TestCase):
+class TestGetCloudType(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.get_cloud_type()
     '''
@@ -143,14 +143,16 @@ class TestGetCloudType(TestCase):
         self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
 
 
-class TestGetDataCloudInfoFile(TestCase):
+class TestGetDataCloudInfoFile(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.get_data()
     With a contrived CLOUD_INFO_FILE
     '''
     def setUp(self):
         '''Set up.'''
-        self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+        self.tmp = self.tmp_dir()
+        self.paths = helpers.Paths(
+            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
         self.cloud_info_file = tempfile.mkstemp()[1]
         self.dmi_data = util.read_dmi_data
         dsac.CLOUD_INFO_FILE = self.cloud_info_file
@@ -207,14 +209,16 @@ class TestGetDataCloudInfoFile(TestCase):
         self.assertEqual(False, dsrc.get_data())
 
 
-class TestGetDataNoCloudInfoFile(TestCase):
+class TestGetDataNoCloudInfoFile(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.get_data()
     Without a CLOUD_INFO_FILE
     '''
     def setUp(self):
         '''Set up.'''
-        self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+        self.tmp = self.tmp_dir()
+        self.paths = helpers.Paths(
+            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
         self.dmi_data = util.read_dmi_data
         dsac.CLOUD_INFO_FILE = \
             'no such file'
@@ -254,7 +258,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
         self.assertEqual(False, dsrc.get_data())
 
 
-class TestUserDataRhevm(TestCase):
+class TestUserDataRhevm(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.user_data_rhevm()
     '''
@@ -320,7 +324,7 @@ class TestUserDataRhevm(TestCase):
         self.assertEqual(False, dsrc.user_data_rhevm())
 
 
-class TestUserDataVsphere(TestCase):
+class TestUserDataVsphere(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.user_data_vsphere()
     '''
@@ -368,7 +372,7 @@ class TestUserDataVsphere(TestCase):
         self.assertEqual(1, m_mount_cb.call_count)
 
 
-class TestReadUserDataCallback(TestCase):
+class TestReadUserDataCallback(CiTestCase):
     '''
     Test to exercise method: DataSourceAltCloud.read_user_data_callback()
     '''
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 7cb1812a..254e9876 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -5,20 +5,19 @@ from cloudinit.util import b64e, decode_binary, load_file, write_file
 from cloudinit.sources import DataSourceAzure as dsaz
 from cloudinit.util import find_freebsd_part
 from cloudinit.util import get_path_dev_freebsd
-
+from cloudinit.version import version_string as vs
 from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock,
                                      ExitStack, PY26, SkipTest)
 
 import crypt
 import os
-import shutil
 import stat
-import tempfile
 import xml.etree.ElementTree as ET
 import yaml
 
 
-def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
+def construct_valid_ovf_env(data=None, pubkeys=None,
+                            userdata=None, platform_settings=None):
     if data is None:
         data = {'HostName': 'FOOHOST'}
     if pubkeys is None:
@@ -38,9 +37,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     """
     for key, dval in data.items():
         if isinstance(dval, dict):
-            val = dval.get('text')
-            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items()
-                                    if k != 'text'])
+            val = dict(dval).get('text')
+            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
+                                    in dict(dval).items() if k != 'text'])
         else:
             val = dval
             attrs = ""
@@ -68,10 +67,12 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
                     xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
       <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
       <ProvisionGuestAgent>false</ProvisionGuestAgent>
-      <GuestAgentPackageName i:nil="true" />
-    </PlatformSettings></wa:PlatformSettingsSection>
-</Environment>
-    """
+      <GuestAgentPackageName i:nil="true" />"""
+    if platform_settings:
+        for k, v in platform_settings.items():
+            content += "<%s>%s</%s>\n" % (k, v, k)
+    content += """</PlatformSettings></wa:PlatformSettingsSection>
+</Environment>"""
 
     return content
 
@@ -84,11 +85,11 @@ class TestAzureDataSource(CiTestCase):
         super(TestAzureDataSource, self).setUp()
         if PY26:
             raise SkipTest("Does not work on python 2.6")
-        self.tmp = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
+        self.tmp = self.tmp_dir()
 
         # patch cloud_dir, so our 'seed_dir' is guaranteed empty
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
+        self.paths = helpers.Paths(
+            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
         self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
 
         self.patches = ExitStack()
self.mock_out_azure_moving_parts() @@ -714,21 +720,24 @@ class TestAzureBounce(TestCase): def test_disabled_bounce_does_not_change_hostname(self): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_disabled_bounce_does_not_perform_bounce( self, perform_hostname_bounce): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) def test_same_hostname_does_not_change_hostname(self): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') @@ -737,7 +746,8 @@ class TestAzureBounce(TestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') @@ -751,6 +761,22 @@ class TestAzureBounce(TestCase): self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) + def test_bounce_skipped_on_ifupdown_absent(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), + agent_command=['not', '__builtin__']) + patch_path = 'cloudinit.sources.DataSourceAzure.util.which' + with mock.patch(patch_path) as m_which: + m_which.return_value = None + ret = self._get_and_setup(dsrc) + self.assertEqual([mock.call('ifup')], m_which.call_args_list) + self.assertTrue(ret) + self.assertIn( + "Skipping network bounce: ifupdown utils aren't present.", + self.logs.getvalue()) + def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' @@ -815,9 +841,7 @@ class TestAzureBounce(TestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_used_by_default(self): - cmd = 'default-bounce-command' - dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + def test_default_bounce_command_ifup_used_by_default(self): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) @@ -825,7 +849,8 @@ class TestAzureBounce(TestCase): self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) bounce_args = self.subp.call_args[1]['args'] - self.assertEqual(cmd, bounce_args) + self.assertEqual( + dsaz.BOUNCE_COMMAND_IFUP, bounce_args) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def 
test_set_hostname_option_can_disable_bounce( @@ -895,9 +920,6 @@ class TestCanDevBeReformatted(CiTestCase): setattr(self, sattr, patcher.start()) self.addCleanup(patcher.stop) - def setUp(self): - super(TestCanDevBeReformatted, self).setUp() - def patchup(self, devs): bypath = {} for path, data in devs.items(): @@ -952,14 +974,14 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda3': {'num': 3}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" self.patchup({'/dev/sda': {}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) def test_two_partitions_not_ntfs_false(self): @@ -971,7 +993,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_two_partitions_ntfs_populated_false(self): @@ -984,7 +1006,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['secret.txt']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_two_partitions_ntfs_empty_is_true(self): @@ -996,7 +1018,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_not_ntfs_false(self): @@ -1007,7 +1029,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'zfs'}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_one_partition_ntfs_populated_false(self): @@ -1019,7 +1041,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['file1.txt', 'file2.exe']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): @@ -1030,7 +1052,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): @@ -1042,7 +1064,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['dataloss_warning_readme.txt']} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_through_realpath_is_true(self): @@ -1057,7 +1079,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb1'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_three_partition_through_realpath_is_false(self): @@ -1076,7 +1098,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb3'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - 
self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) @@ -1088,4 +1110,146 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch.object(dsaz, 'get_hostname') +@mock.patch.object(dsaz, 'set_hostname') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_read_azure_ovf_with_true_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag if the proper setting is present.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertTrue(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_with_false_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag to false if the proper setting is false.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "False"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_without_flag(self, *args): + """The read_azure_ovf method should not set the + PreprovisionedVM cfg flag.""" + content = construct_valid_ovf_env() + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _poll_imds method should return the ovf_env.xml.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf") + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', timeout=60.0, + url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _reprovision method should call poll IMDS.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 
'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + content = construct_valid_ovf_env(data=odata) + fake_resp.return_value = mock.MagicMock(status_code=200, text=content) + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + md, ud, cfg, d = dsa._reprovision() + self.assertEqual(md['local-hostname'], hostname) + self.assertEqual(cfg['system_info']['default_user']['name'], username) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', timeout=60.0, url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs()}, + method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('os.path.isfile') + def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args): + """The _should_reprovision method should return True when the config + flag is present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'PreprovisionedVm': True}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_with_file_existing(self, isfile, *args): + """The _should_reprovision method should return True if the sentinel + exists.""" + isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'preprovisionedvm': False}, None))) + + @mock.patch('os.path.isfile') + def test__should_reprovision_returns_false(self, isfile, *args): + """The _should_reprovision method should return False + if config and sentinel are not present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertFalse(dsa._should_reprovision((None, None, {}, None))) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index e4c59907..f6a59b6b 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import helpers from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma @@ -38,10 +39,12 @@ class CepkoMock(Cepko): return self -class DataSourceCloudSigmaTest(test_helpers.TestCase): +class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() @@ -85,7 +88,8 @@ class
DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() @@ -94,7 +98,8 @@ class DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_cloudinit_key_in_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"]["cloudinit"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 96144b64..d6d2d6b2 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -33,6 +33,7 @@ class TestCloudStackPasswordFetching(CiTestCase): self.patches.enter_context(mock.patch( mod_name + '.dhcp.networkd_get_option_from_leases', get_networkd_server_address)) + self.tmp = self.tmp_dir() def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) @@ -43,26 +44,30 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_empty_password_doesnt_create_config(self): self._set_password_server_response('') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def test_saved_password_doesnt_create_config(self): self._set_password_server_response('saved_password') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def test_password_sets_password(self): password = 'SekritSquirrel' self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) def test_bad_request_doesnt_stop_ds_from_working(self): self._set_password_server_response('bad_request') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) self.assertTrue(ds.get_data()) def assertRequestTypesSent(self, subp, expected_request_types): @@ -77,14 +82,16 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_valid_response_means_password_marked_as_saved(self): password = 'SekritSquirrel' subp = self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password', 'saved_password']) def _check_password_not_saved_for(self, response_string): subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) diff 
--git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 237c189b..68400f22 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -3,9 +3,6 @@ from copy import copy, deepcopy import json import os -import shutil -import six -import tempfile from cloudinit import helpers from cloudinit.net import eni @@ -15,7 +12,7 @@ from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit.sources.helpers import openstack from cloudinit import util -from cloudinit.tests.helpers import TestCase, ExitStack, mock +from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' @@ -223,12 +220,11 @@ CFG_DRIVE_FILES_V2 = { 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} -class TestConfigDriveDataSource(TestCase): +class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() def test_ec2_metadata(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) @@ -462,6 +458,12 @@ class TestConfigDriveDataSource(TestCase): self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs()) + # Verify that uppercase labels are also found. + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + finally: util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition @@ -469,31 +471,27 @@ class TestConfigDriveDataSource(TestCase): @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertEqual(myds.get_public_ssh_keys(), [OSTACK_META['public_keys']['mykey']]) -class TestNetJson(TestCase): +class TestNetJson(CiTestCase): def setUp(self): super(TestNetJson, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() self.maxDiff = None @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - myds = cfg_ds_from_dir(self.tmp) + myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) network_config = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) @@ -598,11 +596,10 @@ class TestNetJson(TestCase): self.assertEqual(out_data, conv_data) -class TestConvertNetworkData(TestCase): +class TestConvertNetworkData(CiTestCase): def setUp(self): super(TestConvertNetworkData, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() def 
_getnames_in_config(self, ncfg): return set([n['name'] for n in ncfg['config'] @@ -724,14 +721,18 @@ class TestConvertNetworkData(TestCase): self.assertEqual(expected, config_name2mac) -def cfg_ds_from_dir(seed_d): - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, - helpers.Paths({})) - cfg_ds.seed_dir = seed_d +def cfg_ds_from_dir(base_d, files=None): + run = os.path.join(base_d, "run") + os.mkdir(run) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) + cfg_ds.seed_dir = os.path.join(base_d, "seed") + if files: + populate_dir(cfg_ds.seed_dir, files) cfg_ds.known_macs = KNOWN_MACS.copy() if not cfg_ds.get_data(): raise RuntimeError("Data source did not extract itself from" - " seed directory %s" % seed_d) + " seed directory %s" % cfg_ds.seed_dir) return cfg_ds @@ -749,17 +750,4 @@ def populate_ds_from_read_config(cfg_ds, source, results): cfg_ds.network_json, known_macs=KNOWN_MACS) -def populate_dir(seed_dir, files): - for (name, content) in files.items(): - path = os.path.join(seed_dir, name) - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - if isinstance(content, six.text_type): - mode = "w" - else: - mode = "wb" - with open(path, mode) as fp: - fp.write(content) - # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index f264f361..3127014b 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -13,7 +13,7 @@ from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean from cloudinit.sources.helpers import digitalocean -from cloudinit.tests.helpers import mock, TestCase +from cloudinit.tests.helpers import mock, CiTestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... 
test2@do.co"] @@ -135,14 +135,17 @@ def _mock_dmi(): return (True, DO_META.get('id')) -class TestDataSourceDigitalOcean(TestCase): +class TestDataSourceDigitalOcean(CiTestCase): """ Test reading the meta-data """ + def setUp(self): + super(TestDataSourceDigitalOcean, self).setUp() + self.tmp = self.tmp_dir() def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({})) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) ds.use_ip4LL = False if get_sysinfo is not None: ds._get_sysinfo = get_sysinfo @@ -194,11 +197,10 @@ class TestDataSourceDigitalOcean(TestCase): self.assertIsInstance(ds.get_public_ssh_keys(), list) -class TestNetworkConvert(TestCase): +class TestNetworkConvert(CiTestCase): - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def _get_networking(self, m_get_by_mac): - m_get_by_mac.return_value = { + def _get_networking(self): + self.m_get_by_mac.return_value = { '04:01:57:d1:9e:01': 'ens1', '04:01:57:d1:9e:02': 'ens2', 'b8:ae:ed:75:5f:9a': 'enp0s25', @@ -208,6 +210,10 @@ class TestNetworkConvert(TestCase): self.assertIn('config', netcfg) return netcfg + def setUp(self): + super(TestNetworkConvert, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + def test_networking_defined(self): netcfg = self._get_networking() self.assertIsNotNone(netcfg) diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index ba328ee9..0f7267bb 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -186,6 +186,7 @@ class TestEc2(test_helpers.HttprettyTestCase): super(TestEc2, self).setUp() self.datasource = ec2.DataSourceEc2 self.metadata_addr = self.datasource.metadata_urls[0] + self.tmp = self.tmp_dir() def data_url(self, version): """Return a metadata url based on the version provided.""" @@ -199,7 +200,7 @@ class TestEc2(test_helpers.HttprettyTestCase): def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): self.uris = [] distro = {} - paths = helpers.Paths({}) + paths = helpers.Paths({'run_dir': self.tmp}) if sys_cfg is None: sys_cfg = {} ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) @@ -329,7 +330,8 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.fallback_nic = 'eth9' with mock.patch(get_interface_mac_path) as m_get_interface_mac: m_get_interface_mac.return_value = mac1 - ds.network_config # Will re-crawl network metadata + nc = ds.network_config # Will re-crawl network metadata + self.assertIsNotNone(nc) self.assertIn('Re-crawl of metadata service', self.logs.getvalue()) expected = {'version': 1, 'config': [ {'mac_address': '06:17:04:d7:26:09', @@ -423,7 +425,7 @@ class TestEc2(test_helpers.HttprettyTestCase): self.logs.getvalue()) @httpretty.activate - @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index d399ae7a..f77c2c40 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -4,13 +4,16 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+import datetime import httpretty +import json import mock import re from base64 import b64encode, b64decode from six.moves.urllib_parse import urlparse +from cloudinit import distros from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceGCE @@ -21,10 +24,7 @@ from cloudinit.tests import helpers as test_helpers GCE_META = { 'instance/id': '123', 'instance/zone': 'foo/bar', - 'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server', 'instance/hostname': 'server.project-foo.local', - # UnicodeDecodeError below if set to ds.userdata instead of userdata_raw - 'instance/attributes/user-data': b'/bin/echo \xff\n', } GCE_META_PARTIAL = { @@ -37,11 +37,13 @@ GCE_META_ENCODING = { 'instance/id': '12345', 'instance/hostname': 'server.project-baz.local', 'instance/zone': 'baz/bang', - 'instance/attributes/user-data': b64encode(b'/bin/echo baz\n'), - 'instance/attributes/user-data-encoding': 'base64', + 'instance/attributes': { + 'user-data': b64encode(b'/bin/echo baz\n').decode('utf-8'), + 'user-data-encoding': 'base64', + } } -HEADERS = {'X-Google-Metadata-Request': 'True'} +HEADERS = {'Metadata-Flavor': 'Google'} MD_URL_RE = re.compile( r'http://metadata.google.internal/computeMetadata/v1/.*') @@ -54,10 +56,15 @@ def _set_mock_metadata(gce_meta=None): url_path = urlparse(uri).path if url_path.startswith('/computeMetadata/v1/'): path = url_path.split('/computeMetadata/v1/')[1:][0] + recursive = path.endswith('/') + path = path.rstrip('/') else: path = None if path in gce_meta: - return (200, headers, gce_meta.get(path)) + response = gce_meta.get(path) + if recursive: + response = json.dumps(response) + return (200, headers, response) else: return (404, headers, '') @@ -69,10 +76,21 @@ def _set_mock_metadata(gce_meta=None): @httpretty.activate class TestDataSourceGCE(test_helpers.HttprettyTestCase): + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + def setUp(self): + tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': tmp})) ppatch = self.m_platform_reports_gce = mock.patch( 'cloudinit.sources.DataSourceGCE.platform_reports_gce') self.m_platform_reports_gce = ppatch.start() @@ -89,6 +107,10 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertDictContainsSubset(HEADERS, req_header) def test_metadata(self): + # UnicodeDecodeError if set to ds.userdata instead of userdata_raw + meta = GCE_META.copy() + meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + _set_mock_metadata() self.ds.get_data() @@ -117,8 +139,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): _set_mock_metadata(GCE_META_ENCODING) self.ds.get_data() - decoded = b64decode( - GCE_META_ENCODING.get('instance/attributes/user-data')) + instance_data = GCE_META_ENCODING.get('instance/attributes') + decoded = b64decode(instance_data.get('user-data')) self.assertEqual(decoded, self.ds.get_userdata_raw()) def test_missing_required_keys_return_false(self): @@ -130,33 +152,124 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(False, self.ds.get_data()) httpretty.reset() - def test_project_level_ssh_keys_are_used(self): + def 
test_no_ssh_keys_metadata(self): _set_mock_metadata() self.ds.get_data() + self.assertEqual([], self.ds.get_public_ssh_keys()) + + def test_cloudinit_ssh_keys(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") + def test_default_user_ssh_keys(self, mock_ug_util): + mock_ug_util.normalize_users_groups.return_value = None, None + mock_ug_util.extract_default.return_value = 'ubuntu', None + ubuntu_ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, self._make_distro('ubuntu'), + helpers.Paths({'run_dir': self.tmp_dir()})) + + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } - # we expect a list of public ssh keys with user names stripped - self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'], - self.ds.get_public_ssh_keys()) + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + ubuntu_ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) + + def test_instance_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), + 'block-project-ssh-keys': 'False', + } - def test_instance_level_ssh_keys_are_used(self): - key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() - self.assertIn(key_content, self.ds.get_public_ssh_keys()) + expected = [valid_key.format(key) for key in range(2)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_block_project_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 
'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'block-project-ssh-keys': 'True', + } - def test_instance_level_keys_replace_project_level_keys(self): - key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() - self.assertEqual([key_content], self.ds.get_public_ssh_keys()) + expected = [valid_key.format(0)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) def test_only_last_part_of_zone_used_for_availability_zone(self): _set_mock_metadata() @@ -171,5 +284,44 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(False, ret) m_fetcher.assert_not_called() + def test_has_expired(self): + + def _get_timestamp(days): + format_str = '%Y-%m-%dT%H:%M:%S+0000' + today = datetime.datetime.now() + timestamp = today + datetime.timedelta(days=days) + return timestamp.strftime(format_str) + + past = _get_timestamp(-1) + future = _get_timestamp(1) + ssh_keys = { + None: False, + '': False, + 'Invalid': False, + 'user:ssh-rsa key user@domain.com': False, + 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, + 'user:ssh-rsa key google-ssh': False, + 'user:ssh-rsa key google-ssh {invalid:json}': False, + 'user:ssh-rsa key google-ssh {"userName":"user"}': False, + 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, + } + + for key, expired in ssh_keys.items(): + self.assertEqual(DataSourceGCE._has_expired(key), expired) + + def test_parse_public_keys_non_ascii(self): + public_key_data = [ + 'cloudinit:rsa ssh-ke%s invalid' % chr(165), + 'use%sname:rsa ssh-key' % chr(174), + 'cloudinit:test 1', + 'default:test 2', + 'user:test 3', + ] + expected = ['test 1', 'test 2'] + found = DataSourceGCE._parse_public_keys( + public_key_data, default_user='default') + self.assertEqual(sorted(found), sorted(expected)) # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 289c6a40..6e4031cf 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
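# Editorial aside -- not part of the patch. The new MAAS tests in the hunks
# below exercise two small helpers. A usage sketch with the same fake
# credentials the tests use (the printed value is the v1 hash asserted in
# TestGetIdHash below):
from cloudinit.sources import DataSourceMAAS

oauth_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
             'token_key': 'FAKE_TOKEN_KEY',
             'token_secret': 'FAKE_TOKEN_SECRET',
             'consumer_secret': None}
# Builds an url_helper.OauthUrlHelper from only the oauth-related fields;
# unrelated keys in the datasource config are not passed through.
helper = DataSourceMAAS.get_oauth_helper(oauth_cfg)

# Derives a stable, versioned node identifier from the credentials; extra
# config entries do not change the result.
v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY',
          'token_secret': 'TSEC'}
print(DataSourceMAAS.get_id_from_ds_cfg(v1_cfg))
# -> 'v1:403ee5f1...' as asserted in TestGetIdHash below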
from copy import copy +import mock import os import shutil import tempfile @@ -8,15 +9,10 @@ import yaml from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper -from cloudinit.tests.helpers import TestCase, populate_dir +from cloudinit.tests.helpers import CiTestCase, populate_dir -try: - from unittest import mock -except ImportError: - import mock - -class TestMAASDataSource(TestCase): +class TestMAASDataSource(CiTestCase): def setUp(self): super(TestMAASDataSource, self).setUp() @@ -159,4 +155,47 @@ class TestMAASDataSource(TestCase): self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) self.assertEqual(expected_vd, vd) + +@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") +class TestGetOauthHelper(CiTestCase): + with_logs = True + base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', + 'token_key': 'FAKE_TOKEN_KEY', + 'token_secret': 'FAKE_TOKEN_SECRET', + 'consumer_secret': None} + + def test_all_required(self, m_helper): + """Valid config as expected.""" + DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + def test_other_fields_not_passed_through(self, m_helper): + """Only relevant fields are passed through.""" + mycfg = self.base_cfg.copy() + mycfg['unrelated_field'] = 'unrelated' + DataSourceMAAS.get_oauth_helper(mycfg) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + +class TestGetIdHash(CiTestCase): + v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', + 'token_secret': 'TSEC'} + v1_id = ( + 'v1:' + '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + + def test_v1_expected(self): + """Test v1 id generated as expected working behavior from config.""" + result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) + self.assertEqual(self.v1_id, result) + + def test_v1_extra_fields_are_ignored(self): + """Test v1 id ignores unused entries in config.""" + cfg = self.v1_cfg.copy() + cfg['consumer_secret'] = "BOO" + cfg['unrelated'] = "HI MOM" + result = DataSourceMAAS.get_id_from_ds_cfg(cfg) + self.assertEqual(self.v1_id, result) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index fea9156b..70d50de4 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -3,22 +3,20 @@ from cloudinit import helpers from cloudinit.sources import DataSourceNoCloud from cloudinit import util -from cloudinit.tests.helpers import TestCase, populate_dir, mock, ExitStack +from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack import os -import shutil -import tempfile import textwrap import yaml -class TestNoCloudDataSource(TestCase): +class TestNoCloudDataSource(CiTestCase): def setUp(self): super(TestNoCloudDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.cmdline = "root=TESTCMDLINE" @@ -215,7 +213,7 @@ class TestNoCloudDataSource(TestCase): self.assertNotIn(gateway, str(dsrc.network_config)) -class TestParseCommandLineData(TestCase): +class TestParseCommandLineData(CiTestCase): def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 
e7d55692..5c3ba012 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -3,12 +3,11 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util -from cloudinit.tests.helpers import mock, populate_dir, TestCase +from cloudinit.tests.helpers import mock, populate_dir, CiTestCase +from textwrap import dedent import os import pwd -import shutil -import tempfile import unittest @@ -32,18 +31,20 @@ USER_DATA = '#cloud-config\napt_upgrade: true' SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" -class TestOpenNebulaDataSource(TestCase): +class TestOpenNebulaDataSource(CiTestCase): parsed_user = None def setUp(self): super(TestOpenNebulaDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) # defaults for few tests self.ds = ds.DataSourceOpenNebula @@ -197,24 +198,96 @@ class TestOpenNebulaDataSource(TestCase): @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_hostname(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d) - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname']) + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + PUBLIC_IP, results['metadata']['local-hostname']) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'}) - results = ds.read_context_disk_dir(self.seed_dir) - - self.assertTrue('network-interfaces' in results) - self.assertTrue('1.2.3.4' in results['network-interfaces']) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for Older OpenNebula? 
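# Editorial aside -- not part of the patch. The cases added below pin down
# the fallbacks when context values are empty: with ETH0_MASK unset the
# datasource falls back to a /24 (255.255.255.0), and with ETH0_NETWORK unset
# it derives the network address from the IP and mask, e.g.
# 10.18.1.1/255.255.255.0 -> 10.18.1.0. A stdlib illustration of that
# derivation (not cloud-init's actual implementation):
import ipaddress

iface = ipaddress.ip_interface(u'10.18.1.1/255.255.255.0')
assert str(iface.network.network_address) == '10.18.1.0'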
+ populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of using Virtual Network contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_NETWORK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '10.18.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.0.0' in results['network-interfaces']) + + # ETH0_NETWORK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.1.0' in results['network-interfaces']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.0.0' in results['network-interfaces']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.255.0' in results['network-interfaces']) def test_find_candidates(self): def my_devs_with(criteria): @@ -235,7 +308,7 @@ class TestOpenNebulaDataSource(TestCase): class TestOpenNebulaNetwork(unittest.TestCase): - system_nics = {'02:00:0a:12:01:01': 'eth0'} + system_nics = ('eth0', 'ens3') def test_lo(self): net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) @@ -246,45 +319,101 @@ iface lo inet loopback @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = self.system_nics - net = ds.OpenNebulaNetwork({}) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 10.18.1.1 - network 10.18.1.0 - netmask 255.255.255.0 -''') + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}) + self.assertEqual(net.gen_conf(), dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.1.0 + netmask 255.255.255.0 + """.format(dev=nic, macaddr=MACADDR))) def test_eth0_override(self): context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '1.2.3.4', - 'ETH0_NETWORK': '1.2.3.0', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', 'ETH0_MASK': '255.255.0.0', 'ETH0_GATEWAY': '1.2.3.5', 'ETH0_DOMAIN': 'example.com', - 'ETH0_DNS': 
'1.2.3.6 1.2.3.7' + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': '02:00:0a:12:01:01' } - - net = ds.OpenNebulaNetwork(context, - system_nics_by_mac=self.system_nics) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 1.2.3.4 - network 1.2.3.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 -''') + for nic in self.system_nics: + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """).format(dev=nic, macaddr=MACADDR) + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac={MACADDR: nic}) + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_DOMAIN': 'example.com', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': MAC_2, + 'ETH3_IP': '10.3.1.3', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_MASK': '255.255.0.0', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_DOMAIN': 'third.example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_MAC': MAC_1, + } + net = ds.OpenNebulaNetwork( + context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}) + + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto enp0s25 + iface enp0s25 inet static + #hwaddress 02:00:0a:12:01:01 + address 10.3.1.3 + network 10.3.0.0 + netmask 255.255.0.0 + gateway 10.3.0.1 + dns-search third.example.com + dns-nameservers 1.2.3.8 10.3.1.2 + + auto enp1s2 + iface enp1s2 inet static + #hwaddress 02:00:0a:12:01:02 + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """) + + self.assertEqual(expected, net.gen_conf()) class TestParseShellConfig(unittest.TestCase): diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index ed367e05..42c31554 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -131,6 +131,10 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): VERSION = 'latest' + def setUp(self): + super(TestOpenStackDataSource, self).setUp() + self.tmp = self.tmp_dir() + @hp.activate def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) @@ -232,7 +236,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertTrue(found) @@ -256,7 +260,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertFalse(found) @@ -271,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 
_register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, @@ -294,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 700da86c..fc4eb36e 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -5,11 +5,17 @@ # This file is part of cloud-init. See LICENSE file for license information. import base64 -from collections import OrderedDict +import os -from cloudinit.tests import helpers as test_helpers +from collections import OrderedDict +from textwrap import dedent +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase, wrap_and_call +from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptNotFound) OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?> <Environment xmlns="http://schemas.dmtf.org/ovf/environment/1" @@ -42,7 +48,7 @@ def fill_properties(props, template=OVF_ENV_CONTENT): return template.format(properties=properties) -class TestReadOvfEnv(test_helpers.TestCase): +class TestReadOvfEnv(CiTestCase): def test_with_b64_userdata(self): user_data = "#!/bin/sh\necho hello world\n" user_data_b64 = base64.b64encode(user_data.encode()).decode() @@ -72,7 +78,104 @@ class TestReadOvfEnv(test_helpers.TestCase): self.assertIsNone(ud) -class TestTransportIso9660(test_helpers.CiTestCase): +class TestMarkerFiles(CiTestCase): + + def setUp(self): + super(TestMarkerFiles, self).setUp() + self.tdir = self.tmp_dir() + + def test_false_when_markerid_none(self): + """Return False when markerid provided is None.""" + self.assertFalse( + dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) + + def test_markerid_file_exist(self): + """Return False when markerid file path does not exist, + True otherwise.""" + self.assertFalse( + dsovf.check_marker_exists('123', self.tdir)) + + marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) + util.write_file(marker_file, '') + self.assertTrue( + dsovf.check_marker_exists('123', self.tdir) + ) + + def test_marker_file_setup(self): + """Test creation of marker files.""" + markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) + self.assertFalse(os.path.exists(markerfilepath)) + dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) + self.assertTrue(os.path.exists(markerfilepath)) + + +class TestDatasourceOVF(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDatasourceOVF, self).setUp() + self.datasource = dsovf.DataSourceOVF + self.tdir = self.tmp_dir() + + def test_get_data_false_on_none_dmi_data(self): + """When dmi for system-product-name is None, get_data returns False.""" + paths = Paths({'seed_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': None}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: No system-product-name found', self.logs.getvalue()) 
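# Editorial aside -- not part of the patch. The OVF tests in this region
# drive get_data() through cloudinit.tests.helpers.wrap_and_call. Roughly --
# this is a sketch of the helper's contract as the tests use it, not its
# verbatim source -- it patches each named attribute under a module prefix,
# treats plain values as return values, runs the function, and restores
# everything afterwards:
import mock


def wrap_and_call_sketch(prefix, mocks, func, *args, **kwargs):
    patchers = []
    for name, value in mocks.items():
        target = '%s.%s' % (prefix, name)
        if isinstance(value, mock.Mock):
            patchers.append(mock.patch(target, value))
        else:
            patchers.append(mock.patch(target, return_value=value))
    for patcher in patchers:
        patcher.start()
    try:
        return func(*args, **kwargs)
    finally:
        for patcher in patchers:
            patcher.stop()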
+ + def test_get_data_no_vmware_customization_disabled(self): + """When vmware customization is disabled via sys_cfg log a message.""" + paths = Paths({'seed_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware'}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization for VMware platform is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_disabled(self): + """When cloud-init workflow for vmware is enabled via sys_cfg log a + message. + """ + paths = Paths({'seed_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + +class TestTransportIso9660(CiTestCase): def setUp(self): super(TestTransportIso9660, self).setUp() diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 436df9ee..8dec06b1 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -9,7 +9,7 @@ from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceScaleway -from cloudinit.tests.helpers import mock, HttprettyTestCase, TestCase +from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase class DataResponses(object): @@ -63,7 +63,11 @@ class MetadataResponses(object): return 200, headers, json.dumps(cls.FAKE_METADATA) -class TestOnScaleway(TestCase): +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): mock, faked = fake_dmi @@ -91,7 +95,7 @@ class TestOnScaleway(TestCase): # When not on Scaleway, get_data() returns False. 
datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) ) self.assertFalse(datasource.get_data()) @@ -159,8 +163,9 @@ def get_source_address_adapter(*args, **kwargs): class TestDataSourceScaleway(HttprettyTestCase): def setUp(self): + tmp = self.tmp_dir() self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) ) super(TestDataSourceScaleway, self).setUp() diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 933d5b63..88bae5f9 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -359,7 +359,8 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths({'cloud_dir': self.tmp}) + self.paths = c_helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') os.mkdir(self.legacy_user_d) diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index aa13670a..5670904a 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -7,7 +7,11 @@ from cloudinit.tests.helpers import (TestCase, mock) class MyBaseDistro(distros.Distro): # MyBaseDistro is here to test base Distro class implementations - def __init__(self, name="basedistro", cfg={}, paths={}): + def __init__(self, name="basedistro", cfg=None, paths=None): + if not cfg: + cfg = {} + if not paths: + paths = {} super(MyBaseDistro, self).__init__(name, cfg, paths) def install_packages(self, pkglist): @@ -42,7 +46,6 @@ class MyBaseDistro(distros.Distro): @mock.patch("cloudinit.distros.util.subp") class TestCreateUser(TestCase): def setUp(self): - super(TestCase, self).setUp() self.dist = MyBaseDistro() def _useradd2call(self, args): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index c4bd11bc..1c2e45fe 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,6 +2,8 @@ import os from six import StringIO +import stat +from textwrap import dedent try: from unittest import mock @@ -12,13 +14,12 @@ try: except ImportError: from contextlib2 import ExitStack -from cloudinit.tests.helpers import TestCase - from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers from cloudinit.net import eni from cloudinit import settings +from cloudinit.tests.helpers import FilesystemMockingTestCase from cloudinit import util @@ -175,7 +176,7 @@ class WriteBuffer(object): return self.buffer.getvalue() -class TestNetCfgDistro(TestCase): +class TestNetCfgDistro(FilesystemMockingTestCase): frbsd_ifout = """\ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 @@ -188,9 +189,6 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 status: active """ - def setUp(self): - super(TestNetCfgDistro, self).setUp() - def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN @@ -774,4 +772,46 @@ ifconfig_vtnet0="DHCP" self.assertCfgEquals(expected_buf, str(write_buf)) 
        self.assertEqual(write_buf.mode, 0o644)

+    def test_simple_write_opensuse(self):
+        """openSUSE network rendering writes appropriate sysconfig files."""
+        tmpdir = self.tmp_dir()
+        self.patchOS(tmpdir)
+        self.patchUtils(tmpdir)
+        distro = self._get_distro('opensuse')
+
+        distro.apply_network(BASE_NET_CFG, False)
+
+        lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo')
+        eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0')
+        eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1')
+        expected_cfgs = {
+            lo_path: dedent('''
+                STARTMODE="auto"
+                USERCONTROL="no"
+                FIREWALL="no"
+                '''),
+            eth0_path: dedent('''
+                BOOTPROTO="static"
+                BROADCAST="192.168.1.0"
+                GATEWAY="192.168.1.254"
+                IPADDR="192.168.1.5"
+                NETMASK="255.255.255.0"
+                STARTMODE="auto"
+                USERCONTROL="no"
+                ETHTOOL_OPTIONS=""
+                '''),
+            eth1_path: dedent('''
+                BOOTPROTO="dhcp"
+                STARTMODE="auto"
+                USERCONTROL="no"
+                ETHTOOL_OPTIONS=""
+                ''')
+        }
+        for cfgpath in (lo_path, eth0_path, eth1_path):
+            self.assertCfgEquals(
+                expected_cfgs[cfgpath],
+                util.load_file(cfgpath))
+            file_stat = os.stat(cfgpath)
+            self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 1284e755..31cc6223 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -7,7 +7,7 @@ from uuid import uuid4
 from cloudinit import safeyaml
 from cloudinit import util
 from cloudinit.tests.helpers import (
-    CiTestCase, dir2dict, json_dumps, populate_dir)
+    CiTestCase, dir2dict, populate_dir)

 UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
                "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux")
@@ -27,11 +27,20 @@ TYPE=ext4
 PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc
 """

+# this is an Ubuntu 18.04 disk.img output (dual uefi and bios bootable)
+BLKID_UEFI_UBUNTU = [
+    {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()},
+    {'DEVNAME': 'vda14', 'PARTUUID': uuid4()},
+    {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(),
+     'UUID': '5F55-129B'}]
+
+
 POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled"
 POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled"
 DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled"
 DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled"
 DI_EC2_STRICT_ID_DEFAULT = "true"
+OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1'

 SHELL_MOCK_TMPL = """\
 %(name)s() {
@@ -55,6 +64,7 @@ P_SEED_DIR = "var/lib/cloud/seed"
 P_DSID_CFG = "etc/cloud/ds-identify.cfg"

 MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
+MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
 MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}


@@ -222,6 +232,11 @@ class TestDsIdentify(CiTestCase):
         self._test_ds_found('ConfigDrive')
         return

+    def test_config_drive_upper(self):
+        """ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
+        self._test_ds_found('ConfigDriveUpper')
+        return
+
     def test_policy_disabled(self):
         """A Builtin policy of 'disabled' should return not found.
@@ -296,6 +311,54 @@ class TestDsIdentify(CiTestCase):
             data, RC_FOUND, dslist=['OpenStack', 'None'])
         self.assertIn("check for 'OpenStack' returned maybe", err)

+    def test_default_ovf_is_found(self):
+        """OVF is found when the ovf/ovf-env.xml seed file exists."""
+        self._test_ds_found('OVF-seed')
+
+    def test_default_ovf_with_detect_virt_none_not_found(self):
+        """OVF is not found when detect_virt returns "none"."""
+        self._check_via_dict(
+            {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+    def test_default_ovf_returns_not_found_on_azure(self):
+        """OVF datasource won't be found as a false positive on Azure."""
+        ovfonazure = copy.deepcopy(VALID_CFG['OVF'])
+        # Set azure asset tag to assert OVF content not found
+        ovfonazure['files'][P_CHASSIS_ASSET_TAG] = (
+            '7783-7084-3265-9085-8269-3286-77\n')
+        self._check_via_dict(
+            ovfonazure, RC_FOUND, dslist=['Azure', DS_NONE])
+
+    def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self):
+        """OVF is identified when iso9660 cdrom path contains ovf schema."""
+        self._test_ds_found('OVF')
+
+    def test_ovf_on_vmware_iso_found_when_vmware_customization(self):
+        """OVF is identified when vmware customization is enabled."""
+        self._test_ds_found('OVF-vmware-customization')
+
+    def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
+        """OVF is identified by well-known iso9660 labels."""
+        ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF'])
+        # Unset matching cdrom ovf schema content
+        ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match'
+        self._check_via_dict(
+            ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+        # Add recognized labels
+        valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT',
+                            "OVFENV", "ovfenv"]
+        for valid_ovf_label in valid_ovf_labels:
+            ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([
+                {'DEVNAME': 'sr0', 'TYPE': 'iso9660',
+                 'LABEL': valid_ovf_label}])
+            self._check_via_dict(
+                ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+
+    def test_default_nocloud_as_vdb_iso9660(self):
+        """NoCloud is found with iso9660 filesystem on non-cdrom disk."""
+        self._test_ds_found('NoCloud')
+

 def blkid_out(disks=None):
     """Convert a list of disk dictionaries into blkid content."""
@@ -305,7 +368,9 @@ def blkid_out(disks=None):
     for disk in disks:
         if not disk["DEVNAME"].startswith("/dev/"):
             disk["DEVNAME"] = "/dev/" + disk["DEVNAME"]
-        for key in disk:
+        # devname needs to be first.
+        lines.append("%s=%s" % ("DEVNAME", disk["DEVNAME"]))
+        for key in [d for d in disk if d != "DEVNAME"]:
             lines.append("%s=%s" % (key, disk[key]))
         lines.append("")
     return '\n'.join(lines)
@@ -319,7 +384,7 @@ def _print_run_output(rc, out, err, cfg, files):
         '-- rc = %s --' % rc,
         '-- out --', str(out),
         '-- err --', str(err),
-        '-- cfg --', json_dumps(cfg)]))
+        '-- cfg --', util.json_dumps(cfg)]))
     print('-- files --')
     for k, v in files.items():
         if "/_shwrap" in k:
@@ -376,6 +441,19 @@ VALID_CFG = {
         'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'},
         'mocks': [MOCK_VIRT_IS_KVM],
     },
+    'NoCloud': {
+        'ds': 'NoCloud',
+        'mocks': [
+            MOCK_VIRT_IS_KVM,
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 BLKID_UEFI_UBUNTU +
+                 [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+        ],
+        'files': {
+            'dev/vdb': 'pretend iso content for cidata\n',
+        }
+    },
     'OpenStack': {
         'ds': 'OpenStack',
         'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'},
@@ -383,6 +461,43 @@ VALID_CFG = {
         'policy_dmi': POLICY_FOUND_ONLY,
         'policy_no_dmi': POLICY_FOUND_ONLY,
     },
+    'OVF-seed': {
+        'ds': 'OVF',
+        'files': {
+            os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n',
+        }
+    },
+    'OVF-vmware-customization': {
+        'ds': 'OVF',
+        'mocks': [
+            # Include a mocked iso9660 candidate, even though its content
+            # is not ovf.
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
+             },
+            MOCK_VIRT_IS_VMWARE,
+        ],
+        'files': {
+            'dev/sr0': 'no match',
+            # Set up vmware customization enabled
+            'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here',
+            'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n',
+        }
+    },
+    'OVF': {
+        'ds': 'OVF',
+        'mocks': [
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
+             },
+            MOCK_VIRT_IS_VMWARE,
+        ],
+        'files': {
+            'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n',
+        }
+    },
     'ConfigDrive': {
         'ds': 'ConfigDrive',
         'mocks': [
@@ -395,6 +510,18 @@ VALID_CFG = {
         },
         ],
     },
+    'ConfigDriveUpper': {
+        'ds': 'ConfigDrive',
+        'mocks': [
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vda2', 'TYPE': 'ext4',
+                   'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}])
+             },
+        ],
+    },
 }

 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index e0d9ab6c..a2054980 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -25,9 +25,6 @@ class TestLxd(t_help.CiTestCase):
         }
     }

-    def setUp(self):
-        super(TestLxd, self).setUp()
-
     def _get_cloud(self, distro):
         cls = distros.fetch(distro)
         paths = helpers.Paths({})
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 85a0fe0a..3c726422 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -9,9 +9,6 @@ from cloudinit.tests.helpers import mock

 class TestLoadPowerState(t_help.TestCase):

-    def setUp(self):
-        super(self.__class__, self).setUp()
-
     def test_no_config(self):
         # completely empty config should mean do nothing
         (cmd, _timeout, _condition) = psc.load_power_state({})
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index 29d5574d..5aa3c498 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -1,7 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.

 from cloudinit.config.cc_resizefs import (
-    can_skip_resize, handle, maybe_get_writable_device_path)
+    can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs)

 from collections import namedtuple
 import logging
@@ -293,5 +293,25 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
             " per kernel cmdline",
             self.logs.getvalue())

+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+        """Do not resize / directly if it is read-only. (LP: #1734787)."""
+        m_is_rw.return_value = False
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
+            _resize_btrfs("/", "/dev/sda1"))
+
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+        """Resize / directly if it is read-write. (LP: #1734787)."""
+        m_is_rw.return_value = True
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '/'),
+            _resize_btrfs("/", "/dev/sda1"))
+

 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index b7adbe50..b90a3af3 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -5,10 +5,6 @@ from cloudinit import util

 from cloudinit.tests import helpers

-try:
-    from configparser import ConfigParser
-except ImportError:
-    from ConfigParser import ConfigParser
 import logging
 import shutil
 from six import StringIO
@@ -58,8 +54,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
         self.patchUtils(self.tmp)
         cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
         contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
-        parser = ConfigParser()
-        parser.readfp(StringIO(contents))
+        parser = self.parse_and_read(StringIO(contents))
         expected = {
             'epel_testing': {
                 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
@@ -95,8 +90,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
         self.patchUtils(self.tmp)
         cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
         contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
-        parser = ConfigParser()
-        parser.readfp(StringIO(contents))
+        parser = self.parse_and_read(StringIO(contents))
         expected = {
             'puppetlabs_products': {
                 'name': 'Puppet Labs Products El 6 - $basearch',
diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
index 315c2a5e..72ab6c08 100644
--- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
@@ -9,10 +9,6 @@ from cloudinit import util
 from cloudinit.tests import helpers
 from cloudinit.tests.helpers import mock

-try:
-    from configparser import ConfigParser
-except ImportError:
-    from ConfigParser import ConfigParser
 import logging
 from six import StringIO

@@ -70,8 +66,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
         root_d = self.tmp_dir()
cc_zypper_add_repo._write_repos(cfg['repos'], root_d) contents = util.load_file("%s/testing-foo.repo" % root_d) - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'testing-foo': { 'name': 'test-foo', diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f3fa2a30..ac33e8ef 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,9 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net -from cloudinit.net import _natural_sort_key from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import natural_sort_key from cloudinit.net import netplan from cloudinit.net import network_state from cloudinit.net import renderers @@ -2708,11 +2708,11 @@ class TestInterfacesSorting(CiTestCase): def test_natural_order(self): data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'] self.assertEqual( - sorted(data, key=_natural_sort_key), + sorted(data, key=natural_sort_key), ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']) data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2'] self.assertEqual( - sorted(data2, key=_natural_sort_key), + sorted(data2, key=natural_sort_key), ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) @@ -2948,4 +2948,16 @@ class TestRenameInterfaces(CiTestCase): mock_subp.assert_has_calls(expected) +class TestNetworkState(CiTestCase): + + def test_bcast_addr(self): + """Test mask_and_ipv4_to_bcast_addr proper execution.""" + bcast_addr = network_state.mask_and_ipv4_to_bcast_addr + self.assertEqual("192.168.1.255", + bcast_addr("255.255.255.0", "192.168.1.1")) + self.assertEqual("128.42.7.255", + bcast_addr("255.255.248.0", "128.42.5.4")) + self.assertEqual("10.1.21.255", + bcast_addr("255.255.255.0", "10.1.21.4")) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 571420ed..e15ba6cf 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -126,7 +126,7 @@ class TestBaseReportingHandler(TestCase): def test_base_reporting_handler_is_abstract(self): regexp = r".*abstract.*publish_event.*" - self.assertRaisesRegexp(TypeError, regexp, handlers.ReportingHandler) + self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler) class TestLogHandler(TestCase): diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index add93653..5d3f1ca3 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -23,6 +23,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): cfg = { 'datasource_list': ['None'], 'cloud_init_modules': ['write-files'], + 'system_info': {'paths': {'run_dir': new_root}} } ud = self.readResource('user_data.1.txt') cloud_cfg = util.yaml_dumps(cfg) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index b8fb4794..762974e9 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -2,10 +2,10 @@ import os -from cloudinit.tests import helpers from cloudinit.settings import PER_INSTANCE from cloudinit import stages +from cloudinit.tests import helpers from cloudinit import util @@ -23,6 +23,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): 'datasource_list': ['None'], 'runcmd': ['ls /etc'], # test ALL_DISTROS 'spacewalk': {}, # test 
non-ubuntu distros module definition
+            'system_info': {'paths': {'run_dir': self.new_root}},
             'write_files': [
                 {
                     'path': '/etc/blah.ini',
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index b911d929..53154d33 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -14,7 +14,7 @@ from cloudinit import templater
 try:
     import Cheetah
     HAS_CHEETAH = True
-    Cheetah  # make pyflakes happy, as Cheetah is not used here
+    c = Cheetah  # make pyflakes and pylint happy, as Cheetah is not used here
 except ImportError:
     HAS_CHEETAH = False

diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 3e4154ca..4a92e741 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -299,6 +299,14 @@ class TestLoadYaml(helpers.TestCase):
                                          default=self.mydefault),
                          myobj)

+    def test_none_returns_default(self):
+        """If yaml.load returns None, then default should be returned."""
+        blobs = ("", " ", "# foo\n", "#")
+        mdef = self.mydefault
+        self.assertEqual(
+            [(b, self.mydefault) for b in blobs],
+            [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs])
+

 class TestMountinfoParsing(helpers.ResourceUsingTestCase):
     def test_invalid_mountinfo(self):
@@ -477,6 +485,44 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         self.assertIsNone(util.read_dmi_data("system-product-name"))


+class TestGetConfigLogfiles(helpers.CiTestCase):
+
+    def test_empty_cfg_returns_empty_list(self):
+        """An empty config passed to get_config_logfiles returns an empty
+        list."""
+        self.assertEqual([], util.get_config_logfiles(None))
+        self.assertEqual([], util.get_config_logfiles({}))
+
+    def test_default_log_file_present(self):
+        """When default_log_file is set, get_config_logfiles finds it."""
+        self.assertEqual(
+            ['/my.log'],
+            util.get_config_logfiles({'def_log_file': '/my.log'}))
+
+    def test_output_logs_parsed_when_teeing_files(self):
+        """Output configuration is parsed when teeing to files."""
+        self.assertEqual(
+            ['/himom.log', '/my.log'],
+            sorted(util.get_config_logfiles({
+                'def_log_file': '/my.log',
+                'output': {'all': '|tee -a /himom.log'}})))
+
+    def test_output_logs_parsed_when_redirecting(self):
+        """Output configuration is parsed when redirecting to a file."""
+        self.assertEqual(
+            ['/my.log', '/test.log'],
+            sorted(util.get_config_logfiles({
+                'def_log_file': '/my.log',
+                'output': {'all': '>/test.log'}})))
+
+    def test_output_logs_parsed_when_appending(self):
+        """Output configuration is parsed when appending to a file."""
+        self.assertEqual(
+            ['/my.log', '/test.log'],
+            sorted(util.get_config_logfiles({
+                'def_log_file': '/my.log',
+                'output': {'all': '>> /test.log'}})))
+
+
 class TestMultiLog(helpers.FilesystemMockingTestCase):

     def _createConsole(self, root):
@@ -577,6 +623,7 @@ class TestSubp(helpers.CiTestCase):
     utf8_valid = b'start \xc3\xa9 end'
     utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
     printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
+    bogus_command = 'this-is-not-expected-to-be-a-program-name'

     def printf_cmd(self, *args):
         # bash's printf supports \xaa. So does /usr/bin/printf
@@ -657,15 +704,29 @@ class TestSubp(helpers.CiTestCase):
         util.write_file(noshebang, 'true\n')
         os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)

-        self.assertRaisesRegexp(util.ProcessExecutionError,
-                                'Missing #! in script\?',
-                                util.subp, (noshebang,))
+        self.assertRaisesRegex(util.ProcessExecutionError,
+                               'Missing #! in script\?',
+                               util.subp, (noshebang,))

     def test_returns_none_if_no_capture(self):
         (out, err) = util.subp(self.stdin2out, data=b'', capture=False)
         self.assertIsNone(err)
         self.assertIsNone(out)

+    def test_exception_has_out_err_are_bytes_if_decode_false(self):
+        """Raised exc should have stderr, stdout as bytes if decode=False."""
+        with self.assertRaises(util.ProcessExecutionError) as cm:
+            util.subp([self.bogus_command], decode=False)
+        self.assertTrue(isinstance(cm.exception.stdout, bytes))
+        self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+    def test_exception_has_out_err_are_str_if_decode_true(self):
+        """Raised exc should have stderr, stdout as strings if decode=True."""
+        with self.assertRaises(util.ProcessExecutionError) as cm:
+            util.subp([self.bogus_command], decode=True)
+        self.assertTrue(isinstance(cm.exception.stdout, six.string_types))
+        self.assertTrue(isinstance(cm.exception.stderr, six.string_types))
+
     def test_bunch_of_slashes_in_path(self):
         self.assertEqual("/target/my/path/",
                          util.target_path("/target/", "//my/path/"))
diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/test_vmware/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unittests/test_vmware/__init__.py
diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py
new file mode 100644
index 00000000..2d9519b0
--- /dev/null
+++ b/tests/unittests/test_vmware/test_custom_script.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2017 VMware INC.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+    CustomScriptConstant,
+    CustomScriptNotFound,
+    PreCustomScript,
+    PostCustomScript,
+)
+from cloudinit.tests.helpers import CiTestCase, mock
+
+
+class TestVmwareCustomScript(CiTestCase):
+    def setUp(self):
+        super(TestVmwareCustomScript, self).setUp()
+        self.tmpDir = self.tmp_dir()
+
+    def test_prepare_custom_script(self):
+        """
+        Verify the behavior based on the presence of the custom script.
+        Mainly needed for the scenario where a custom script is expected,
+        but was not properly copied; "CustomScriptNotFound" is raised in
+        such cases.
+        """
+        # Custom script does not exist.
+        preCust = PreCustomScript("random-vmw-test", self.tmpDir)
+        self.assertEqual("random-vmw-test", preCust.scriptname)
+        self.assertEqual(self.tmpDir, preCust.directory)
+        self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
+                         preCust.scriptpath)
+        with self.assertRaises(CustomScriptNotFound):
+            preCust.prepare_script()
+
+        # Custom script exists.
+        custScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(custScript, "test-CR-strip\r\r")
+        postCust = PostCustomScript("test-cust", self.tmpDir)
+        self.assertEqual("test-cust", postCust.scriptname)
+        self.assertEqual(self.tmpDir, postCust.directory)
+        self.assertEqual(custScript, postCust.scriptpath)
+        self.assertFalse(postCust.postreboot)
+        postCust.prepare_script()
+        # Check that all carriage returns are stripped from the script.
+        self.assertFalse("\r" in util.load_file(custScript))
+
+    def test_rc_local_exists(self):
+        """
+        Verify the different scenarios associated with the presence of
+        rclocal.
+        """
+        # test when rc local does not exist
+        postCust = PostCustomScript("test-cust", self.tmpDir)
+        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"):
+            rclocal = postCust.find_rc_local()
+            self.assertEqual("", rclocal)
+
+        # test when rc local exists
+        rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir)
+        util.write_file(rclocalFile, "# Run post-reboot guest customization",
+                        omode="w")
+        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile):
+            rclocal = postCust.find_rc_local()
+            self.assertEqual(rclocalFile, rclocal)
+            self.assertTrue(postCust.has_previous_agent(rclocal))
+
+        # test when rc local is a symlink
+        rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir)
+        util.sym_link(rclocalFile, rclocalLink, True)
+        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink):
+            rclocal = postCust.find_rc_local()
+            self.assertEqual(rclocalFile, rclocal)
+
+    def test_execute_post_cust(self):
+        """
+        Verify that rclocal is properly populated to run the custom script
+        after reboot.
+        """
+        customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir)
+        rclocal = self.tmp_path("vmware-rclocal", self.tmpDir)
+        # Create an empty custom script and a temporary rclocal file.
+        open(customscript, "w").close()
+        util.write_file(rclocal, "tests\nexit 0", omode="w")
+        postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir)
+        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal):
+            # Test that guest customization agent is not installed initially.
+            self.assertFalse(postCust.postreboot)
+            self.assertIs(postCust.has_previous_agent(rclocal), False)
+            postCust.install_agent()
+
+            # Assert rclocal has been modified to have guest customization
+            # agent.
+            self.assertTrue(postCust.postreboot)
+            self.assertTrue(postCust.has_previous_agent(rclocal))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 808d303a..036f6879 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -133,7 +133,8 @@ class TestVmwareConfigFile(CiTestCase):
         conf = Config(cf)

         with self.assertRaises(ValueError):
-            conf.reset_password()
+            pw = conf.reset_password
+            self.assertIsNone(pw)

         cf.clear()
         cf._insertKey("PASSWORD|RESET", "yes")
@@ -334,5 +335,12 @@ class TestVmwareConfigFile(CiTestCase):
         self.assertEqual('255.255.0.0', subnet.get('netmask'),
                          'Subnet netmask')

+    def test_custom_script(self):
+        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+        conf = Config(cf)
+        self.assertIsNone(conf.custom_script_name)
+        cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
+        conf = Config(cf)
+        self.assertEqual("test-script", conf.custom_script_name)

 # vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index ee5e05a4..cd268242 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -83,6 +83,7 @@ _DI_LOGGED=""
 # set DI_MAIN='noop' in environment to source this file with no main called.
 DI_MAIN=${DI_MAIN:-main}

+DI_BLKID_OUTPUT=""
 DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
 DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
 DI_DMI_CHASSIS_ASSET_TAG=""
@@ -91,6 +92,7 @@ DI_DMI_SYS_VENDOR=""
 DI_DMI_PRODUCT_SERIAL=""
 DI_DMI_PRODUCT_UUID=""
 DI_FS_LABELS=""
+DI_ISO9660_DEVS=""
 DI_KERNEL_CMDLINE=""
 DI_VIRT=""
 DI_PID_1_PRODUCT_NAME=""
@@ -181,32 +183,43 @@ block_dev_with_label() {
     return 0
 }

-read_fs_labels() {
-    cached "${DI_FS_LABELS}" && return 0
+read_fs_info() {
+    cached "${DI_BLKID_OUTPUT}" && return 0
     # do not rely on links in /dev/disk which might not be present yet.
     # note that older blkid versions do not report DEVNAME in 'export' output.
-    local out="" ret=0 oifs="$IFS" line="" delim=","
-    local labels=""
     if is_container; then
         # blkid will in a container, or at least currently in lxd
         # not provide useful information.
         DI_FS_LABELS="$UNAVAILABLE:container"
-    else
-        out=$(blkid -c /dev/null -o export) || {
-            ret=$?
-            error "failed running [$ret]: blkid -c /dev/null -o export"
-            return $ret
-        }
-        IFS="$CR"
-        set -- $out
-        IFS="$oifs"
-        for line in "$@"; do
-            case "${line}" in
-                LABEL=*) labels="${labels}${line#LABEL=}${delim}";;
-            esac
-        done
-        DI_FS_LABELS="${labels%${delim}}"
+        DI_ISO9660_DEVS="$UNAVAILABLE:container"
+        return
     fi
+    local oifs="$IFS" line="" delim=","
+    local ret=0 out="" labels="" dev="" label="" ftype="" isodevs=""
+    out=$(blkid -c /dev/null -o export) || {
+        ret=$?
+        error "failed running [$ret]: blkid -c /dev/null -o export"
+        DI_FS_LABELS="$UNAVAILABLE:error"
+        DI_ISO9660_DEVS="$UNAVAILABLE:error"
+        return $ret
+    }
+    IFS="$CR"
+    set -- $out
+    IFS="$oifs"
+    for line in "$@" ""; do
+        case "${line}" in
+            DEVNAME=*) dev=${line#DEVNAME=};;
+            LABEL=*) label="${line#LABEL=}";
+                labels="${labels}${line#LABEL=}${delim}";;
+            TYPE=*) ftype=${line#TYPE=};;
+            "") if [ "$ftype" = "iso9660" ]; then
+                    isodevs="${isodevs} ${dev}=$label"
+                fi
+                ftype=""; dev=""; label="";
+        esac
+    done
+    DI_FS_LABELS="${labels%${delim}}"
+    DI_ISO9660_DEVS="${isodevs# }"
 }

 cached() {
@@ -214,10 +227,6 @@ cached() {
 }


-has_cdrom() {
-    [ -e "${PATH_ROOT}/dev/cdrom" ]
-}
-
 detect_virt() {
     local virt="${UNAVAILABLE}" r="" out=""
     if [ -d /run/systemd ]; then
@@ -570,6 +579,8 @@ dscheck_NoCloud() {
 check_configdrive_v2() {
     if has_fs_with_label "config-2"; then
         return ${DS_FOUND}
+    elif has_fs_with_label "CONFIG-2"; then
+        return ${DS_FOUND}
     fi
     # look in /config-drive <vlc>/seed/config_drive for a directory
     # openstack/YYYY-MM-DD format with a file meta_data.json
@@ -621,14 +632,13 @@ ovf_vmware_guest_customization() {
     [ "${DI_VIRT}" = "vmware" ] || return 1

     # we have to have the plugin to do vmware customization
-    local found="" pkg="" pre="/usr/lib"
+    local found="" pkg="" pre="${PATH_ROOT}/usr/lib"
     for pkg in vmware-tools open-vm-tools; do
         if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then
             found="$pkg"; break;
         fi
     done
     [ -n "$found" ] || return 1
-
     # vmware customization is disabled by default
     # (disable_vmware_customization=true). If it is set to false, then
     # user has requested customization.
@@ -644,20 +654,57 @@ ovf_vmware_guest_customization() {
     return 1
 }

+is_cdrom_ovf() {
+    local dev="$1" label="$2"
+    # skip devices that don't look like cdrom paths.
+ case "$dev" in + /dev/sr[0-9]|/dev/hd[a-z]) :;; + *) debug 1 "skipping iso dev $dev" + return 1;; + esac + + # fast path known 'OVF' labels + case "$label" in + OVF-TRANSPORT|ovf-transport|OVFENV|ovfenv) return 0;; + esac + + # explicitly skip known labels of other types. rd_rdfe is azure. + case "$label" in + config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + esac + + local idstr="http://schemas.dmtf.org/ovf/environment/1" + grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev" +} + dscheck_OVF() { - local p="" check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + [ "${DI_VIRT}" = "none" ] && return ${DS_NOT_FOUND} + + # Azure provides ovf. Skip false positive by dis-allowing. + is_azure_chassis && return $DS_NOT_FOUND + + local isodevs="${DI_ISO9660_DEVS}" + case "$isodevs" in + ""|$UNAVAILABLE:*) return ${DS_NOT_FOUND};; + esac + + # DI_ISO9660_DEVS is <device>=label, like /dev/sr0=OVF-TRANSPORT + for tok in $isodevs; do + is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND + done + if ovf_vmware_guest_customization; then return ${DS_FOUND} fi - has_cdrom || return ${DS_NOT_FOUND} + return ${DS_NOT_FOUND} +} - # FIXME: currently just return maybe if there is a cdrom - # ovf iso9660 transport does not specify an fs label. - # better would be to check if - return ${DS_MAYBE} +is_azure_chassis() { + local azure_chassis="7783-7084-3265-9085-8269-3286-77" + dmi_chassis_asset_tag_matches "${azure_chassis}" } dscheck_Azure() { @@ -667,8 +714,7 @@ dscheck_Azure() { # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" # TYPE="udf">/dev/sr0</device> # - local azure_chassis="7783-7084-3265-9085-8269-3286-77" - dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND + is_azure_chassis && return $DS_FOUND check_seed_dir azure ovf-env.xml && return ${DS_FOUND} [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} @@ -930,7 +976,7 @@ collect_info() { read_dmi_product_name read_dmi_product_serial read_dmi_product_uuid - read_fs_labels + read_fs_info } print_info() { @@ -942,7 +988,7 @@ _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" - vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" vars="$vars DSNAME DSLIST" diff --git a/tools/hacking.py b/tools/hacking.py deleted file mode 100755 index e6a05136..00000000 --- a/tools/hacking.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2012, Cloudscaling -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""cloudinit HACKING file compliance testing (based off of nova hacking.py) - -built on top of pep8.py -""" - -import inspect -import logging -import re -import sys - -import pep8 - -# Don't need this for testing -logging.disable('LOG') - -# N1xx comments -# N2xx except -# N3xx imports -# N4xx docstrings -# N[5-9]XX (future use) - -DOCSTRING_TRIPLE = ['"""', "'''"] -VERBOSE_MISSING_IMPORT = False -_missingImport = set([]) - - -def import_normalize(line): - # convert "from x import y" to "import x.y" - # handle "from x import y as z" to "import x.y as z" - split_line = line.split() - if (line.startswith("from ") and "," not in line and - split_line[2] == "import" and split_line[3] != "*" and - split_line[1] != "__future__" and - (len(split_line) == 4 or (len(split_line) == 6 and - split_line[4] == "as"))): - return "import %s.%s" % (split_line[1], split_line[3]) - else: - return line - - -def cloud_import_alphabetical(physical_line, line_number, lines): - """Check for imports in alphabetical order. - - HACKING guide recommendation for imports: - imports in human alphabetical order - N306 - """ - # handle import x - # use .lower since capitalization shouldn't dictate order - split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() - # with or without "as y" - length = [2, 4] - if (len(split_line) in length and len(split_previous) in length and - split_line[0] == "import" and split_previous[0] == "import"): - if split_line[1] < split_previous[1]: - return (0, "N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) - - -def cloud_docstring_start_space(physical_line): - """Check for docstring not start with space. - - HACKING guide recommendation for docstring: - Docstring should not start with space - N401 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) > pos + 1): - if (physical_line[pos + 3] == ' '): - return (pos, - "N401: one line docstring should not start with a space") - - -def cloud_todo_format(physical_line): - """Check for 'TODO()'. - - HACKING guide recommendation for TODO: - Include your name with TODOs as in "#TODO(termie)" - N101 - """ - pos = physical_line.find('TODO') - pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure it's a comment - if (pos != pos1 and pos2 >= 0 and pos2 < pos): - return pos, "N101: Use TODO(NAME)" - - -def cloud_docstring_one_line(physical_line): - """Check one line docstring end. - - HACKING guide recommendation for one line docstring: - A one line docstring looks like this and ends in a period. - N402 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end - if (pos != -1 and end and len(physical_line) > pos + 4): - if (physical_line[-5] != '.'): - return pos, "N402: one line docstring needs a period" - - -def cloud_docstring_multiline_end(physical_line): - """Check multi line docstring end. 
-
-    HACKING guide recommendation for docstring:
-    Docstring should end on a new line
-    N403
-    """
-    pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start
-    if (pos != -1 and len(physical_line) == pos):
-        print(physical_line)
-        if (physical_line[pos + 3] == ' '):
-            return (pos, "N403: multi line docstring end on new line")
-
-
-current_file = ""
-
-
-def readlines(filename):
-    """Record the current file being tested."""
-    pep8.current_file = filename
-    return open(filename).readlines()
-
-
-def add_cloud():
-    """Monkey patch pep8 for cloud-init guidelines.
-
-    Look for functions that start with cloud_
-    and add them to pep8 module.
-
-    Assumes you know how to write pep8.py checks
-    """
-    for name, function in globals().items():
-        if not inspect.isfunction(function):
-            continue
-        if name.startswith("cloud_"):
-            exec("pep8.%s = %s" % (name, name))
-
-
-if __name__ == "__main__":
-    # NOVA based 'hacking.py' error codes start with an N
-    pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
-    add_cloud()
-    pep8.current_file = current_file
-    pep8.readlines = readlines
-    try:
-        pep8._main()
-    finally:
-        if len(_missingImport) > 0:
-            sys.stderr.write(
-                "%i imports missing in this test environment\n" %
-                len(_missingImport))
-
-# vi: ts=4 expandtab
diff --git a/tools/make-mime.py b/tools/make-mime.py
index f6a72044..d321479b 100755
--- a/tools/make-mime.py
+++ b/tools/make-mime.py
@@ -23,7 +23,7 @@ def file_content_type(text):
         filename, content_type = text.split(":", 1)
         return (open(filename, 'r'), filename, content_type.strip())
     except ValueError:
-        raise argparse.ArgumentError("Invalid value for %r" % (text))
+        raise argparse.ArgumentError(text, "Invalid value for %r" % (text))


 def main():
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index a5d14ab7..724f7fc4 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -17,6 +17,7 @@ Then:
   ec2metadata --instance-id
 """

+import argparse
 import functools
 import json
 import logging
@@ -27,8 +28,6 @@ import string
 import sys
 import yaml

-from optparse import OptionParser
-
 try:
     from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
     import httplib as hclient
@@ -415,29 +414,27 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):


 def extract_opts():
-    parser = OptionParser()
-    parser.add_option("-p", "--port", dest="port", action="store", type=int,
-                      default=80, metavar="PORT",
-                      help=("port from which to serve traffic"
-                            " (default: %default)"))
-    parser.add_option("-a", "--addr", dest="address", action="store", type=str,
-                      default='::', metavar="ADDRESS",
-                      help=("address from which to serve traffic"
-                            " (default: %default)"))
-    parser.add_option("-f", '--user-data-file', dest='user_data_file',
-                      action='store', metavar='FILE',
-                      help=("user data filename to serve back to"
-                            "incoming requests"))
-    (options, args) = parser.parse_args()
-    out = dict()
-    out['extra'] = args
-    out['port'] = options.port
-    out['user_data_file'] = None
-    out['address'] = options.address
-    if options.user_data_file:
-        if not os.path.isfile(options.user_data_file):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-p", "--port", dest="port", action="store", type=int,
+                        default=80, metavar="PORT",
+                        help=("port from which to serve traffic"
+                              " (default: %(default)s)"))
+    parser.add_argument("-a", "--addr", dest="address", action="store",
+                        type=str, default='::', metavar="ADDRESS",
+                        help=("address from which to serve traffic"
+                              " (default: %(default)s)"))
+    parser.add_argument("-f", '--user-data-file', dest='user_data_file',
+                        action='store', metavar='FILE',
+                        help=("user data filename to serve back to "
+                              "incoming requests"))
+    parser.add_argument('extra', nargs='*')
+    args = parser.parse_args()
+    out = {'port': args.port, 'address': args.address, 'extra': args.extra,
+           'user_data_file': None}
+    if args.user_data_file:
+        if not os.path.isfile(args.user_data_file):
             parser.error("Option -f specified a non-existent file")
-        with open(options.user_data_file, 'rb') as fh:
+        with open(args.user_data_file, 'rb') as fh:
             out['user_data_file'] = fh.read()
     return out
diff --git a/tools/read-version b/tools/read-version
index d9ed30da..3ea9e66e 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -45,6 +45,19 @@ def which(program):
     return None


+def is_gitdir(path):
+    # Return boolean indicating if path is a git tree.
+    git_meta = os.path.join(path, '.git')
+    if os.path.isdir(git_meta):
+        return True
+    if os.path.exists(git_meta):
+        # in a git worktree, .git is a file with 'gitdir: x'
+        with open(git_meta, "rb") as fp:
+            if b'gitdir:' in fp.read():
+                return True
+    return False
+
+
 use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG')
 use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS')
 output_json = '--json' in sys.argv
@@ -52,7 +65,7 @@ output_json = '--json' in sys.argv
 src_version = ci_version.version_string()
 version_long = None

-if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"):
+if is_gitdir(_tdir) and which("git"):
     flags = []
     if use_tags:
         flags = ['--tags']
@@ -21,12 +21,13 @@ setenv =
     LC_ALL = en_US.utf-8

 [testenv:pylint]
+basepython = python3
 deps =
     # requirements
-    pylint==1.7.1
+    pylint==1.8.1
     # test-requirements because unit tests are now present in cloudinit tree
     -r{toxinidir}/test-requirements.txt
-commands = {envpython} -m pylint {posargs:cloudinit}
+commands = {envpython} -m pylint {posargs:cloudinit tests tools}

 [testenv:py3]
 basepython = python3
@@ -119,7 +120,7 @@ commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
 deps = pyflakes

 [testenv:tip-pylint]
-commands = {envpython} -m pylint {posargs:cloudinit}
+commands = {envpython} -m pylint {posargs:cloudinit tests tools}
 deps =
     # requirements
     pylint
@@ -131,6 +132,4 @@ basepython = python3
 commands = {envpython} -m tests.cloud_tests {posargs}
 passenv = HOME
 deps =
-    pylxd==2.2.4
-    paramiko==2.3.1
-    bzr+lp:simplestreams
+    -r{toxinidir}/integration-requirements.txt
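
A note on the blkid "export" format that both the blkid_out() test helper and ds-identify's new read_fs_info() traverse: each device is a run of KEY=VALUE lines terminated by a blank line, with DEVNAME first. A minimal Python sketch of that parse, for reference only (parse_blkid_export is an illustrative name, not a cloud-init API):

    def parse_blkid_export(out):
        """Parse `blkid -c /dev/null -o export` output into dicts.

        Each device is a run of KEY=VALUE lines; a blank line closes the
        current device.  Sketch only: assumes well-formed output.
        """
        disks, cur = [], {}
        for line in out.splitlines():
            if not line.strip():  # blank line ends the current device
                if cur:
                    disks.append(cur)
                    cur = {}
                continue
            key, _, val = line.partition('=')
            cur[key] = val
        if cur:
            disks.append(cur)
        return disks

    # Round-trips with blkid_out() from test_ds_identify.py:
    # parse_blkid_export(blkid_out(
    #     [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': 'OVF-TRANSPORT'}]))
    # -> [{'DEVNAME': '/dev/sr0', 'TYPE': 'iso9660',
    #      'LABEL': 'OVF-TRANSPORT'}]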
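
The test_natural_order case exercises cloudinit.net.natural_sort_key, which this diff promotes from _natural_sort_key to a public name. A typical natural-sort key splits out digit runs and compares them numerically; the sketch below satisfies the test's expectations but is not guaranteed to be the exact upstream code:

    import re

    def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
        """Sort key that orders embedded numbers numerically, not lexically."""
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in _nsre.split(s)]

    # sorted(['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'],
    #        key=natural_sort_key)
    # -> ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']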
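
TestNetworkState.test_bcast_addr pins down mask_and_ipv4_to_bcast_addr with three examples. The arithmetic behind those expectations is bcast = ip | ~mask; a runnable sketch, not the upstream implementation in cloudinit.net.network_state:

    import socket
    import struct

    def mask_and_ipv4_to_bcast_addr(mask, ip):
        """Broadcast address: OR the inverted netmask into the address."""
        ip_n = struct.unpack('!I', socket.inet_aton(ip))[0]
        mask_n = struct.unpack('!I', socket.inet_aton(mask))[0]
        bcast = ip_n | (~mask_n & 0xFFFFFFFF)
        return socket.inet_ntoa(struct.pack('!I', bcast))

    # 128.42.5.4 with mask 255.255.248.0: host bits are 0.0.7.255,
    # so 5 | 7 == 7 in the third octet.
    assert mask_and_ipv4_to_bcast_addr(
        '255.255.248.0', '128.42.5.4') == '128.42.7.255'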
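
TestGetConfigLogfiles covers the three 'output' forms util.get_config_logfiles must understand: '|tee -a FILE', '>FILE' and '>> FILE'. A hedged sketch of the extraction those cases imply (logfiles_from_output_value is an invented helper name, not the util function itself):

    import shlex

    def logfiles_from_output_value(val):
        """Extract log file paths from a single 'output' config value.

        Handles '>file', '>> file' and '| tee [-a] file' forms, per the
        TestGetConfigLogfiles cases above.
        """
        files = []
        if '|' in val:
            # e.g. '|tee -a /himom.log': take the tee arguments,
            # skipping option flags such as -a.
            cmd = shlex.split(val.partition('|')[2])
            files.extend(a for a in cmd[1:] if not a.startswith('-'))
        elif val.startswith('>'):
            # e.g. '>/test.log' or '>> /test.log'
            files.append(val.lstrip('> '))
        return files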