From 30b4d15764a1a9644379cf95770e8b2480856882 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 5 Dec 2017 16:25:11 -0700 Subject: cli: Add clean and status subcommands The 'cloud-init clean' command allows a user or script to clear cloud-init artifacts from the system so that cloud-init sees the system as unconfigured upon reboot. Optional parameters can be provided to remove cloud-init logs and to reboot after cleaning. The 'cloud-init status' command allows the user or script to check whether cloud-init has finished all configuration stages and whether errors occurred. An optional --wait argument will poll at a 0.25 second interval until cloud-init configuration is complete. The benefit is that scripts can block on cloud-init completion before performing post-config tasks. --- cloudinit/cmd/clean.py | 102 +++++++++++ cloudinit/cmd/main.py | 18 ++ cloudinit/cmd/status.py | 157 +++++++++++++++++ cloudinit/cmd/tests/__init__.py | 0 cloudinit/cmd/tests/test_clean.py | 159 +++++++++++++++++ cloudinit/cmd/tests/test_status.py | 353 +++++++++++++++++++++++++++++++++++++ cloudinit/distros/__init__.py | 16 +- cloudinit/util.py | 26 +++ 8 files changed, 826 insertions(+), 5 deletions(-) create mode 100644 cloudinit/cmd/clean.py create mode 100644 cloudinit/cmd/status.py create mode 100644 cloudinit/cmd/tests/__init__.py create mode 100644 cloudinit/cmd/tests/test_clean.py create mode 100644 cloudinit/cmd/tests/test_status.py (limited to 'cloudinit') diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py new file mode 100644 index 00000000..81797b1c --- /dev/null +++ b/cloudinit/cmd/clean.py @@ -0,0 +1,102 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Define 'clean' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys + +from cloudinit.stages import Init +from cloudinit.util import ( + ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, subp) + + +def error(msg): + sys.stderr.write("ERROR: " + msg + "\n") + + +def get_parser(parser=None): + """Build or extend an arg parser for the clean utility. + + @param parser: Optional existing ArgumentParser instance representing the + clean subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='clean', + description=('Remove logs and artifacts so cloud-init re-runs on ' + 'a clean system')) + parser.add_argument( + '-l', '--logs', action='store_true', default=False, dest='remove_logs', + help='Remove cloud-init logs.') + parser.add_argument( + '-r', '--reboot', action='store_true', default=False, + help='Reboot system after logs are cleaned so cloud-init re-runs.') + parser.add_argument( + '-s', '--seed', action='store_true', default=False, dest='remove_seed', + help='Remove cloud-init seed directory /var/lib/cloud/seed.') + return parser + + +def remove_artifacts(remove_logs, remove_seed=False):
 + """Helper which removes the artifacts dir and optionally log files. + + @param: remove_logs: Boolean. Set True to also delete cloud-init log + files; False preserves them. + @param: remove_seed: Boolean. Set True to also delete seed subdir in + paths.cloud_dir. + @returns: 0 on success, 1 otherwise.
+ """ + init = Init(ds_deps=[]) + init.read_cfg() + if remove_logs: + for log_file in get_config_logfiles(init.cfg): + del_file(log_file) + + if not os.path.isdir(init.paths.cloud_dir): + return 0 # Artifacts dir already cleaned + with chdir(init.paths.cloud_dir): + for path in os.listdir('.'): + if path == 'seed' and not remove_seed: + continue + try: + if os.path.isdir(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 + return 0 + + +def handle_clean_args(name, args): + """Handle calls to 'cloud-init clean' as a subcommand.""" + exit_code = remove_artifacts(args.remove_logs, args.remove_seed) + if exit_code == 0 and args.reboot: + cmd = ['shutdown', '-r', 'now'] + try: + subp(cmd, capture=False) + except ProcessExecutionError as e: + error( + 'Could not reboot this system using "{0}": {1}'.format( + cmd, str(e))) + exit_code = 1 + return exit_code + + +def main(): + """Tool to collect and tar all cloud-init related logs.""" + parser = get_parser() + sys.exit(handle_clean_args('clean', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 6fb9d9e7..aa56225d 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -767,6 +767,12 @@ def main(sysv_args=None): parser_collect_logs = subparsers.add_parser( 'collect-logs', help='Collect and tar all cloud-init debug info') + parser_clean = subparsers.add_parser( + 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + + parser_status = subparsers.add_parser( + 'status', help='Report cloud-init status or wait on completion.') + if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost if sysv_args[0] == 'analyze': @@ -783,6 +789,18 @@ def main(sysv_args=None): logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( action=('collect-logs', handle_collect_logs_args)) + elif sysv_args[0] == 'clean': + from cloudinit.cmd.clean import ( + get_parser as clean_parser, handle_clean_args) + clean_parser(parser_clean) + parser_clean.set_defaults( + action=('clean', handle_clean_args)) + elif sysv_args[0] == 'status': + from cloudinit.cmd.status import ( + get_parser as status_parser, handle_status_args) + status_parser(parser_status) + parser_status.set_defaults( + action=('status', handle_status_args)) args = parser.parse_args(args=sysv_args) diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py new file mode 100644 index 00000000..3e5d0d07 --- /dev/null +++ b/cloudinit/cmd/status.py @@ -0,0 +1,157 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Define 'status' utility and handler as part of cloud-init commandline.""" + +import argparse +import os +import sys +from time import gmtime, strftime, sleep + +from cloudinit.distros import uses_systemd +from cloudinit.stages import Init +from cloudinit.util import get_cmdline, load_file, load_json + +CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' + +# customer visible status messages +STATUS_ENABLED_NOT_RUN = 'not run' +STATUS_RUNNING = 'running' +STATUS_DONE = 'done' +STATUS_ERROR = 'error' +STATUS_DISABLED = 'disabled' + + +def get_parser(parser=None): + """Build or extend an arg parser for status utility. + + @param parser: Optional existing ArgumentParser instance representing the + status subcommand which will be extended to support the args of + this utility. 
+ + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog='status', + description='Report run status of cloud-init') + parser.add_argument( + '-l', '--long', action='store_true', default=False, + help=('Report long format of statuses including run stage name and' + ' error messages')) + parser.add_argument( + '-w', '--wait', action='store_true', default=False, + help='Block waiting on cloud-init to complete') + return parser + + +def handle_status_args(name, args): + """Handle calls to 'cloud-init status' as a subcommand.""" + # Read configured paths + init = Init(ds_deps=[]) + init.read_cfg() + + status, status_detail, time = _get_status_details(init.paths) + if args.wait: + while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): + sys.stdout.write('.') + sys.stdout.flush() + status, status_detail, time = _get_status_details(init.paths) + sleep(0.25) + sys.stdout.write('\n') + if args.long: + print('status: {0}'.format(status)) + if time: + print('time: {0}'.format(time)) + print('detail:\n{0}'.format(status_detail)) + else: + print('status: {0}'.format(status)) + return 1 if status == STATUS_ERROR else 0 + + +def _is_cloudinit_disabled(disable_file, paths): + """Report whether cloud-init is disabled. + + @param disable_file: The path to the cloud-init disable file. + @param paths: An initialized cloudinit.helpers.Paths object. + @returns: A tuple containing (bool, reason) about cloud-init's status and + why. + """ + is_disabled = False + cmdline_parts = get_cmdline().split() + if not uses_systemd(): + reason = 'Cloud-init enabled on sysvinit' + elif 'cloud-init=enabled' in cmdline_parts: + reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + elif os.path.exists(disable_file): + is_disabled = True + reason = 'Cloud-init disabled by {0}'.format(disable_file) + elif 'cloud-init=disabled' in cmdline_parts: + is_disabled = True + reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' + elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + is_disabled = True + reason = 'Cloud-init disabled by cloud-init-generator' + return (is_disabled, reason) + + +def _get_status_details(paths): + """Return a 3-tuple of status, status_details and time of last event. + + @param paths: An initialized cloudinit.helpers.Paths object. + + Values are obtained from parsing paths.run_dir/status.json.
+ """ + + status = STATUS_ENABLED_NOT_RUN + status_detail = '' + status_v1 = {} + + status_file = os.path.join(paths.run_dir, 'status.json') + + (is_disabled, reason) = _is_cloudinit_disabled( + CLOUDINIT_DISABLED_FILE, paths) + if is_disabled: + status = STATUS_DISABLED + status_detail = reason + if os.path.exists(status_file): + status_v1 = load_json(load_file(status_file)).get('v1', {}) + errors = [] + latest_event = 0 + for key, value in sorted(status_v1.items()): + if key == 'stage': + if value: + status_detail = 'Running in stage: {0}'.format(value) + elif key == 'datasource': + status_detail = value + elif isinstance(value, dict): + errors.extend(value.get('errors', [])) + finished = value.get('finished') or 0 + if finished == 0: + status = STATUS_RUNNING + event_time = max(value.get('start', 0), finished) + if event_time > latest_event: + latest_event = event_time + if errors: + status = STATUS_ERROR + status_detail = '\n'.join(errors) + elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: + status = STATUS_DONE + if latest_event: + time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + else: + time = '' + return status, status_detail, time + + +def main(): + """Tool to report status of cloud-init.""" + parser = get_parser() + sys.exit(handle_status_args('status', parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py new file mode 100644 index 00000000..af438aab --- /dev/null +++ b/cloudinit/cmd/tests/test_clean.py @@ -0,0 +1,159 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.cmd import clean +from cloudinit.util import ensure_dir, write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock +from collections import namedtuple +import os +from six import StringIO + +mypaths = namedtuple('MyPaths', 'cloud_dir') + + +class TestClean(CiTestCase): + + def setUp(self): + super(TestClean, self).setUp() + self.new_root = self.tmp_dir() + self.artifact_dir = self.tmp_path('artifacts', self.new_root) + self.log1 = self.tmp_path('cloud-init.log', self.new_root) + self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) + + class FakeInit(object): + cfg = {'def_log_file': self.log1, + 'output': {'all': '|tee -a {0}'.format(self.log2)}} + paths = mypaths(cloud_dir=self.artifact_dir) + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test_remove_artifacts_removes_logs(self): + """remove_artifacts removes logs when remove_logs is True.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + self.assertFalse( + os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=True) + self.assertFalse(os.path.exists(self.log1), 'Unexpected file') + self.assertFalse(os.path.exists(self.log2), 'Unexpected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_preserves_logs(self): + """remove_artifacts leaves logs when remove_logs is False.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertTrue(os.path.exists(self.log1), 'Missing expected file') + self.assertTrue(os.path.exists(self.log2), 'Missing expected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_removes_artifacts_skipping_seed(self): + """remove_artifacts cleans artifacts dir with exception of seed dir.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for expected_dir in dirs[:2]: + self.assertTrue( + os.path.exists(expected_dir), + 'Missing {0} dir'.format(expected_dir)) + for deleted_dir in dirs[2:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_removes_artifacts_removes_seed(self): + """remove_artifacts removes seed dir when remove_seed is True.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False, remove_seed=True) + self.assertEqual(0, retcode) + self.assertTrue( + os.path.exists(self.artifact_dir), 'Missing artifact dir') + for deleted_dir in dirs[1:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_returns_one_on_errors(self): + 
"""remove_artifacts returns non-zero on failure and prints an error.""" + ensure_dir(self.artifact_dir) + ensure_dir(os.path.join(self.artifact_dir, 'dir1')) + + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'del_dir': {'side_effect': OSError('oops')}, + 'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(1, retcode) + self.assertEqual( + 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + + def test_handle_clean_args_reboots(self): + """handle_clean_args_reboots when reboot arg is provided.""" + + called_cmds = [] + + def fake_subp(cmd, capture): + called_cmds.append((cmd, capture)) + return '', '' + + myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') + cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'subp': {'side_effect': fake_subp}, + 'Init': {'side_effect': self.init_class}}, + clean.handle_clean_args, name='does not matter', args=cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [(['shutdown', '-r', 'now'], False)], called_cmds) + + def test_status_main(self): + '''clean.main can be run as a standalone script.''' + write_file(self.log1, 'cloud-init-log') + with self.assertRaises(SystemExit) as context_manager: + wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}, + 'sys.argv': {'new': ['clean', '--logs']}}, + clean.main) + + self.assertEqual(0, context_manager.exception.code) + self.assertFalse( + os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) + + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py new file mode 100644 index 00000000..8ec9b5bc --- /dev/null +++ b/cloudinit/cmd/tests/test_status.py @@ -0,0 +1,353 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from collections import namedtuple +import os +from six import StringIO +from textwrap import dedent + +from cloudinit.atomic_helper import write_json +from cloudinit.cmd import status +from cloudinit.util import write_file +from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'long wait') + + +class TestStatus(CiTestCase): + + def setUp(self): + super(TestStatus, self).setUp() + self.new_root = self.tmp_dir() + self.status_file = self.tmp_path('status.json', self.new_root) + self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) + self.paths = mypaths(run_dir=self.new_root) + + class FakeInit(object): + paths = self.paths + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test__is_cloudinit_disabled_false_on_sysvinit(self): + '''When not in an environment using systemd, return False.''' + write_file(self.disable_file, '') # Create the ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': False}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse( + is_disabled, 'expected enabled cloud-init on sysvinit') + self.assertEqual('Cloud-init enabled on sysvinit', reason) + + def test__is_cloudinit_disabled_true_on_disable_file(self): + '''When using systemd and disable_file is present return disabled.''' + write_file(self.disable_file, '') # Create observed disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by {0}'.format(self.disable_file), reason) + + def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): + '''Not disabled when using systemd and enabled via commandline.''' + write_file(self.disable_file, '') # Create ignored disable file + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=enabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse(is_disabled, 'expected enabled cloud-init') + self.assertEqual( + 'Cloud-init enabled by kernel command line cloud-init=enabled', + reason) + + def test__is_cloudinit_disabled_true_on_kernel_cmdline(self): + '''When using systemd and the kernel cmdline has cloud-init=disabled, return disabled.''' + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something cloud-init=disabled else'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual( + 'Cloud-init disabled by kernel parameter cloud-init=disabled', + reason) + + def test__is_cloudinit_disabled_true_when_generator_disables(self): + '''When cloud-init-generator doesn't write enabled file return True.''' + enabled_file = os.path.join(self.paths.run_dir, 'enabled') + self.assertFalse(os.path.exists(enabled_file)) + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 'get_cmdline': 'something'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertTrue(is_disabled, 'expected disabled cloud-init') + self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) + + def test_status_returns_not_run(self):
+ '''When status.json does not exist yet, return 'not run'.''' + self.assertFalse( + os.path.exists(self.status_file), 'Unexpected status.json found') + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: not run\n', m_stdout.getvalue()) + + def test_status_returns_disabled_long_on_presence_of_disable_file(self): + '''When cloudinit is disabled, return disabled reason.''' + + checked_files = [] + + def fakeexists(filepath): + checked_files.append(filepath) + status_file = os.path.join(self.paths.run_dir, 'status.json') + return bool(not filepath == status_file) + + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'os.path.exists': {'side_effect': fakeexists}, + '_is_cloudinit_disabled': (True, 'disabled for some reason'), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [os.path.join(self.paths.run_dir, 'status.json')], + checked_files) + expected = dedent('''\ + status: disabled + detail: + disabled for some reason + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running(self): + '''Report running when status file exists but isn't finished.''' + write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: running\n', m_stdout.getvalue()) + + def test_status_returns_done(self): + '''Reports done when stage is None and all stages are finished.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'blah': {'finished': 123.456}, + 'init': {'errors': [], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual('status: done\n', m_stdout.getvalue()) + + def test_status_returns_done_long(self): + '''Long format of done status includes datasource info.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'start': 124.567, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + 
self.assertEqual(0, retcode) + expected = dedent('''\ + status: done + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_on_errors(self): + '''Reports error when any stage has errors.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'blah': {'errors': [], 'finished': 123.456}, + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=False, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual('status: error\n', m_stdout.getvalue()) + + def test_status_on_errors_long(self): + '''Long format of error status includes all error messages.''' + write_json( + self.status_file, + {'v1': {'stage': None, + 'datasource': ( + 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' + '[dsmode=net]'), + 'init': {'errors': ['error1'], 'start': 124.567, + 'finished': 125.678}, + 'init-local': {'errors': ['error2', 'error3'], + 'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + expected = dedent('''\ + status: error + time: Thu, 01 Jan 1970 00:02:05 +0000 + detail: + error1 + error2 + error3 + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_returns_running_long_format(self): + '''Long format reports the stage in which we are running.''' + write_json( + self.status_file, + {'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}}) + cmdargs = myargs(long=True, wait=False) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + expected = dedent('''\ + status: running + time: Thu, 01 Jan 1970 00:02:04 +0000 + detail: + Running in stage: init + ''') + self.assertEqual(expected, m_stdout.getvalue()) + + def test_status_wait_blocks_until_done(self): + '''Specifying wait will poll every 1/4 second until done state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + done_json = { + 'v1': {'stage': None, + 'init': {'start': 124.456, 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, done_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + 
'_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(0, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) + + def test_status_wait_blocks_until_error(self): + '''Specifying wait will poll every 1/4 second until error state.''' + running_json = { + 'v1': {'stage': 'init', + 'init': {'start': 124.456, 'finished': None}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + error_json = { + 'v1': {'stage': None, + 'init': {'errors': ['error1'], 'start': 124.456, + 'finished': 125.678}, + 'init-local': {'start': 123.45, 'finished': 123.46}}} + + self.sleep_calls = 0 + + def fake_sleep(interval): + self.assertEqual(0.25, interval) + self.sleep_calls += 1 + if self.sleep_calls == 2: + write_json(self.status_file, running_json) + elif self.sleep_calls == 3: + write_json(self.status_file, error_json) + + cmdargs = myargs(long=False, wait=True) + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + retcode = wrap_and_call( + 'cloudinit.cmd.status', + {'sleep': {'side_effect': fake_sleep}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.handle_status_args, 'ignored', cmdargs) + self.assertEqual(1, retcode) + self.assertEqual(4, self.sleep_calls) + self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) + + def test_status_main(self): + '''status.main can be run as a standalone script.''' + write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + with self.assertRaises(SystemExit) as context_manager: + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + wrap_and_call( + 'cloudinit.cmd.status', + {'sys.argv': {'new': ['status']}, + '_is_cloudinit_disabled': (False, ''), + 'Init': {'side_effect': self.init_class}}, + status.main) + self.assertEqual(0, context_manager.exception.code) + self.assertEqual('status: running\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d5becd12..99e60e7a 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -102,11 +102,8 @@ class Distro(object): self._apply_hostname(writeable_hostname) def uses_systemd(self): - try: - res = os.lstat('/run/systemd/system') - return stat.S_ISDIR(res.st_mode) - except Exception: - return False + """Wrapper to report whether this distro uses systemd or sysvinit.""" + return uses_systemd() @abc.abstractmethod def package_command(self, cmd, args=None, pkgs=None): @@ -761,4 +758,13 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.copy(tz_file, tz_local) return + +def uses_systemd(): + try: + res = os.lstat('/run/systemd/system') + return stat.S_ISDIR(res.st_mode) + except Exception: + return False + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 6c014ba5..320d64e0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1398,6 +1398,32 @@ def get_output_cfg(cfg, mode): return ret +def get_config_logfiles(cfg): + """Return a list of log file paths from the configuration dictionary. + + @param cfg: The cloud-init merged configuration dictionary. 
+ """ + logs = [] + if not cfg or not isinstance(cfg, dict): + return logs + default_log = cfg.get('def_log_file') + if default_log: + logs.append(default_log) + for fmt in get_output_cfg(cfg, None): + if not fmt: + continue + match = re.match('(?P\||>+)\s*(?P.*)', fmt) + if not match: + continue + target = match.group('target') + parts = target.split() + if len(parts) == 1: + logs.append(target) + elif ['tee', '-a'] == parts[:2]: + logs.append(parts[2]) + return list(set(logs)) + + def logexc(log, msg, *args): # Setting this here allows this to change # levels easily (not always error level) -- cgit v1.2.3 From 0cf6db3617e0cebeb89c4809396f84360827e96c Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 5 Dec 2017 16:42:35 -0700 Subject: Datasources: Formalize DataSource get_data and related properties. Each DataSource subclass must define its own get_data method. This branch formalizes our DataSource class to require that subclasses define an explicit dsname for sourcing cloud-config datasource configuration. Subclasses must also override the _get_data method or a NotImplementedError is raised. The branch also writes /run/cloud-init/instance-data.json. This file contains all meta-data, user-data and vendor-data and a standardized set of metadata keys in a json blob which other utilities with root-access could make use of. Because some meta-data or user-data is potentially sensitive the file is only readable by root. Generally most metadata content types should be json serializable. If specific keys or values are not serializable, those specific values will be base64encoded and the key path will be listed under the top-level key 'base64-encoded-keys' in instance-data.json. If json writing fails due to other TypeErrors or UnicodeDecodeErrors, a warning log will be emitted to /var/log/cloud-init.log and no instance-data.json will be created. 
--- cloudinit/analyze/__main__.py | 4 +- cloudinit/analyze/dump.py | 8 +- cloudinit/sources/DataSourceAliYun.py | 1 + cloudinit/sources/DataSourceAltCloud.py | 5 +- cloudinit/sources/DataSourceAzure.py | 4 +- cloudinit/sources/DataSourceBigstep.py | 5 +- cloudinit/sources/DataSourceCloudSigma.py | 5 +- cloudinit/sources/DataSourceCloudStack.py | 5 +- cloudinit/sources/DataSourceConfigDrive.py | 5 +- cloudinit/sources/DataSourceDigitalOcean.py | 5 +- cloudinit/sources/DataSourceEc2.py | 12 +- cloudinit/sources/DataSourceGCE.py | 5 +- cloudinit/sources/DataSourceMAAS.py | 5 +- cloudinit/sources/DataSourceNoCloud.py | 5 +- cloudinit/sources/DataSourceNone.py | 5 +- cloudinit/sources/DataSourceOVF.py | 5 +- cloudinit/sources/DataSourceOpenNebula.py | 5 +- cloudinit/sources/DataSourceOpenStack.py | 5 +- cloudinit/sources/DataSourceScaleway.py | 4 +- cloudinit/sources/DataSourceSmartOS.py | 5 +- cloudinit/sources/__init__.py | 129 +++++++++++-- cloudinit/sources/tests/__init__.py | 0 cloudinit/sources/tests/test_init.py | 202 +++++++++++++++++++++ cloudinit/tests/helpers.py | 7 - cloudinit/util.py | 33 +++- tests/unittests/test_datasource/test_aliyun.py | 2 +- tests/unittests/test_datasource/test_altcloud.py | 22 ++- tests/unittests/test_datasource/test_azure.py | 28 +-- tests/unittests/test_datasource/test_cloudsigma.py | 13 +- tests/unittests/test_datasource/test_cloudstack.py | 19 +- .../unittests/test_datasource/test_configdrive.py | 3 +- .../unittests/test_datasource/test_digitalocean.py | 11 +- tests/unittests/test_datasource/test_ec2.py | 3 +- tests/unittests/test_datasource/test_gce.py | 3 +- tests/unittests/test_datasource/test_nocloud.py | 14 +- tests/unittests/test_datasource/test_opennebula.py | 12 +- tests/unittests/test_datasource/test_openstack.py | 12 +- tests/unittests/test_datasource/test_scaleway.py | 13 +- tests/unittests/test_datasource/test_smartos.py | 3 +- tests/unittests/test_ds_identify.py | 4 +- tests/unittests/test_runs/test_merge_run.py | 1 + tests/unittests/test_runs/test_simple_run.py | 3 +- 42 files changed, 517 insertions(+), 123 deletions(-) create mode 100644 cloudinit/sources/tests/__init__.py create mode 100644 cloudinit/sources/tests/test_init.py (limited to 'cloudinit') diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 69b9e43e..3ba5903f 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -6,6 +6,8 @@ import argparse import re import sys +from cloudinit.util import json_dumps + from . import dump from . 
import show @@ -112,7 +114,7 @@ def analyze_show(name, args): def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(dump.json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + '\n') def _get_events(infile): diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index ca4da496..b071aa19 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -2,7 +2,6 @@ import calendar from datetime import datetime -import json import sys from cloudinit import util @@ -132,11 +131,6 @@ def parse_ci_logline(line): return event -def json_dumps(data): - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def dump_events(cisource=None, rawdata=None): events = [] event = None @@ -169,7 +163,7 @@ def main(): else: cisource = sys.stdin - return json_dumps(dump_events(cisource)) + return util.json_dumps(dump_events(cisource)) if __name__ == "__main__": diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 43a7e42c..7ac8288d 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -11,6 +11,7 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS" class DataSourceAliYun(EC2.DataSourceEc2): + dsname = 'AliYun' metadata_urls = ['http://100.100.100.200'] # The minimum supported metadata_version from the ec2 metadata apis diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index c78ad9eb..be2d6cf8 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -74,6 +74,9 @@ def read_user_data_callback(mount_dir): class DataSourceAltCloud(sources.DataSource): + + dsname = 'AltCloud' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -112,7 +115,7 @@ class DataSourceAltCloud(sources.DataSource): return 'UNKNOWN' - def get_data(self): + def _get_data(self): ''' Description: User Data is passed to the launching instance which diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 14367e9c..6978d4e5 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -246,6 +246,8 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): + + dsname = 'Azure' _negotiated = False def __init__(self, sys_cfg, distro, paths): @@ -330,7 +332,7 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata - def get_data(self): + def _get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. 
So, in order to successfully reboot we # need to look in the datadir and consider that valid diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index d7fcd45a..699a85b5 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -16,13 +16,16 @@ LOG = logging.getLogger(__name__) class DataSourceBigstep(sources.DataSource): + + dsname = 'Bigstep' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.metadata = {} self.vendordata_raw = "" self.userdata_raw = "" - def get_data(self, apply_filter=False): + def _get_data(self, apply_filter=False): url = get_url_from_file() if url is None: return False diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 19df16b1..4eaad475 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -23,6 +23,9 @@ class DataSourceCloudSigma(sources.DataSource): For more information about CloudSigma's Server Context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html """ + + dsname = 'CloudSigma' + def __init__(self, sys_cfg, distro, paths): self.cepko = Cepko() self.ssh_public_key = '' @@ -46,7 +49,7 @@ class DataSourceCloudSigma(sources.DataSource): LOG.warning("failed to query dmi data for system product name") return False - def get_data(self): + def _get_data(self): """ Metadata is the whole server context and /meta/cloud-config is used as userdata. diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 9dc473fc..0df545fc 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -65,6 +65,9 @@ class CloudStackPasswordServerClient(object): class DataSourceCloudStack(sources.DataSource): + + dsname = 'CloudStack' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') @@ -117,7 +120,7 @@ class DataSourceCloudStack(sources.DataSource): def get_config_obj(self): return self.cfg - def get_data(self): + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index ef374f3f..870b3688 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -32,6 +32,9 @@ OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): + + dsname = 'ConfigDrive' + def __init__(self, sys_cfg, distro, paths): super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None @@ -50,7 +53,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self): + def _get_data(self): found = None md = {} results = {} diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5e7e66be..e0ef665e 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -27,6 +27,9 @@ MD_USE_IPV4LL = True class DataSourceDigitalOcean(sources.DataSource): + + dsname = 'DigitalOcean' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, 
sys_cfg, distro, paths) self.distro = distro @@ -44,7 +47,7 @@ class DataSourceDigitalOcean(sources.DataSource): def _get_sysinfo(self): return do_helper.read_sysinfo() - def get_data(self): + def _get_data(self): (is_do, droplet_id) = self._get_sysinfo() # only proceed if we know we are on DigitalOcean diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 7bbbfb63..e5c88334 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -31,6 +31,7 @@ _unset = "_unset" class Platforms(object): + # TODO Rename and move to cloudinit.cloud.CloudNames ALIYUN = "AliYun" AWS = "AWS" BRIGHTBOX = "Brightbox" @@ -45,6 +46,7 @@ class Platforms(object): class DataSourceEc2(sources.DataSource): + dsname = 'Ec2' # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve @@ -68,11 +70,15 @@ class DataSourceEc2(sources.DataSource): _fallback_interface = None def __init__(self, sys_cfg, distro, paths): - sources.DataSource.__init__(self, sys_cfg, distro, paths) + super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None self.seed_dir = os.path.join(paths.seed_dir, "ec2") - def get_data(self): + def _get_cloud_name(self): + """Return the cloud name as identified during _get_data.""" + return self.cloud_platform + + def _get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): self.userdata_raw = seed_ret['user-data'] @@ -274,7 +280,7 @@ class DataSourceEc2(sources.DataSource): return None @property - def cloud_platform(self): + def cloud_platform(self): # TODO rename cloud_name if self._cloud_platform is None: self._cloud_platform = identify_platform() return self._cloud_platform diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index ccae4200..ad6dae37 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -42,6 +42,9 @@ class GoogleMetadataFetcher(object): class DataSourceGCE(sources.DataSource): + + dsname = 'GCE' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.metadata = dict() @@ -50,7 +53,7 @@ class DataSourceGCE(sources.DataSource): BUILTIN_DS_CONFIG]) self.metadata_address = self.ds_cfg['metadata_url'] - def get_data(self): + def _get_data(self): ret = util.log_time( LOG.debug, 'Crawl of GCE metadata service', read_md, kwargs={'address': self.metadata_address}) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 77df5a51..496bd06a 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -39,6 +39,9 @@ class DataSourceMAAS(sources.DataSource): hostname vendor-data """ + + dsname = "MAAS" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None @@ -62,7 +65,7 @@ class DataSourceMAAS(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [%s]" % (root, self.base_url) - def get_data(self): + def _get_data(self): mcfg = self.ds_cfg try: diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index e641244d..5d3a8ddb 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -20,6 +20,9 @@ LOG = logging.getLogger(__name__) class DataSourceNoCloud(sources.DataSource): + + dsname = "NoCloud" 
+ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -32,7 +35,7 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) - def get_data(self): + def _get_data(self): defaults = { "instance-id": "nocloud", "dsmode": self.dsmode, diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 906bb278..e63a7e39 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -11,12 +11,15 @@ LOG = logging.getLogger(__name__) class DataSourceNone(sources.DataSource): + + dsname = "None" + def __init__(self, sys_cfg, distro, paths, ud_proc=None): sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) self.metadata = {} self.userdata_raw = '' - def get_data(self): + def _get_data(self): # If the datasource config has any provided 'fallback' # userdata or metadata, use it... if 'userdata_raw' in self.ds_cfg: diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index ccebf11a..6ac621f2 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -43,6 +43,9 @@ LOG = logging.getLogger(__name__) class DataSourceOVF(sources.DataSource): + + dsname = "OVF" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -60,7 +63,7 @@ class DataSourceOVF(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - def get_data(self): + def _get_data(self): found = [] md = {} ud = "" diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 5fdac192..5da11847 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -31,6 +31,9 @@ CONTEXT_DISK_FILES = ["context.sh"] class DataSourceOpenNebula(sources.DataSource): + + dsname = "OpenNebula" + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None @@ -40,7 +43,7 @@ class DataSourceOpenNebula(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) - def get_data(self): + def _get_data(self): defaults = {"instance-id": DEFAULT_IID} results = None seed = None diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b64a7f24..e55a7638 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -24,6 +24,9 @@ DEFAULT_METADATA = { class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): + + dsname = "OpenStack" + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None @@ -96,7 +99,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def get_data(self): + def _get_data(self): try: if not self.wait_for_metadata_service(): return False diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 3a8a8e8f..b0b19c93 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -169,6 +169,8 @@ def query_data_api(api_type, api_address, retries, timeout): class 
DataSourceScaleway(sources.DataSource): + dsname = "Scaleway" + def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) @@ -184,7 +186,7 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) - def get_data(self): + def _get_data(self): if not on_scaleway(): return False diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6c6902fd..86bfa5d8 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -159,6 +159,9 @@ LEGACY_USER_D = "/var/db" class DataSourceSmartOS(sources.DataSource): + + dsname = "Joyent" + _unset = "_unset" smartos_type = _unset md_client = _unset @@ -211,7 +214,7 @@ class DataSourceSmartOS(sources.DataSource): os.rename('/'.join([svc_path, 'provisioning']), '/'.join([svc_path, 'provision_success'])) - def get_data(self): + def _get_data(self): self._init() md = {} diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9a43fbee..4b819ce6 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -10,9 +10,11 @@ import abc import copy +import json import os import six +from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils @@ -33,6 +35,12 @@ DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" DS_PREFIX = 'DataSource' +# File in which instance meta-data, user-data and vendor-data is written +INSTANCE_JSON_FILE = 'instance-data.json' + +# Key which can be provide a cloud's official product name to cloud-init +METADATA_CLOUD_NAME_KEY = 'cloud-name' + LOG = logging.getLogger(__name__) @@ -40,12 +48,39 @@ class DataSourceNotFoundException(Exception): pass +def process_base64_metadata(metadata, key_path=''): + """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" + md_copy = copy.deepcopy(metadata) + md_copy['base64-encoded-keys'] = [] + for key, val in metadata.items(): + if key_path: + sub_key_path = key_path + '/' + key + else: + sub_key_path = key + if isinstance(val, str) and val.startswith('ci-b64:'): + md_copy['base64-encoded-keys'].append(sub_key_path) + md_copy[key] = val.replace('ci-b64:', '') + if isinstance(val, dict): + return_val = process_base64_metadata(val, sub_key_path) + md_copy['base64-encoded-keys'].extend( + return_val.pop('base64-encoded-keys')) + md_copy[key] = return_val + return md_copy + + @six.add_metaclass(abc.ABCMeta) class DataSource(object): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' + # Datasource name needs to be set by subclasses to determine which + # cloud-config datasource key is loaded + dsname = '_undef' + + # Cached cloud_name as determined by _get_cloud_name + _cloud_name = None + def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro @@ -56,17 +91,8 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # find the datasource config name. - # remove 'DataSource' from classname on front, and remove 'Net' on end. 
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo'] - name = type_utils.obj_name(self) - if name.startswith(DS_PREFIX): - name = name[len(DS_PREFIX):] - if name.endswith('Net'): - name = name[0:-3] - - self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, - ("datasource", name), {}) + self.ds_cfg = util.get_cfg_by_path( + self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: self.ds_cfg = {} @@ -78,6 +104,51 @@ class DataSource(object): def __str__(self): return type_utils.obj_name(self) + def _get_standardized_metadata(self): + """Return a dictionary of standardized metadata keys.""" + return {'v1': { + 'local-hostname': self.get_hostname(), + 'instance-id': self.get_instance_id(), + 'cloud-name': self.cloud_name, + 'region': self.region, + 'availability-zone': self.availability_zone}} + + def get_data(self): + """Datasources implement _get_data to set up metadata and userdata_raw. + + Minimally, the datasource should return a boolean True on success. + """ + return_value = self._get_data() + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + if not return_value: + return return_value + + instance_data = { + 'ds': { + 'meta-data': self.metadata, + 'user-data': self.get_userdata_raw(), + 'vendor-data': self.get_vendordata_raw()}} + instance_data.update( + self._get_standardized_metadata()) + try: + # Process content base64encoding unserializable values + content = util.json_dumps(instance_data) + # Strip base64: prefix and return base64-encoded-keys + processed_data = process_base64_metadata(json.loads(content)) + except TypeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + except UnicodeDecodeError as e: + LOG.warning('Error persisting instance-data.json: %s', str(e)) + return return_value + write_json(json_file, processed_data, mode=0o600) + return return_value + + def _get_data(self): + raise NotImplementedError( + 'Subclasses of DataSource must implement _get_data which' + ' sets self.metadata, vendordata_raw and userdata_raw.') + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -90,6 +161,34 @@ class DataSource(object): self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) return self.vendordata + + @property + def cloud_name(self): + """Return lowercase cloud name as determined by the datasource. + + Datasource can determine or define its own cloud product name in + metadata. + """ + if self._cloud_name: + return self._cloud_name + if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): + cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) + if isinstance(cloud_name, six.string_types): + self._cloud_name = cloud_name.lower() + LOG.debug( + 'Ignoring metadata provided key %s: non-string type %s', + METADATA_CLOUD_NAME_KEY, type(cloud_name)) + else: + self._cloud_name = self._get_cloud_name().lower() + return self._cloud_name + + def _get_cloud_name(self): + """Return the datasource name as it frequently matches cloud name. + + Should be overridden in subclasses which can run on multiple + cloud names, such as DataSourceEc2.
+ """ + return self.dsname + @property def launch_index(self): if not self.metadata: @@ -161,8 +260,11 @@ class DataSource(object): @property def availability_zone(self): - return self.metadata.get('availability-zone', - self.metadata.get('availability_zone')) + top_level_az = self.metadata.get( + 'availability-zone', self.metadata.get('availability_zone')) + if top_level_az: + return top_level_az + return self.metadata.get('placement', {}).get('availability-zone') @property def region(self): @@ -417,4 +519,5 @@ def list_from_depends(depends, ds_list): ret_list.append(cls) return ret_list + # vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py new file mode 100644 index 00000000..af151154 --- /dev/null +++ b/cloudinit/sources/tests/test_init.py @@ -0,0 +1,202 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import os +import six +import stat + +from cloudinit.helpers import Paths +from cloudinit.sources import ( + INSTANCE_JSON_FILE, DataSource) +from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.user_data import UserDataProcessor +from cloudinit import util + + +class DataSourceTestSubclassNet(DataSource): + + dsname = 'MyTestSubclass' + + def __init__(self, sys_cfg, distro, paths, custom_userdata=None): + super(DataSourceTestSubclassNet, self).__init__( + sys_cfg, distro, paths) + self._custom_userdata = custom_userdata + + def _get_cloud_name(self): + return 'SubclassCloudName' + + def _get_data(self): + self.metadata = {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'} + if self._custom_userdata: + self.userdata_raw = self._custom_userdata + else: + self.userdata_raw = 'userdata_raw' + self.vendordata_raw = 'vendordata_raw' + return True + + +class InvalidDataSourceTestSubclassNet(DataSource): + pass + + +class TestDataSource(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDataSource, self).setUp() + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} + self.distro = 'distrotest' # generally should be a Distro object + self.paths = Paths({}) + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) + + def test_datasource_init(self): + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" + self.assertEqual(self.paths, self.datasource.paths) + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) + self.assertEqual(self.distro, self.datasource.distro) + self.assertIsNone(self.datasource.userdata) + self.assertEqual({}, self.datasource.metadata) + self.assertIsNone(self.datasource.userdata_raw) + self.assertIsNone(self.datasource.vendordata) + self.assertIsNone(self.datasource.vendordata_raw) + self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) + + def test_datasource_init_gets_ds_cfg_using_dsname(self): + """Init uses DataSource.dsname for sourcing ds_cfg.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + paths = Paths({}) + datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) + self.assertEqual({'key2': False}, datasource.ds_cfg) + + def test_str_is_classname(self): + """The string representation of the datasource is the classname.""" + self.assertEqual('DataSource', 
str(self.datasource)) + self.assertEqual( + 'DataSourceTestSubclassNet', + str(DataSourceTestSubclassNet('', '', self.paths))) + + def test__get_data_unimplemented(self): + """Raise an error when _get_data is not implemented.""" + with self.assertRaises(NotImplementedError) as context_manager: + self.datasource.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + datasource2 = InvalidDataSourceTestSubclassNet( + self.sys_cfg, self.distro, self.paths) + with self.assertRaises(NotImplementedError) as context_manager: + datasource2.get_data() + self.assertIn( + 'Subclasses of DataSource must implement _get_data', + str(context_manager.exception)) + + def test_get_data_calls_subclass__get_data(self): + """Datasource.get_data uses the subclass' version of _get_data.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + self.assertEqual( + {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + datasource.metadata) + self.assertEqual('userdata_raw', datasource.userdata_raw) + self.assertEqual('vendordata_raw', datasource.vendordata_raw) + + def test_get_data_write_json_instance_data(self): + """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected = { + 'base64-encoded-keys': [], + 'v1': { + 'availability-zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'instance-id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'ds': { + 'meta-data': {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'}, + 'user-data': 'userdata_raw', + 'vendor-data': 'vendordata_raw'}} + self.assertEqual(expected, util.load_json(content)) + file_stat = os.stat(json_file) + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_handles_redacted_unserializable_content(self): + """get_data warns unserializable content in INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected_userdata = { + 'key1': 'val1', + 'key2': { + 'key2.1': "Warning: redacted unserializable type "}} + instance_json = util.load_json(content) + self.assertEqual( + expected_userdata, instance_json['ds']['user-data']) + + @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") + def test_get_data_base64encodes_unserializable_bytes(self): + """On py3, get_data base64encodes any unserializable content.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual( + ['ds/user-data/key2/key2.1'], + instance_json['base64-encoded-keys']) + self.assertEqual( 
+ {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") + def test_get_data_handles_bytes_values(self): + """On py2 get_data handles bytes values without having to b64encode.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertEqual([], instance_json['base64-encoded-keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, + instance_json['ds']['user-data']) + + @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") + def test_non_utf8_encoding_logs_warning(self): + """When non-utf-8 values exist in py2 instance-data is not written.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + self.assertFalse(os.path.exists(json_file)) + self.assertIn( + "WARNING: Error persisting instance-data.json: 'utf8' codec can't" + " decode byte 0xaa in position 2: invalid start byte", + self.logs.getvalue()) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 6f88a5b7..feb884ab 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -3,7 +3,6 @@ from __future__ import print_function import functools -import json import logging import os import shutil @@ -337,12 +336,6 @@ def dir2dict(startdir, prefix=None): return flist -def json_dumps(data): - # print data in nicely formatted json. - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': ')) - - def wrap_and_call(prefix, mocks, func, *args, **kwargs): """ call func(args, **kwargs) with mocks applied, then unapplies mocks diff --git a/cloudinit/util.py b/cloudinit/util.py index 320d64e0..11e96a77 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -533,15 +533,6 @@ def multi_log(text, console=True, stderr=True, log.log(log_level, text) -def load_json(text, root_types=(dict,)): - decoded = json.loads(decode_binary(text)) - if not isinstance(decoded, tuple(root_types)): - expected_types = ", ".join([str(t) for t in root_types]) - raise TypeError("(%s) root types expected, got %s instead" - % (expected_types, type(decoded))) - return decoded - - def is_ipv4(instr): """determine if input string is a ipv4 address. 
return boolean.""" toks = instr.split('.') @@ -1480,7 +1471,31 @@ def ensure_dirs(dirlist, mode=0o755): ensure_dir(d, mode) +def load_json(text, root_types=(dict,)): + decoded = json.loads(decode_binary(text)) + if not isinstance(decoded, tuple(root_types)): + expected_types = ", ".join([str(t) for t in root_types]) + raise TypeError("(%s) root types expected, got %s instead" + % (expected_types, type(decoded))) + return decoded + + +def json_serialize_default(_obj): + """Handler for types which aren't json serializable.""" + try: + return 'ci-b64:{0}'.format(b64e(_obj)) + except AttributeError: + return 'Warning: redacted unserializable type {0}'.format(type(_obj)) + + +def json_dumps(data): + """Return data in nicely formatted json.""" + return json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': '), default=json_serialize_default) + + def yaml_dumps(obj, explicit_start=True, explicit_end=True): + """Return data in nicely formatted yaml.""" return yaml.safe_dump(obj, line_break="\n", indent=4, diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index 82ee9714..714f5dac 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -67,7 +67,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): super(TestAliYunDatasource, self).setUp() cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} distro = {} - paths = helpers.Paths({}) + paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.ds = ay.DataSourceAliYun(cfg, distro, paths) self.metadata_address = self.ds.metadata_urls[0] diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index a4dfb540..3253f3ad 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -18,7 +18,7 @@ import tempfile from cloudinit import helpers from cloudinit import util -from cloudinit.tests.helpers import TestCase +from cloudinit.tests.helpers import CiTestCase import cloudinit.sources.DataSourceAltCloud as dsac @@ -97,7 +97,7 @@ def _dmi_data(expected): return _data -class TestGetCloudType(TestCase): +class TestGetCloudType(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_cloud_type() ''' @@ -143,14 +143,16 @@ class TestGetCloudType(TestCase): self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) -class TestGetDataCloudInfoFile(TestCase): +class TestGetDataCloudInfoFile(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_data() With a contrived CLOUD_INFO_FILE ''' def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.cloud_info_file = tempfile.mkstemp()[1] self.dmi_data = util.read_dmi_data dsac.CLOUD_INFO_FILE = self.cloud_info_file @@ -207,14 +209,16 @@ class TestGetDataCloudInfoFile(TestCase): self.assertEqual(False, dsrc.get_data()) -class TestGetDataNoCloudInfoFile(TestCase): +class TestGetDataNoCloudInfoFile(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.get_data() Without a CLOUD_INFO_FILE ''' def setUp(self): '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.dmi_data = util.read_dmi_data dsac.CLOUD_INFO_FILE = \ 'no such file' @@ -254,7 +258,7 @@ class 
TestGetDataNoCloudInfoFile(TestCase): self.assertEqual(False, dsrc.get_data()) -class TestUserDataRhevm(TestCase): +class TestUserDataRhevm(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' @@ -320,7 +324,7 @@ class TestUserDataRhevm(TestCase): self.assertEqual(False, dsrc.user_data_rhevm()) -class TestUserDataVsphere(TestCase): +class TestUserDataVsphere(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_vsphere() ''' @@ -368,7 +372,7 @@ class TestUserDataVsphere(TestCase): self.assertEqual(1, m_mount_cb.call_count) -class TestReadUserDataCallback(TestCase): +class TestReadUserDataCallback(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.read_user_data_callback() ''' diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 7cb1812a..226c214a 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,9 +11,7 @@ from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, import crypt import os -import shutil import stat -import tempfile import xml.etree.ElementTree as ET import yaml @@ -84,11 +82,11 @@ class TestAzureDataSource(CiTestCase): super(TestAzureDataSource, self).setUp() if PY26: raise SkipTest("Does not work on python 2.6") - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) + self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.patches = ExitStack() @@ -642,7 +640,7 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual(netconfig, expected_config) -class TestAzureBounce(TestCase): +class TestAzureBounce(CiTestCase): def mock_out_azure_moving_parts(self): self.patches.enter_context( @@ -669,10 +667,10 @@ class TestAzureBounce(TestCase): def setUp(self): super(TestAzureBounce, self).setUp() - self.tmp = tempfile.mkdtemp() + self.tmp = self.tmp_dir() self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - self.addCleanup(shutil.rmtree, self.tmp) + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() @@ -714,21 +712,24 @@ class TestAzureBounce(TestCase): def test_disabled_bounce_does_not_change_hostname(self): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_disabled_bounce_does_not_perform_bounce( self, perform_hostname_bounce): cfg = {'hostname_bounce': {'policy': 'off'}} - self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) def test_same_hostname_does_not_change_hostname(self): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = 
self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, self.set_hostname.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') @@ -737,7 +738,8 @@ class TestAzureBounce(TestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'yes'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() self.assertEqual(0, perform_hostname_bounce.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index e4c59907..f6a59b6b 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import helpers from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma @@ -38,10 +39,12 @@ class CepkoMock(Cepko): return self -class DataSourceCloudSigmaTest(test_helpers.TestCase): +class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() @@ -85,7 +88,8 @@ class DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() @@ -94,7 +98,8 @@ class DataSourceCloudSigmaTest(test_helpers.TestCase): def test_lack_of_cloudinit_key_in_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"]["cloudinit"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 96144b64..d6d2d6b2 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -33,6 +33,7 @@ class TestCloudStackPasswordFetching(CiTestCase): self.patches.enter_context(mock.patch( mod_name + '.dhcp.networkd_get_option_from_leases', get_networkd_server_address)) + self.tmp = self.tmp_dir() def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) @@ -43,26 +44,30 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_empty_password_doesnt_create_config(self): self._set_password_server_response('') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def 
test_saved_password_doesnt_create_config(self): self._set_password_server_response('saved_password') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def test_password_sets_password(self): password = 'SekritSquirrel' self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) def test_bad_request_doesnt_stop_ds_from_working(self): self._set_password_server_response('bad_request') - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) self.assertTrue(ds.get_data()) def assertRequestTypesSent(self, subp, expected_request_types): @@ -77,14 +82,16 @@ class TestCloudStackPasswordFetching(CiTestCase): def test_valid_response_means_password_marked_as_saved(self): password = 'SekritSquirrel' subp = self._set_password_server_response(password) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password', 'saved_password']) def _check_password_not_saved_for(self, response_string): subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack({}, None, helpers.Paths({})) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 237c189b..98497886 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -725,8 +725,9 @@ class TestConvertNetworkData(TestCase): def cfg_ds_from_dir(seed_d): + tmp = tempfile.mkdtemp() cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': tmp})) cfg_ds.seed_dir = seed_d cfg_ds.known_macs = KNOWN_MACS.copy() if not cfg_ds.get_data(): diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index f264f361..ec321733 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -13,7 +13,7 @@ from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean from cloudinit.sources.helpers import digitalocean -from cloudinit.tests.helpers import mock, TestCase +from cloudinit.tests.helpers import mock, CiTestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... 
test2@do.co"] @@ -135,14 +135,17 @@ def _mock_dmi(): return (True, DO_META.get('id')) -class TestDataSourceDigitalOcean(TestCase): +class TestDataSourceDigitalOcean(CiTestCase): """ Test reading the meta-data """ + def setUp(self): + super(TestDataSourceDigitalOcean, self).setUp() + self.tmp = self.tmp_dir() def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({})) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) ds.use_ip4LL = False if get_sysinfo is not None: ds._get_sysinfo = get_sysinfo @@ -194,7 +197,7 @@ class TestDataSourceDigitalOcean(TestCase): self.assertIsInstance(ds.get_public_ssh_keys(), list) -class TestNetworkConvert(TestCase): +class TestNetworkConvert(CiTestCase): @mock.patch('cloudinit.net.get_interfaces_by_mac') def _get_networking(self, m_get_by_mac): diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index ba328ee9..ba042eac 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -186,6 +186,7 @@ class TestEc2(test_helpers.HttprettyTestCase): super(TestEc2, self).setUp() self.datasource = ec2.DataSourceEc2 self.metadata_addr = self.datasource.metadata_urls[0] + self.tmp = self.tmp_dir() def data_url(self, version): """Return a metadata url based on the version provided.""" @@ -199,7 +200,7 @@ class TestEc2(test_helpers.HttprettyTestCase): def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): self.uris = [] distro = {} - paths = helpers.Paths({}) + paths = helpers.Paths({'run_dir': self.tmp}) if sys_cfg is None: sys_cfg = {} ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index d399ae7a..82c788dc 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -70,9 +70,10 @@ def _set_mock_metadata(gce_meta=None): class TestDataSourceGCE(test_helpers.HttprettyTestCase): def setUp(self): + tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': tmp})) ppatch = self.m_platform_reports_gce = mock.patch( 'cloudinit.sources.DataSourceGCE.platform_reports_gce') self.m_platform_reports_gce = ppatch.start() diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index fea9156b..70d50de4 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -3,22 +3,20 @@ from cloudinit import helpers from cloudinit.sources import DataSourceNoCloud from cloudinit import util -from cloudinit.tests.helpers import TestCase, populate_dir, mock, ExitStack +from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack import os -import shutil -import tempfile import textwrap import yaml -class TestNoCloudDataSource(TestCase): +class TestNoCloudDataSource(CiTestCase): def setUp(self): super(TestNoCloudDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.cmdline = "root=TESTCMDLINE" @@ -215,7 +213,7 @@ class TestNoCloudDataSource(TestCase): self.assertNotIn(gateway, str(dsrc.network_config)) -class 
TestParseCommandLineData(TestCase): +class TestParseCommandLineData(CiTestCase): def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index e7d55692..2326dd58 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -3,12 +3,10 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util -from cloudinit.tests.helpers import mock, populate_dir, TestCase +from cloudinit.tests.helpers import mock, populate_dir, CiTestCase import os import pwd -import shutil -import tempfile import unittest @@ -36,14 +34,14 @@ PUBLIC_IP = '10.0.0.3' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" -class TestOpenNebulaDataSource(TestCase): +class TestOpenNebulaDataSource(CiTestCase): parsed_user = None def setUp(self): super(TestOpenNebulaDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) # defaults for few tests self.ds = ds.DataSourceOpenNebula diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index ed367e05..42c31554 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -131,6 +131,10 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): VERSION = 'latest' + def setUp(self): + super(TestOpenStackDataSource, self).setUp() + self.tmp = self.tmp_dir() + @hp.activate def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) @@ -232,7 +236,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertTrue(found) @@ -256,7 +260,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertFalse(found) @@ -271,7 +275,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, @@ -294,7 +298,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, None, - helpers.Paths({})) + helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = { 'max_wait': 0, 'timeout': 0, diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 436df9ee..8dec06b1 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -9,7 +9,7 @@ from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceScaleway -from 
cloudinit.tests.helpers import mock, HttprettyTestCase, TestCase +from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase class DataResponses(object): @@ -63,7 +63,11 @@ class MetadataResponses(object): return 200, headers, json.dumps(cls.FAKE_METADATA) -class TestOnScaleway(TestCase): +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): mock, faked = fake_dmi @@ -91,7 +95,7 @@ class TestOnScaleway(TestCase): # When not on Scaleway, get_data() returns False. datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) ) self.assertFalse(datasource.get_data()) @@ -159,8 +163,9 @@ def get_source_address_adapter(*args, **kwargs): class TestDataSourceScaleway(HttprettyTestCase): def setUp(self): + tmp = self.tmp_dir() self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({}) + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) ) super(TestDataSourceScaleway, self).setUp() diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 933d5b63..88bae5f9 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -359,7 +359,8 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths({'cloud_dir': self.tmp}) + self.paths = c_helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') os.mkdir(self.legacy_user_d) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 1284e755..7a920d42 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -7,7 +7,7 @@ from uuid import uuid4 from cloudinit import safeyaml from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, dir2dict, json_dumps, populate_dir) + CiTestCase, dir2dict, populate_dir) UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -319,7 +319,7 @@ def _print_run_output(rc, out, err, cfg, files): '-- rc = %s --' % rc, '-- out --', str(out), '-- err --', str(err), - '-- cfg --', json_dumps(cfg)])) + '-- cfg --', util.json_dumps(cfg)])) print('-- files --') for k, v in files.items(): if "/_shwrap" in k: diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index add93653..5d3f1ca3 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -23,6 +23,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): cfg = { 'datasource_list': ['None'], 'cloud_init_modules': ['write-files'], + 'system_info': {'paths': {'run_dir': new_root}} } ud = self.readResource('user_data.1.txt') cloud_cfg = util.yaml_dumps(cfg) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index b8fb4794..762974e9 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -2,10 +2,10 @@ import os -from cloudinit.tests import helpers from cloudinit.settings import PER_INSTANCE from cloudinit import stages +from cloudinit.tests import 
helpers
 from cloudinit import util


@@ -23,6 +23,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
             'datasource_list': ['None'],
             'runcmd': ['ls /etc'],  # test ALL_DISTROS
             'spacewalk': {},  # test non-ubuntu distros module definition
+            'system_info': {'paths': {'run_dir': self.new_root}},
             'write_files': [
                 {
                     'path': '/etc/blah.ini',
--
cgit v1.2.3


From a110e483e8644ab73e69853ea11b6c4c6cfa04b6 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Wed, 6 Dec 2017 16:30:22 -0600
Subject: pylint: Update pylint to 1.7.1, run on tests/ and tools and fix complaints.

The motivation for this is that
 a.) 1.7.1 runs with python 3.6 (bionic)
 b.) we want to run pylint on tests/ and tools for the same reasons
     that we want to run it on cloudinit/

The changes are described below.
 - Update tox.ini to invoke pylint v1.7.1.
 - Modify .pylintrc generated-members ignore mocked object members (m_.*)
 - Replace "dangerous" params defaulting to {}
 - Fix up cloud_tests use of platforms
 - Cast some instance objects to dict()
 - Handle python2.7 vs 3+ ConfigParser use of readfp (deprecated)
 - Update use of assertEqual(<boolean>, value) to assert<Boolean>(value)
 - Replace deprecated assertRegexp -> assertRegex
 - Remove useless test-class calls to super class
 - Assign class property accessors a result and use it
 - Fix missing class member in CepkoResultTests
 - Fix Cheetah test import
---
 .pylintrc | 2 +-
 cloudinit/cmd/tests/test_clean.py | 2 +-
 cloudinit/cmd/tests/test_status.py | 2 +-
 cloudinit/tests/helpers.py | 35 +++++
 tests/cloud_tests/__init__.py | 6 +
 tests/cloud_tests/bddeb.py | 9 +-
 tests/cloud_tests/collect.py | 6 +-
 tests/cloud_tests/config.py | 4 +-
 tests/cloud_tests/testcases/base.py | 3 +-
 .../testcases/modules/set_hostname_fqdn.py | 2 +-
 tests/cloud_tests/util.py | 2 +-
 tests/unittests/test_cs_util.py | 1 +
 tests/unittests/test_datasource/test_azure.py | 31 ++--
 .../unittests/test_datasource/test_digitalocean.py | 9 +-
 tests/unittests/test_datasource/test_ec2.py | 3 +-
 tests/unittests/test_distros/test_create_users.py | 7 +-
 tests/unittests/test_distros/test_netconfig.py | 3 -
 tests/unittests/test_handler/test_handler_lxd.py | 3 -
 .../test_handler/test_handler_power_state.py | 3 -
 .../test_handler/test_handler_yum_add_repo.py | 10 +-
 .../test_handler/test_handler_zypper_add_repo.py | 7 +-
 tests/unittests/test_reporting.py | 2 +-
 tests/unittests/test_templating.py | 2 +-
 tests/unittests/test_util.py | 6 +-
 tests/unittests/test_vmware_config_file.py | 3 +-
 tools/hacking.py | 172 ---------------------
 tools/make-mime.py | 2 +-
 tools/mock-meta.py | 45 +++---
 tox.ini | 5 +-
 29 files changed, 121 insertions(+), 266 deletions(-)
 delete mode 100755 tools/hacking.py

(limited to 'cloudinit')
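The bullet above about replacing "dangerous" params defaulting to {} refers to a classic Python pitfall: a mutable default argument is evaluated once, at function definition time, and that single object is then shared by every call that omits the argument. A minimal sketch of the failure mode and of the default-to-None idiom this commit applies (function names here are hypothetical, not taken from the tree):

    def load(overrides={}):  # dangerous: one dict shared by every call
        overrides.setdefault('loaded', True)
        return overrides

    a = load()
    b = load()
    assert a is b  # both calls mutated and returned the same object

    def load_fixed(overrides=None):
        if overrides is None:
            overrides = {}  # fresh dict on each call
        overrides.setdefault('loaded', True)
        return overrides

The same None-sentinel pattern appears below in load_os_config and MyBaseDistro.__init__, which is why pylint flags the {} default as dangerous.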
diff --git a/.pylintrc b/.pylintrc
index b160ce7b..3ad36924 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -56,5 +56,5 @@ ignored-classes=optparse.Values,thread._local
 # List of members which are set dynamically and missed by pylint inference
 # system, and so shouldn't trigger E1101 when accessed. Python regular
 # expressions are accepted.
-generated-members=types,http.client,command_handlers
+generated-members=types,http.client,command_handlers,m_.*
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index af438aab..1379740b 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -151,7 +151,7 @@ class TestClean(CiTestCase):
                 'sys.argv': {'new': ['clean', '--logs']}},
             clean.main)
-        self.assertEqual(0, context_manager.exception.code)
+        self.assertRaisesCodeEqual(0, context_manager.exception.code)
         self.assertFalse(
             os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index 8ec9b5bc..6d4a11e8 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -347,7 +347,7 @@ class TestStatus(CiTestCase):
              '_is_cloudinit_disabled': (False, ''),
              'Init': {'side_effect': self.init_class}},
             status.main)
-        self.assertEqual(0, context_manager.exception.code)
+        self.assertRaisesCodeEqual(0, context_manager.exception.code)
         self.assertEqual('status: running\n', m_stdout.getvalue())

 # vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index feb884ab..0080c729 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -19,6 +19,11 @@ try:
 except ImportError:
     from contextlib2 import ExitStack

+try:
+    from configparser import ConfigParser
+except ImportError:
+    from ConfigParser import ConfigParser
+
 from cloudinit import helpers as ch
 from cloudinit import util

@@ -113,6 +118,16 @@ class TestCase(unittest2.TestCase):
             self.addCleanup(m.stop)
             setattr(self, attr, p)

+    # prefer python3 read_file over readfp but allow fallback
+    def parse_and_read(self, contents):
+        parser = ConfigParser()
+        if hasattr(parser, 'read_file'):
+            parser.read_file(contents)
+        elif hasattr(parser, 'readfp'):
+            # pylint: disable=W1505
+            parser.readfp(contents)
+        return parser
+

 class CiTestCase(TestCase):
     """This is the preferred test case base class unless user
@@ -158,6 +173,18 @@ class CiTestCase(TestCase):
             dir = self.tmp_dir()
         return os.path.normpath(os.path.abspath(os.path.join(dir, path)))

+    def assertRaisesCodeEqual(self, expected, found):
+        """Handle centos6 having different context manager for assertRaises.
+            with assertRaises(Exception) as e:
+                raise Exception("BOO")
+
+        centos6 will have e.exception as an integer.
+        anything newer will have it as something with a '.code'"""
+        if isinstance(found, int):
+            self.assertEqual(expected, found)
+        else:
+            self.assertEqual(expected, found.code)
+

 class ResourceUsingTestCase(CiTestCase):

@@ -395,4 +422,12 @@ if not hasattr(mock.Mock, 'assert_not_called'):
     mock.Mock.assert_not_called = __mock_assert_not_called

+# older unittest2.TestCase (centos6) does not have assertRaisesRegex
+# And setting assertRaisesRegex to assertRaisesRegexp causes
+# https://github.com/PyCQA/pylint/issues/1653 . So the workaround.
+if not hasattr(unittest2.TestCase, 'assertRaisesRegex'): + def _tricky(*args, **kwargs): + return unittest2.TestCase.assertRaisesRegexp + unittest2.TestCase.assertRaisesRegex = _tricky + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py index 98c1d6c7..dd436989 100644 --- a/tests/cloud_tests/__init__.py +++ b/tests/cloud_tests/__init__.py @@ -10,6 +10,12 @@ TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases') TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases') TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2]) +# This domain contains reverse lookups for hostnames that are used. +# The primary reason is so sudo will return quickly when it attempts +# to look up the hostname. i9n is just short for 'integration'. +# see also bug 1730744 for why we had to do this. +CI_DOMAIN = "i9n.cloud-init.io" + def _initialize_logging(): """Configure logging for cloud_tests.""" diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index c259dfea..a6d5069f 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -8,8 +8,7 @@ import tempfile from cloudinit import util as c_util from tests.cloud_tests import (config, LOG) -from tests.cloud_tests.platforms import (platforms, images, snapshots, - instances) +from tests.cloud_tests import platforms from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) pre_reqs = ['devscripts', 'equivs', 'git', 'tar'] @@ -85,18 +84,18 @@ def setup_build(args): # set up image LOG.info('acquiring image for os: %s', args.build_os) img_conf = config.load_os_config(platform.platform_name, args.build_os) - image_call = partial(images.get_image, platform, img_conf) + image_call = partial(platforms.get_image, platform, img_conf) with PlatformComponent(image_call) as image: # set up snapshot - snapshot_call = partial(snapshots.get_snapshot, image) + snapshot_call = partial(platforms.get_snapshot, image) with PlatformComponent(snapshot_call) as snapshot: # create instance with cloud-config to set it up LOG.info('creating instance to build deb in') empty_cloud_config = "#cloud-config\n{}" instance_call = partial( - instances.get_instance, snapshot, empty_cloud_config, + platforms.get_instance, snapshot, empty_cloud_config, use_desc='build cloud-init deb') with PlatformComponent(instance_call) as instance: diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index db5ee99f..4805cea1 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -64,9 +64,9 @@ def collect_test_data(args, snapshot, os_name, test_name): # skip the testcase with a warning req_features = test_config.get('required_features', []) if any(feature not in snapshot.features for feature in req_features): - LOG.warn('test config %s requires features not supported by image, ' - 'skipping.\nrequired features: %s\nsupported features: %s', - test_name, req_features, snapshot.features) + LOG.warning('test config %s requires features not supported by image, ' + 'skipping.\nrequired features: %s\nsupported features: %s', + test_name, req_features, snapshot.features) return ({}, 0) # if there are user data overrides required for this test case, apply them diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py index 52fc2bda..8bd569fd 100644 --- a/tests/cloud_tests/config.py +++ b/tests/cloud_tests/config.py @@ -92,7 +92,7 @@ def load_platform_config(platform_name, require_enabled=False): def load_os_config(platform_name, os_name, require_enabled=False, - 
feature_overrides={}): + feature_overrides=None): """Load configuration for os. @param platform_name: platform name to load os config for @@ -101,6 +101,8 @@ def load_os_config(platform_name, os_name, require_enabled=False, @param feature_overrides: feature flag overrides to merge with features @return_value: config dict """ + if feature_overrides is None: + feature_overrides = {} main_conf = c_util.read_conf(RELEASES_CONF) default = main_conf['default_release_config'] image = main_conf['releases'][os_name] diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 1706f59b..1c5b5405 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -12,7 +12,8 @@ from cloudinit import util as c_util class CloudTestCase(unittest.TestCase): """Base test class for verifiers.""" - data = None + # data gets populated in get_suite.setUpClass + data = {} conf = None _cloud_config = None diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py index eb6f0650..a405b30b 100644 --- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py +++ b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. """cloud-init Integration Test Verify Script.""" -from tests.cloud_tests.instances.nocloudkvm import CI_DOMAIN +from tests.cloud_tests import CI_DOMAIN from tests.cloud_tests.testcases import base diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index c5cd6974..2aedcd0d 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -262,7 +262,7 @@ def shell_safe(cmd): out = subprocess.check_output( ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd)) # out contains ' -- \n'. drop the ' -- ' and the '\n' - return out[4:-1].decode() + return out.decode()[4:-1] def shell_pack(cmd): diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index ee88520d..2a1095b9 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -35,6 +35,7 @@ class CepkoMock(Cepko): # touched the underlying Cepko class methods. 
class CepkoResultTests(test_helpers.TestCase): def setUp(self): + self.c = Cepko() raise test_helpers.SkipTest('This test is completely useless') def test_getitem(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 226c214a..5ab48897 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -36,9 +36,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): """ for key, dval in data.items(): if isinstance(dval, dict): - val = dval.get('text') - attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items() - if k != 'text']) + val = dict(dval).get('text') + attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v + in dict(dval).items() if k != 'text']) else: val = dval attrs = "" @@ -897,9 +897,6 @@ class TestCanDevBeReformatted(CiTestCase): setattr(self, sattr, patcher.start()) self.addCleanup(patcher.stop) - def setUp(self): - super(TestCanDevBeReformatted, self).setUp() - def patchup(self, devs): bypath = {} for path, data in devs.items(): @@ -954,14 +951,14 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda3': {'num': 3}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" self.patchup({'/dev/sda': {}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) def test_two_partitions_not_ntfs_false(self): @@ -973,7 +970,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_two_partitions_ntfs_populated_false(self): @@ -986,7 +983,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['secret.txt']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_two_partitions_ntfs_empty_is_true(self): @@ -998,7 +995,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_not_ntfs_false(self): @@ -1009,7 +1006,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'zfs'}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_one_partition_ntfs_populated_false(self): @@ -1021,7 +1018,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['file1.txt', 'file2.exe']}, }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): @@ -1032,7 +1029,7 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def 
test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): @@ -1044,7 +1041,7 @@ class TestCanDevBeReformatted(CiTestCase): 'files': ['dataloss_warning_readme.txt']} }}}) value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_through_realpath_is_true(self): @@ -1059,7 +1056,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb1'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - self.assertEqual(True, value) + self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_three_partition_through_realpath_is_false(self): @@ -1078,7 +1075,7 @@ class TestCanDevBeReformatted(CiTestCase): 'realpath': '/dev/sdb3'} }}}) value, msg = dsaz.can_dev_be_reformatted(epath) - self.assertEqual(False, value) + self.assertFalse(value) self.assertIn("3 or more", msg.lower()) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index ec321733..3127014b 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -199,9 +199,8 @@ class TestDataSourceDigitalOcean(CiTestCase): class TestNetworkConvert(CiTestCase): - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def _get_networking(self, m_get_by_mac): - m_get_by_mac.return_value = { + def _get_networking(self): + self.m_get_by_mac.return_value = { '04:01:57:d1:9e:01': 'ens1', '04:01:57:d1:9e:02': 'ens2', 'b8:ae:ed:75:5f:9a': 'enp0s25', @@ -211,6 +210,10 @@ class TestNetworkConvert(CiTestCase): self.assertIn('config', netcfg) return netcfg + def setUp(self): + super(TestNetworkConvert, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + def test_networking_defined(self): netcfg = self._get_networking() self.assertIsNotNone(netcfg) diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index ba042eac..f0dc8338 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -330,7 +330,8 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.fallback_nic = 'eth9' with mock.patch(get_interface_mac_path) as m_get_interface_mac: m_get_interface_mac.return_value = mac1 - ds.network_config # Will re-crawl network metadata + nc = ds.network_config # Will re-crawl network metadata + self.assertIsNotNone(nc) self.assertIn('Re-crawl of metadata service', self.logs.getvalue()) expected = {'version': 1, 'config': [ {'mac_address': '06:17:04:d7:26:09', diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index aa13670a..5670904a 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -7,7 +7,11 @@ from cloudinit.tests.helpers import (TestCase, mock) class MyBaseDistro(distros.Distro): # MyBaseDistro is here to test base Distro class implementations - def __init__(self, name="basedistro", cfg={}, paths={}): + def __init__(self, name="basedistro", cfg=None, paths=None): + if not cfg: + cfg = {} + if not paths: + paths = {} super(MyBaseDistro, self).__init__(name, cfg, paths) def install_packages(self, pkglist): @@ -42,7 +46,6 @@ class MyBaseDistro(distros.Distro): @mock.patch("cloudinit.distros.util.subp") class TestCreateUser(TestCase): def setUp(self): - super(TestCase, self).setUp() self.dist = MyBaseDistro() def _useradd2call(self, 
args): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index c4bd11bc..8d0b2634 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -188,9 +188,6 @@ hn0: flags=8843 metric 0 mtu 1500 status: active """ - def setUp(self): - super(TestNetCfgDistro, self).setUp() - def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index e0d9ab6c..a2054980 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -25,9 +25,6 @@ class TestLxd(t_help.CiTestCase): } } - def setUp(self): - super(TestLxd, self).setUp() - def _get_cloud(self, distro): cls = distros.fetch(distro) paths = helpers.Paths({}) diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 85a0fe0a..3c726422 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -9,9 +9,6 @@ from cloudinit.tests.helpers import mock class TestLoadPowerState(t_help.TestCase): - def setUp(self): - super(self.__class__, self).setUp() - def test_no_config(self): # completely empty config should mean do nothing (cmd, _timeout, _condition) = psc.load_power_state({}) diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index b7adbe50..b90a3af3 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -5,10 +5,6 @@ from cloudinit import util from cloudinit.tests import helpers -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser import logging import shutil from six import StringIO @@ -58,8 +54,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'epel_testing': { 'name': 'Extra Packages for Enterprise Linux 5 - Testing', @@ -95,8 +90,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'puppetlabs_products': { 'name': 'Puppet Labs Products El 6 - $basearch', diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py index 315c2a5e..72ab6c08 100644 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py @@ -9,10 +9,6 @@ from cloudinit import util from cloudinit.tests import helpers from cloudinit.tests.helpers import mock -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser import logging from six import StringIO @@ -70,8 +66,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): root_d = 
self.tmp_dir() cc_zypper_add_repo._write_repos(cfg['repos'], root_d) contents = util.load_file("%s/testing-foo.repo" % root_d) - parser = ConfigParser() - parser.readfp(StringIO(contents)) + parser = self.parse_and_read(StringIO(contents)) expected = { 'testing-foo': { 'name': 'test-foo', diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 571420ed..e15ba6cf 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -126,7 +126,7 @@ class TestBaseReportingHandler(TestCase): def test_base_reporting_handler_is_abstract(self): regexp = r".*abstract.*publish_event.*" - self.assertRaisesRegexp(TypeError, regexp, handlers.ReportingHandler) + self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler) class TestLogHandler(TestCase): diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index b911d929..53154d33 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -14,7 +14,7 @@ from cloudinit import templater try: import Cheetah HAS_CHEETAH = True - Cheetah # make pyflakes happy, as Cheetah is not used here + c = Cheetah # make pyflakes and pylint happy, as Cheetah is not used here except ImportError: HAS_CHEETAH = False diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 71f59529..787ca208 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -695,9 +695,9 @@ class TestSubp(helpers.CiTestCase): util.write_file(noshebang, 'true\n') os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - self.assertRaisesRegexp(util.ProcessExecutionError, - 'Missing #! in script\?', - util.subp, (noshebang,)) + self.assertRaisesRegex(util.ProcessExecutionError, + 'Missing #! in script\?', + util.subp, (noshebang,)) def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 808d303a..0f8cda95 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -133,7 +133,8 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) with self.assertRaises(ValueError): - conf.reset_password() + pw = conf.reset_password + self.assertIsNone(pw) cf.clear() cf._insertKey("PASSWORD|RESET", "yes") diff --git a/tools/hacking.py b/tools/hacking.py deleted file mode 100755 index e6a05136..00000000 --- a/tools/hacking.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2012, Cloudscaling -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""cloudinit HACKING file compliance testing (based off of nova hacking.py) - -built on top of pep8.py -""" - -import inspect -import logging -import re -import sys - -import pep8 - -# Don't need this for testing -logging.disable('LOG') - -# N1xx comments -# N2xx except -# N3xx imports -# N4xx docstrings -# N[5-9]XX (future use) - -DOCSTRING_TRIPLE = ['"""', "'''"] -VERBOSE_MISSING_IMPORT = False -_missingImport = set([]) - - -def import_normalize(line): - # convert "from x import y" to "import x.y" - # handle "from x import y as z" to "import x.y as z" - split_line = line.split() - if (line.startswith("from ") and "," not in line and - split_line[2] == "import" and split_line[3] != "*" and - split_line[1] != "__future__" and - (len(split_line) == 4 or (len(split_line) == 6 and - split_line[4] == "as"))): - return "import %s.%s" % (split_line[1], split_line[3]) - else: - return line - - -def cloud_import_alphabetical(physical_line, line_number, lines): - """Check for imports in alphabetical order. - - HACKING guide recommendation for imports: - imports in human alphabetical order - N306 - """ - # handle import x - # use .lower since capitalization shouldn't dictate order - split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() - # with or without "as y" - length = [2, 4] - if (len(split_line) in length and len(split_previous) in length and - split_line[0] == "import" and split_previous[0] == "import"): - if split_line[1] < split_previous[1]: - return (0, "N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) - - -def cloud_docstring_start_space(physical_line): - """Check for docstring not start with space. - - HACKING guide recommendation for docstring: - Docstring should not start with space - N401 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) > pos + 1): - if (physical_line[pos + 3] == ' '): - return (pos, - "N401: one line docstring should not start with a space") - - -def cloud_todo_format(physical_line): - """Check for 'TODO()'. - - HACKING guide recommendation for TODO: - Include your name with TODOs as in "#TODO(termie)" - N101 - """ - pos = physical_line.find('TODO') - pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure it's a comment - if (pos != pos1 and pos2 >= 0 and pos2 < pos): - return pos, "N101: Use TODO(NAME)" - - -def cloud_docstring_one_line(physical_line): - """Check one line docstring end. - - HACKING guide recommendation for one line docstring: - A one line docstring looks like this and ends in a period. - N402 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end - if (pos != -1 and end and len(physical_line) > pos + 4): - if (physical_line[-5] != '.'): - return pos, "N402: one line docstring needs a period" - - -def cloud_docstring_multiline_end(physical_line): - """Check multi line docstring end. 
- - HACKING guide recommendation for docstring: - Docstring should end on a new line - N403 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) == pos): - print(physical_line) - if (physical_line[pos + 3] == ' '): - return (pos, "N403: multi line docstring end on new line") - - -current_file = "" - - -def readlines(filename): - """Record the current file being tested.""" - pep8.current_file = filename - return open(filename).readlines() - - -def add_cloud(): - """Monkey patch pep8 for cloud-init guidelines. - - Look for functions that start with cloud_ - and add them to pep8 module. - - Assumes you know how to write pep8.py checks - """ - for name, function in globals().items(): - if not inspect.isfunction(function): - continue - if name.startswith("cloud_"): - exec("pep8.%s = %s" % (name, name)) - - -if __name__ == "__main__": - # NOVA based 'hacking.py' error codes start with an N - pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}') - add_cloud() - pep8.current_file = current_file - pep8.readlines = readlines - try: - pep8._main() - finally: - if len(_missingImport) > 0: - sys.stderr.write( - "%i imports missing in this test environment\n" % - len(_missingImport)) - -# vi: ts=4 expandtab diff --git a/tools/make-mime.py b/tools/make-mime.py index f6a72044..d321479b 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -23,7 +23,7 @@ def file_content_type(text): filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) except ValueError: - raise argparse.ArgumentError("Invalid value for %r" % (text)) + raise argparse.ArgumentError(text, "Invalid value for %r" % (text)) def main(): diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a5d14ab7..724f7fc4 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -17,6 +17,7 @@ Then: ec2metadata --instance-id """ +import argparse import functools import json import logging @@ -27,8 +28,6 @@ import string import sys import yaml -from optparse import OptionParser - try: from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler import httplib as hclient @@ -415,29 +414,27 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'): def extract_opts(): - parser = OptionParser() - parser.add_option("-p", "--port", dest="port", action="store", type=int, - default=80, metavar="PORT", - help=("port from which to serve traffic" - " (default: %default)")) - parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='::', metavar="ADDRESS", - help=("address from which to serve traffic" - " (default: %default)")) - parser.add_option("-f", '--user-data-file', dest='user_data_file', - action='store', metavar='FILE', - help=("user data filename to serve back to" - "incoming requests")) - (options, args) = parser.parse_args() - out = dict() - out['extra'] = args - out['port'] = options.port - out['user_data_file'] = None - out['address'] = options.address - if options.user_data_file: - if not os.path.isfile(options.user_data_file): + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--port", dest="port", action="store", type=int, + default=80, metavar="PORT", + help=("port from which to serve traffic" + " (default: %default)")) + parser.add_argument("-a", "--addr", dest="address", action="store", + type=str, default='::', metavar="ADDRESS", + help=("address from which to serve traffic" + " (default: %default)")) + parser.add_argument("-f", '--user-data-file', 
dest='user_data_file', + action='store', metavar='FILE', + help=("user data filename to serve back to" + "incoming requests")) + parser.add_argument('extra', nargs='*') + args = parser.parse_args() + out = {'port': args.port, 'address': args.address, 'extra': args.extra, + 'user_data_file': None} + if args.user_data_file: + if not os.path.isfile(args.user_data_file): parser.error("Option -f specified a non-existent file") - with open(options.user_data_file, 'rb') as fh: + with open(args.user_data_file, 'rb') as fh: out['user_data_file'] = fh.read() return out diff --git a/tox.ini b/tox.ini index 92232201..d7316cc2 100644 --- a/tox.ini +++ b/tox.ini @@ -21,12 +21,13 @@ setenv = LC_ALL = en_US.utf-8 [testenv:pylint] +basepython = python3 deps = # requirements pylint==1.7.1 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt -commands = {envpython} -m pylint {posargs:cloudinit} +commands = {envpython} -m pylint {posargs:cloudinit tests tools} [testenv:py3] basepython = python3 @@ -119,7 +120,7 @@ commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} deps = pyflakes [testenv:tip-pylint] -commands = {envpython} -m pylint {posargs:cloudinit} +commands = {envpython} -m pylint {posargs:cloudinit tests tools} deps = # requirements pylint -- cgit v1.2.3 From ce33e423cde806a0590fec635778d62836e1bd37 Mon Sep 17 00:00:00 2001 From: Maitreyee Saikia Date: Fri, 8 Dec 2017 10:10:40 -0700 Subject: VMware: Support for user provided pre and post-customization scripts In the VMware customization workflow, we have some options for the user to upload scripts for additional customization. Based on user request, those custom scripts can be either run before regular customization or after. For post customization scripts, we decide whether to run the scripts just after customization or post system reboot. 
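A rough sketch of the resulting control flow (hypothetical helper, simplified;
the real implementation is in config_custom_script.py below): pre-scripts
always run inline before customization, while post-scripts run inline only
when no rc.local is available to defer them past the reboot.

import os
import subprocess

def run_custom_script(script_path, phase, rc_local='/etc/rc.local'):
    """Sketch of the ordering described above, not the patch's code."""
    if phase == 'pre':
        # Pre-customization scripts always run inline, before the
        # regular customization steps are applied.
        subprocess.check_call(['/bin/sh', script_path, 'precustomization'])
    elif os.path.exists(rc_local):
        # rc.local present: defer the post-script to the next boot by
        # appending an invocation to rc.local (greatly simplified).
        with open(rc_local, 'a') as f:
            f.write('\n/bin/sh %s postcustomization\n' % script_path)
    else:
        # No rc.local: run the post-script inline during cloud-init.
        subprocess.check_call(['/bin/sh', script_path, 'postcustomization'])
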
--- cloudinit/sources/DataSourceOVF.py | 125 ++++++++++++----- cloudinit/sources/helpers/vmware/imc/config.py | 4 + .../helpers/vmware/imc/config_custom_script.py | 153 +++++++++++++++++++++ cloudinit/sources/helpers/vmware/imc/config_nic.py | 2 +- tests/unittests/test_datasource/test_ovf.py | 111 ++++++++++++++- tests/unittests/test_vmware/__init__.py | 0 tests/unittests/test_vmware/test_custom_script.py | 99 +++++++++++++ tests/unittests/test_vmware_config_file.py | 7 + 8 files changed, 459 insertions(+), 42 deletions(-) create mode 100644 cloudinit/sources/helpers/vmware/imc/config_custom_script.py create mode 100644 tests/unittests/test_vmware/__init__.py create mode 100644 tests/unittests/test_vmware/test_custom_script.py (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6ac621f2..6e62f984 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -21,6 +21,8 @@ from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config \ import Config +from cloudinit.sources.helpers.vmware.imc.config_custom_script \ + import PreCustomScript, PostCustomScript from cloudinit.sources.helpers.vmware.imc.config_file \ import ConfigFile from cloudinit.sources.helpers.vmware.imc.config_nic \ @@ -30,7 +32,7 @@ from cloudinit.sources.helpers.vmware.imc.config_passwd \ from cloudinit.sources.helpers.vmware.imc.guestcust_error \ import GuestCustErrorEnum from cloudinit.sources.helpers.vmware.imc.guestcust_event \ - import GuestCustEventEnum + import GuestCustEventEnum as GuestCustEvent from cloudinit.sources.helpers.vmware.imc.guestcust_state \ import GuestCustStateEnum from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( @@ -127,17 +129,31 @@ class DataSourceOVF(sources.DataSource): self._vmware_cust_conf = Config(cf) (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) - markerid = self._vmware_cust_conf.marker_id - markerexists = check_marker_exists(markerid) + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) + product_marker = self._vmware_cust_conf.marker_id + hasmarkerfile = check_marker_exists( + product_marker, os.path.join(self.paths.cloud_dir, 'data')) + special_customization = product_marker and not hasmarkerfile + customscript = self._vmware_cust_conf.custom_script_name except Exception as e: - LOG.debug("Error parsing the customization Config File") - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - raise e - finally: - util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) + _raise_error_status( + "Error parsing the customization Config File", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if special_customization: + if customscript: + try: + precust = PreCustomScript(customscript, imcdirpath) + precust.execute() + except Exception as e: + _raise_error_status( + "Error executing pre-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + try: LOG.debug("Preparing the Network configuration") self._network_config = get_network_config_from_conf( @@ -146,13 +162,13 @@ class DataSourceOVF(sources.DataSource): True, self.distro.osfamily) except Exception as e: - LOG.exception(e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) - raise e + 
_raise_error_status( + "Error preparing Network Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED, + vmwareImcConfigFilePath) - if markerid and not markerexists: + if special_customization: LOG.debug("Applying password customization") pwdConfigurator = PasswordConfigurator() adminpwd = self._vmware_cust_conf.admin_password @@ -164,27 +180,41 @@ class DataSourceOVF(sources.DataSource): else: LOG.debug("Changing password is not needed") except Exception as e: - LOG.debug("Error applying Password Configuration: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False - if markerid: - LOG.debug("Handle marker creation") + _raise_error_status( + "Error applying Password Configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if customscript: + try: + postcust = PostCustomScript(customscript, imcdirpath) + postcust.execute() + except Exception as e: + _raise_error_status( + "Error executing post-customization script", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) + + if product_marker: try: - setup_marker_files(markerid) + setup_marker_files( + product_marker, + os.path.join(self.paths.cloud_dir, 'data')) except Exception as e: - LOG.debug("Error creating marker files: %s", e) - set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) - return False + _raise_error_status( + "Error creating marker files", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath) self._vmware_cust_found = True found.append('vmware-tools') # TODO: Need to set the status to DONE only when the # customization is done successfully. + util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) enable_nics(self._vmware_nics_to_enable) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, @@ -539,31 +569,52 @@ def get_datasource_list(depends): # To check if marker file exists -def check_marker_exists(markerid): +def check_marker_exists(markerid, marker_dir): """ Check the existence of a marker file. Presence of marker file determines whether a certain code path is to be executed. It is needed for partial guest customization in VMware. + @param markerid: is an unique string representing a particular product + marker. + @param: marker_dir: The directory in which markers exist. """ if not markerid: return False - markerfile = "/.markerfile-" + markerid + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") if os.path.exists(markerfile): return True return False # Create a marker file -def setup_marker_files(markerid): +def setup_marker_files(markerid, marker_dir): """ Create a new marker file. Marker files are unique to a full customization workflow in VMware environment. + @param markerid: is an unique string representing a particular product + marker. + @param: marker_dir: The directory in which markers exist. 
+ """ - if not markerid: - return - markerfile = "/.markerfile-" + markerid - util.del_file("/.markerfile-*.txt") + LOG.debug("Handle marker creation") + markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt") + for fname in os.listdir(marker_dir): + if fname.startswith(".markerfile"): + util.del_file(os.path.join(marker_dir, fname)) open(markerfile, 'w').close() + +def _raise_error_status(prefix, error, event, config_file): + """ + Raise error and send customization status to the underlying VMware + Virtualization Platform. Also, cleanup the imc directory. + """ + LOG.debug('%s: %s', prefix, error) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + event) + util.del_dir(os.path.dirname(config_file)) + raise error + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 49d441db..2eaeff34 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -100,4 +100,8 @@ class Config(object): """Returns marker id.""" return self._configFile.get(Config.MARKERID, None) + @property + def custom_script_name(self): + """Return the name of custom (pre/post) script.""" + return self._configFile.get(Config.CUSTOM_SCRIPT, None) # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py new file mode 100644 index 00000000..a7d4ad91 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -0,0 +1,153 @@ +# Copyright (C) 2017 Canonical Ltd. +# Copyright (C) 2017 VMware Inc. +# +# Author: Maitreyee Saikia +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging +import os +import stat +from textwrap import dedent + +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class CustomScriptNotFound(Exception): + pass + + +class CustomScriptConstant(object): + RC_LOCAL = "/etc/rc.local" + POST_CUST_TMP_DIR = "/root/.customization" + POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" + POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, + POST_CUST_RUN_SCRIPT_NAME) + POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + + +class RunCustomScript(object): + def __init__(self, scriptname, directory): + self.scriptname = scriptname + self.directory = directory + self.scriptpath = os.path.join(directory, scriptname) + + def prepare_script(self): + if not os.path.exists(self.scriptpath): + raise CustomScriptNotFound("Script %s not found!! " + "Cannot execute custom script!" + % self.scriptpath) + # Strip any CR characters from the decoded script + util.load_file(self.scriptpath).replace("\r", "") + st = os.stat(self.scriptpath) + os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + + +class PreCustomScript(RunCustomScript): + def execute(self): + """Executing custom script with precustomization argument.""" + LOG.debug("Executing pre-customization script") + self.prepare_script() + util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + + +class PostCustomScript(RunCustomScript): + def __init__(self, scriptname, directory): + super(PostCustomScript, self).__init__(scriptname, directory) + # Determine when to run custom script. When postreboot is True, + # the user uploaded script will run as part of rc.local after + # the machine reboots. This is determined by presence of rclocal. 
+        # When postreboot is False, script will run as part of cloud-init.
+        self.postreboot = False
+
+    def _install_post_reboot_agent(self, rclocal):
+        """
+        Install post-reboot agent for running custom script after reboot.
+        As part of this process, we are editing the rclocal file to run a
+        VMware script, which in turn is responsible for handling the user
+        script.
+        @param rclocal: path to rc.local.
+        """
+        LOG.debug("Installing post-reboot customization from %s to %s",
+                  self.directory, rclocal)
+        if not self.has_previous_agent(rclocal):
+            LOG.info("Adding post-reboot customization agent to rc.local")
+            new_content = dedent("""
+                # Run post-reboot guest customization
+                /bin/sh %s
+                exit 0
+                """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
+            existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
+            st = os.stat(rclocal)
+            # "x" flag should be set
+            mode = st.st_mode | stat.S_IEXEC
+            util.write_file(rclocal, existing_rclocal + new_content, mode)
+
+        else:
+            # We don't need to update rclocal file every time a customization
+            # is requested. It just needs to be done for the first time.
+            LOG.info("Post-reboot guest customization agent is already "
+                     "registered in rc.local")
+        LOG.debug("Installing post-reboot customization agent finished: %s",
+                  self.postreboot)
+
+    def has_previous_agent(self, rclocal):
+        searchstring = "# Run post-reboot guest customization"
+        if searchstring in open(rclocal).read():
+            return True
+        return False
+
+    def find_rc_local(self):
+        """
+        Determine if rc local is present.
+        """
+        rclocal = ""
+        if os.path.exists(CustomScriptConstant.RC_LOCAL):
+            LOG.debug("rc.local detected.")
+            # resolving in case of symlink
+            rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
+            LOG.debug("rc.local resolved to %s", rclocal)
+        else:
+            LOG.warning("Can't find rc.local, post-customization "
+                        "will be run before reboot")
+        return rclocal
+
+    def install_agent(self):
+        rclocal = self.find_rc_local()
+        if rclocal:
+            self._install_post_reboot_agent(rclocal)
+            self.postreboot = True
+
+    def execute(self):
+        """
+        This method executes post-customization script before or after reboot
+        based on the presence of rc local.
+        """
+        self.prepare_script()
+        self.install_agent()
+        if not self.postreboot:
+            LOG.warning("Executing post-customization script inline")
+            util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
+        else:
+            LOG.debug("Scheduling custom script to run post reboot")
+            if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
+                os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
+            # Script "post-customize-guest.sh" and the user uploaded script
+            # are present in the same directory and need to be copied to a
+            # temp directory to be executed post reboot. The user uploaded
+            # script is saved as customize.sh in the temp directory.
+            # post-customize-guest.sh executes customize.sh after reboot.
+ LOG.debug("Copying post-customization script") + util.copy(self.scriptpath, + CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") + LOG.debug("Copying script to run post-customization script") + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), + CustomScriptConstant.POST_CUST_RUN_SCRIPT) + LOG.info("Creating post-reboot pending marker") + util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2fb07c59..2d8900e2 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -161,7 +161,7 @@ class NicConfigurator(object): if nic.primary and v4.gateways: self.ipv4PrimaryGateway = v4.gateways[0] subnet.update({'gateway': self.ipv4PrimaryGateway}) - return [subnet] + return ([subnet], route_list) # Add routes if there is no primary nic if not self._primaryNic: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 700da86c..fc4eb36e 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -5,11 +5,17 @@ # This file is part of cloud-init. See LICENSE file for license information. import base64 -from collections import OrderedDict +import os -from cloudinit.tests import helpers as test_helpers +from collections import OrderedDict +from textwrap import dedent +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase, wrap_and_call +from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptNotFound) OVF_ENV_CONTENT = """ +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptConstant, + CustomScriptNotFound, + PreCustomScript, + PostCustomScript, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestVmwareCustomScript(CiTestCase): + def setUp(self): + self.tmpDir = self.tmp_dir() + + def test_prepare_custom_script(self): + """ + This test is designed to verify the behavior based on the presence of + custom script. Mainly needed for scenario where a custom script is + expected, but was not properly copied. "CustomScriptNotFound" exception + is raised in such cases. + """ + # Custom script does not exist. + preCust = PreCustomScript("random-vmw-test", self.tmpDir) + self.assertEqual("random-vmw-test", preCust.scriptname) + self.assertEqual(self.tmpDir, preCust.directory) + self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir), + preCust.scriptpath) + with self.assertRaises(CustomScriptNotFound): + preCust.prepare_script() + + # Custom script exists. + custScript = self.tmp_path("test-cust", self.tmpDir) + util.write_file(custScript, "test-CR-strip/r/r") + postCust = PostCustomScript("test-cust", self.tmpDir) + self.assertEqual("test-cust", postCust.scriptname) + self.assertEqual(self.tmpDir, postCust.directory) + self.assertEqual(custScript, postCust.scriptpath) + self.assertFalse(postCust.postreboot) + postCust.prepare_script() + # Check if all carraige returns are stripped from script. 
+ self.assertFalse("/r" in custScript) + + def test_rc_local_exists(self): + """ + This test is designed to verify the different scenarios associated + with the presence of rclocal. + """ + # test when rc local does not exist + postCust = PostCustomScript("test-cust", self.tmpDir) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"): + rclocal = postCust.find_rc_local() + self.assertEqual("", rclocal) + + # test when rc local exists + rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir) + util.write_file(rclocalFile, "# Run post-reboot guest customization", + omode="w") + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile): + rclocal = postCust.find_rc_local() + self.assertEqual(rclocalFile, rclocal) + self.assertTrue(postCust.has_previous_agent, rclocal) + + # test when rc local is a symlink + rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir) + util.sym_link(rclocalFile, rclocalLink, True) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink): + rclocal = postCust.find_rc_local() + self.assertEqual(rclocalFile, rclocal) + + def test_execute_post_cust(self): + """ + This test is to identify if rclocal was properly populated to be + run after reboot. + """ + customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) + rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) + # Create a temporary rclocal file + open(customscript, "w") + util.write_file(rclocal, "tests\nexit 0", omode="w") + postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) + with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): + # Test that guest customization agent is not installed initially. + self.assertFalse(postCust.postreboot) + self.assertIs(postCust.has_previous_agent(rclocal), False) + postCust.install_agent() + + # Assert rclocal has been modified to have guest customization + # agent. + self.assertTrue(postCust.postreboot) + self.assertTrue(postCust.has_previous_agent, rclocal) + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 0f8cda95..036f6879 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -335,5 +335,12 @@ class TestVmwareConfigFile(CiTestCase): self.assertEqual('255.255.0.0', subnet.get('netmask'), 'Subnet netmask') + def test_custom_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.custom_script_name) + cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") + conf = Config(cf) + self.assertEqual("test-script", conf.custom_script_name) # vi: ts=4 expandtab -- cgit v1.2.3 From 703241a3c50f2cfec21e7c8e90616c3378ebbea2 Mon Sep 17 00:00:00 2001 From: Andrew Jorgensen Date: Mon, 27 Nov 2017 21:54:09 +0000 Subject: ec2: Use instance-identity doc for region and instance-id The instance identity document is a better source for region information, partly because region isn't actually in meta-data at all, only availability-zone, which happens to be named similarly. 
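For reference, the identity document is an ordinary JSON blob served under
dynamic/ rather than meta-data/. A minimal standalone fetch (assuming the
standard 169.254.169.254 metadata endpoint, only reachable from inside an
instance) looks like:

import json
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

IDENTITY_URL = ('http://169.254.169.254/latest/'
                'dynamic/instance-identity/document')

# The document carries 'region' and 'instanceId' keys directly, so no
# trimming of the availability-zone suffix is required.
identity = json.loads(urlopen(IDENTITY_URL, timeout=5).read().decode('utf-8'))
print(identity['region'], identity['instanceId'])
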
Reviewed-by: Ethan Faust Reviewed-by: Cyle Riggs Reviewed-by: Tom Kirchner Reviewed-by: Matt Nierzwicki [ajorgens@amazon.com: rebase onto 0.7.9] [ajorgens@amazon.com: changes per merge proposal discussions] --- cloudinit/distros/__init__.py | 15 +++++----- cloudinit/ec2_utils.py | 39 ++++++++++++++++++++------ cloudinit/sources/DataSourceEc2.py | 30 ++++++++++++++++---- tests/unittests/test_datasource/test_aliyun.py | 16 +++++++++++ 4 files changed, 78 insertions(+), 22 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 99e60e7a..55260eae 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -45,6 +45,10 @@ OSFAMILIES = { LOG = logging.getLogger(__name__) +# This is a best guess regex, based on current EC2 AZs on 2017-12-11. +# It could break when Amazon adds new regions and new AZs. +_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') + @six.add_metaclass(abc.ABCMeta) class Distro(object): @@ -683,18 +687,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, if not mirror_info: mirror_info = {} - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) - # the region is us-east-1. so region = az[0:-1] - directions_re = '|'.join([ - 'central', 'east', 'north', 'northeast', 'northwest', - 'south', 'southeast', 'southwest', 'west']) - ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) - subst = {} if data_source and data_source.availability_zone: subst['availability_zone'] = data_source.availability_zone - if re.match(ec2_az_re, data_source.availability_zone): + # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) + # the region is us-east-1. so region = az[0:-1] + if _EC2_AZ_RE.match(data_source.availability_zone): subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] if data_source and data_source.region: diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 723d6bd6..d6c61e4c 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -1,6 +1,8 @@ # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2014 Amazon.com, Inc. or its affiliates. # # Author: Joshua Harlow +# Author: Andrew Jorgensen # # This file is part of cloud-init. See LICENSE file for license information. @@ -164,14 +166,11 @@ def get_instance_userdata(api_version='latest', return user_data -def get_instance_metadata(api_version='latest', - metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): - md_url = url_helper.combine_url(metadata_address, api_version) - # Note, 'meta-data' explicitly has trailing /. 
- # this is required for CloudStack (LP: #1356855) - md_url = url_helper.combine_url(md_url, 'meta-data/') +def _get_instance_metadata(tree, api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) @@ -189,7 +188,29 @@ def get_instance_metadata(api_version='latest', md = {} return md except Exception: - util.logexc(LOG, "Failed fetching metadata from url %s", md_url) + util.logexc(LOG, "Failed fetching %s from url %s", tree, md_url) return {} + +def get_instance_metadata(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + # Note, 'meta-data' explicitly has trailing /. + # this is required for CloudStack (LP: #1356855) + return _get_instance_metadata(tree='meta-data/', api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) + + +def get_instance_identity(api_version='latest', + metadata_address='http://169.254.169.254', + ssl_details=None, timeout=5, retries=5, + leaf_decoder=None): + return _get_instance_metadata(tree='dynamic/instance-identity', + api_version=api_version, + metadata_address=metadata_address, + ssl_details=ssl_details, timeout=timeout, + retries=retries, leaf_decoder=leaf_decoder) # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index e5c88334..0f89f34d 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -154,7 +154,12 @@ class DataSourceEc2(sources.DataSource): return self.min_metadata_version def get_instance_id(self): - return self.metadata['instance-id'] + if self.cloud_platform == Platforms.AWS: + # Prefer the ID from the instance identity document, but fall back + return self.identity.get( + 'instanceId', self.metadata['instance-id']) + else: + return self.metadata['instance-id'] def _get_url_settings(self): mcfg = self.ds_cfg @@ -268,15 +273,27 @@ class DataSourceEc2(sources.DataSource): @property def availability_zone(self): try: - return self.metadata['placement']['availability-zone'] + if self.cloud_platform == Platforms.AWS: + return self.identity.get( + 'availabilityZone', + self.metadata['placement']['availability-zone']) + else: + return self.metadata['placement']['availability-zone'] except KeyError: return None @property def region(self): - az = self.availability_zone - if az is not None: - return az[:-1] + if self.cloud_platform == Platforms.AWS: + region = self.identity.get('region') + # Fallback to trimming the availability zone if region is missing + if self.availability_zone and not region: + region = self.availability_zone[:-1] + return region + else: + az = self.availability_zone + if az is not None: + return az[:-1] return None @property @@ -357,6 +374,9 @@ class DataSourceEc2(sources.DataSource): api_version, self.metadata_address) self.metadata = ec2.get_instance_metadata( api_version, self.metadata_address) + if self.cloud_platform == Platforms.AWS: + self.identity = ec2.get_instance_identity( + api_version, self.metadata_address).get('document', {}) except Exception: util.logexc( LOG, "Failed reading from metadata address %s", diff --git a/tests/unittests/test_datasource/test_aliyun.py 
b/tests/unittests/test_datasource/test_aliyun.py index 714f5dac..4fa9616b 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -47,6 +47,9 @@ def register_mock_metaserver(base_url, data): elif isinstance(body, list): register(base_url.rstrip('/'), '\n'.join(body) + '\n') elif isinstance(body, dict): + if not body: + register(base_url.rstrip('/') + '/', 'not found', + status_code=404) vals = [] for k, v in body.items(): if isinstance(v, (str, list)): @@ -91,9 +94,22 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self.metadata_address, self.ds.min_metadata_version, 'user-data') + # EC2 provides an instance-identity document which must return 404 here + # for this test to pass. + @property + def default_identity(self): + return {} + + @property + def identity_url(self): + return os.path.join(self.metadata_address, + self.ds.min_metadata_version, + 'dynamic', 'instance-identity') + def regist_default_server(self): register_mock_metaserver(self.metadata_url, self.default_metadata) register_mock_metaserver(self.userdata_url, self.default_userdata) + register_mock_metaserver(self.identity_url, self.default_identity) def _test_get_data(self): self.assertEqual(self.ds.metadata, self.default_metadata) -- cgit v1.2.3 From c87588bf1e118095445ed0713f60866592b36ca1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 14 Dec 2017 12:33:52 -0500 Subject: release 17.2 Bump the version in cloudinit/version.py to be 17.2 and update ChangeLog. --- ChangeLog | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 86 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 0260c576..31c2dcbb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,88 @@ +17.2: + - ds-identify: failure in NoCloud due to unset variable usage. + (LP: #1737704) + - tests: fix collect_console when not implemented [Joshua Powers] + - ec2: Use instance-identity doc for region and instance-id + [Andrew Jorgensen] + - tests: remove leaked tmp files in config drive tests. + - setup.py: Do not include rendered files in SOURCES.txt + - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] + - tests: move to using tox 1.7.5 + - OVF: improve ds-identify to support finding OVF iso transport. + (LP: #1731868) + - VMware: Support for user provided pre and post-customization scripts + [Maitreyee Saikia] + - citest: In NoCloudKVM provide keys via metadata not userdata. + - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix + complaints. + - Datasources: Formalize DataSource get_data and related properties. + - cli: Add clean and status subcommands + - tests: consolidate platforms into specific dirs + - ec2: Fix sandboxed dhclient background process cleanup. (LP: #1735331) + - tests: NoCloudKVMImage do not modify the original local cache image. + - tests: Enable bionic in integration tests. [Joshua Powers] + - tests: Use apt-get to install a deb so that depends get resolved. + - sysconfig: Correctly render dns and dns search info. + [Ryan McCabe] (LP: #1705804) + - integration test: replace curtin test ppa with cloud-init test ppa. + - EC2: Fix bug using fallback_nic and metadata when restoring from cache. + (LP: #1732917) + - EC2: Kill dhclient process used in sandbox dhclient. 
(LP: #1732964) + - ntp: fix configuration template rendering for openSUSE and SLES + (LP: #1726572) + - centos: Provide the failed #include url in error messages + - Catch UrlError when #include'ing URLs [Andrew Jorgensen] + - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs. + [Robert Schweikert] (LP: #1731022) + - rh_subscription: Perform null checks for enabled and disabled repos. + [Dave Mulford] + - Improve warning message when a template is not found. + [Robert Schweikert] (LP: #1731035) + - Replace the temporary i9n.brickies.net with i9n.cloud-init.io. + - Azure: don't generate network configuration for SRIOV devices + (LP: #1721579) + - tests: address some minor feedback missed in last merge. + - tests: integration test cleanup and full pass of nocloud-kvm. + - Gentoo: chmod +x on all files in sysvinit/gentoo/ + [ckonstanski] (LP: #1727126) + - EC2: Limit network config to fallback nic, fix local-ipv4 only + instances. (LP: #1728152) + - Gentoo: Use "rc-service" rather than "service". + [Carlos Konstanski] (LP: #1727121) + - resizefs: Fix regression when system booted with root=PARTUUID= + (LP: #1725067) + - tools: make yum package installation more reliable + - citest: fix remaining warnings raised by integration tests. + - citest: show the class actual class name in results. + - ntp: fix config module schema to allow empty ntp config (LP: #1724951) + - tools: disable fastestmirror if using proxy [Joshua Powers] + - schema: Log debug instead of warning when jsonschema is not available. + (LP: #1724354) + - simpletable: Fix get_string method to return table-formatted string + (LP: #1722566) + - net: Handle bridge stp values of 0 and convert to boolean type + - tools: Give specific --abbrev=8 to "git describe" + - network: bridge_stp value not always correct (LP: #1721157) + - tests: re-enable tox with nocloud-kvm support [Joshua Powers] + - systemd: remove limit on tasks created by cloud-init-final.service. + [Robert Schweikert] (LP: #1717969) + - suse: Support addition of zypper repos via cloud-config. + [Robert Schweikert] (LP: #1718675) + - tests: Combine integration configs and testcases [Joshua Powers] + - Azure, CloudStack: Support reading dhcp options from systemd-networkd. + [Dimitri John Ledkov] (LP: #1718029) + - packages/debian/copyright: remove mention of boto and MIT license + - systemd: only mention Before=apt-daily.service on debian based distros. + [Robert Schweikert] + - Add missing simpletable and simpletable tests for failed merge + - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen] + - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license. + [Joshua Powers] (LP: #1718681) + - tests: remove dependency on shlex [Joshua Powers] + - AltCloud: Trust PATH for udevadm and modprobe. + - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) (LP: #1718287) + - tests: remove a temp file used in bootcmd tests. + 17.1: - doc: document GCE datasource. [Arnd Hannemann] - suse: updates to templates to support openSUSE and SLES. diff --git a/cloudinit/version.py b/cloudinit/version.py index 3255f399..be6262d6 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-__VERSION__ = "17.1" +__VERSION__ = "17.2" FEATURES = [ # supports network config version 1 -- cgit v1.2.3 From 4089e20c0a20bc2ad5c21b106687c4f3faf84b4b Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 14 Dec 2017 22:06:29 -0700 Subject: cli: Fix error in cloud-init modules --mode=init. The cli help docs and argument parser allow the 'init' mode value which caused a traceback. Fix the cli to support 'init', 'config' and 'final' modes for the cloud-init modules subcommand. Add a check in the cli to raise a ValueError if a new subcommand ends up allowing an unsupported/unimplemented modes. Drive by unit test additions for a bit better coverage of error handling. LP: #1736600 --- cloudinit/cmd/main.py | 18 +++++++---- tests/unittests/test_cli.py | 75 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index aa56225d..30b37fe1 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -603,7 +603,11 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-config', 'modules-final') + modes = ('init', 'init-local', 'modules-init', 'modules-config', + 'modules-final') + if mode not in modes: + raise ValueError( + "Invalid cloud init mode specified '{0}'".format(mode)) status = None if mode == 'init-local': @@ -615,16 +619,18 @@ def status_wrapper(name, args, data_d=None, link_d=None): except Exception: pass + nullstatus = { + 'errors': [], + 'start': None, + 'finished': None, + } if status is None: - nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, - } status = {'v1': {}} for m in modes: status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None + elif mode not in status['v1']: + status['v1'][mode] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index a8d28ae6..0c0f427a 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,9 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple +import os import six from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers +from cloudinit.util import load_file, load_json mock = test_helpers.mock @@ -11,6 +14,8 @@ mock = test_helpers.mock class TestCLI(test_helpers.FilesystemMockingTestCase): + with_logs = True + def setUp(self): super(TestCLI, self).setUp() self.stderr = six.StringIO() @@ -24,6 +29,76 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): except SystemExit as e: return e.code + def test_status_wrapper_errors_on_invalid_name(self): + """status_wrapper will error when the name parameter is not valid. + + Valid name values are only init and modules. 
+ """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('init1', myargs, data_d, link_d) + self.assertEqual('unknown name: init1', str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_errors_on_invalid_modes(self): + """status_wrapper will error if a parameter combination is invalid.""" + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(): + raise Exception('Should not call myaction') + + myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode') + with self.assertRaises(ValueError) as cm: + cli.status_wrapper('modules', myargs, data_d, link_d) + self.assertEqual( + "Invalid cloud init mode specified 'modules-bogusmode'", + str(cm.exception)) + self.assertNotIn('Should not call myaction', self.logs.getvalue()) + + def test_status_wrapper_init_local_writes_fresh_status_info(self): + """When running in init-local mode, status_wrapper writes status.json. + + Old status and results artifacts are also removed. + """ + tmpd = self.tmp_dir() + data_d = self.tmp_path('data', tmpd) + link_d = self.tmp_path('link', tmpd) + status_link = self.tmp_path('status.json', link_d) + # Write old artifacts which will be removed or updated. + for _dir in data_d, link_d: + test_helpers.populate_dir( + _dir, {'status.json': 'old', 'result.json': 'old'}) + + FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + + def myaction(name, args): + # Return an error to watch status capture them + return 'SomeDatasource', ['an error'] + + myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode') + cli.status_wrapper('init', myargs, data_d, link_d) + # No errors reported in status + status_v1 = load_json(load_file(status_link))['v1'] + self.assertEqual(['an error'], status_v1['init-local']['errors']) + self.assertEqual('SomeDatasource', status_v1['datasource']) + self.assertFalse( + os.path.exists(self.tmp_path('result.json', data_d)), + 'unexpected result.json found') + self.assertFalse( + os.path.exists(self.tmp_path('result.json', link_d)), + 'unexpected result.json link found') + def test_no_arguments_shows_usage(self): exit_code = self._call_main() self.assertIn('usage: cloud-init', self.stderr.getvalue()) -- cgit v1.2.3 From c6a6f59e80f1fd62562b1fe9acfd45e1cee3cbe8 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 15 Dec 2017 15:24:53 -0700 Subject: lint: Fix lints seen by pylint version 1.8.1. This branch resolves lints seen by pylint revision 1.8.1 and updates our pinned tox pylint dependency used by our tox pylint target. 
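Most of the changes below share one pattern: %-style placeholder arguments
are lazily interpolated by the logging module but not by exception
constructors, which newer pylint releases flag (the raising-format-tuple
check, among others). A contrived before/after, with a hypothetical
find_device helper:

def find_device(name, known=('sda', 'vda')):
    if name not in known:
        # Buggy form pylint rejects: the %s is never interpolated, so the
        # exception "message" is the tuple ('unknown device %s', name).
        #     raise ValueError('unknown device %s', name)
        # Fixed form, formatting explicitly before raising:
        raise ValueError('unknown device {name}'.format(name=name))
    return '/dev/' + name
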
--- cloudinit/config/cc_apt_configure.py | 5 +++-- cloudinit/config/cc_disk_setup.py | 8 +++++--- cloudinit/config/cc_landscape.py | 8 ++++---- cloudinit/config/cc_ntp.py | 10 +++++----- cloudinit/config/cc_seed_random.py | 3 ++- cloudinit/config/cc_snap_config.py | 7 +++++-- cloudinit/net/cmdline.py | 9 +++++---- cloudinit/net/network_state.py | 8 +++++--- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/sources/DataSourceAzure.py | 6 +++--- cloudinit/sources/DataSourceOpenNebula.py | 5 +++-- cloudinit/sources/helpers/azure.py | 3 ++- cloudinit/util.py | 3 ++- tox.ini | 2 +- 14 files changed, 46 insertions(+), 33 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 177cbcf7..5b9cbca0 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -275,8 +275,9 @@ def handle(name, ocfg, cloud, log, _): cfg = ocfg.get('apt', {}) if not isinstance(cfg, dict): - raise ValueError("Expected dictionary for 'apt' config, found %s", - type(cfg)) + raise ValueError( + "Expected dictionary for 'apt' config, found {config_type}".format( + config_type=type(cfg))) apply_debconf_selections(cfg, target) apply_apt(cfg, cloud, target) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index c2b83aea..c3e8c484 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -788,7 +788,8 @@ def mkpart(device, definition): # This prevents you from overwriting the device LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): - raise Exception("Device %s is not a disk device!", device) + raise Exception( + 'Device {device} is not a disk device!'.format(device=device)) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -945,8 +946,9 @@ def mkfs(fs_cfg): # Check that we can create the FS if not (fs_type or fs_cmd): - raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd " - "must be set.", label) + raise Exception( + "No way to create filesystem '{label}'. 
fs_type or fs_cmd " + "must be set.".format(label=label)) # Create the commands shell = False diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 8f9f1abd..eaf1e940 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -94,10 +94,10 @@ def handle(_name, cfg, cloud, log, _args): ls_cloudcfg = cfg.get("landscape", {}) if not isinstance(ls_cloudcfg, (dict)): - raise RuntimeError(("'landscape' key existed in config," - " but not a dictionary type," - " is a %s instead"), - type_utils.obj_name(ls_cloudcfg)) + raise RuntimeError( + "'landscape' key existed in config, but not a dictionary type," + " is a {_type} instead".format( + _type=type_utils.obj_name(ls_cloudcfg))) if not ls_cloudcfg: return diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index f50bcb35..cbd0237d 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -106,9 +106,9 @@ def handle(name, cfg, cloud, log, _args): # TODO drop this when validate_cloudconfig_schema is strict=True if not isinstance(ntp_cfg, (dict)): - raise RuntimeError(("'ntp' key existed in config," - " but not a dictionary type," - " is a %s %instead"), type_utils.obj_name(ntp_cfg)) + raise RuntimeError( + "'ntp' key existed in config, but not a dictionary type," + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) validate_cloudconfig_schema(cfg, schema) if ntp_installable(): @@ -206,8 +206,8 @@ def write_ntp_config_template(cfg, cloud, path, template=None): if not template_fn: template_fn = cloud.get_template_filename('ntp.conf') if not template_fn: - raise RuntimeError(("No template found, " - "not rendering %s"), path) + raise RuntimeError( + 'No template found, not rendering {path}'.format(path=path)) templater.render_to_file(template_fn, path, params) diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index e76b9c09..65f6e777 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -95,7 +95,8 @@ def handle_random_seed_command(command, required, env=None): cmd = command[0] if not util.which(cmd): if required: - raise ValueError("command '%s' not found but required=true", cmd) + raise ValueError( + "command '{cmd}' not found but required=true".format(cmd=cmd)) else: LOG.debug("command '%s' not found for seed_command", cmd) return diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py index fe0cc73e..e82c0811 100644 --- a/cloudinit/config/cc_snap_config.py +++ b/cloudinit/config/cc_snap_config.py @@ -87,7 +87,9 @@ def add_assertions(assertions=None): assertions = [] if not isinstance(assertions, list): - raise ValueError('assertion parameter was not a list: %s', assertions) + raise ValueError( + 'assertion parameter was not a list: {assertions}'.format( + assertions=assertions)) snap_cmd = [SNAPPY_CMD, 'ack'] combined = "\n".join(assertions) @@ -115,7 +117,8 @@ def add_snap_user(cfg=None): cfg = {} if not isinstance(cfg, dict): - raise ValueError('configuration parameter was not a dict: %s', cfg) + raise ValueError( + 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg)) snapuser = cfg.get('email', None) if not snapuser: diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 38b27a52..7b2cc9db 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -116,10 +116,11 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): prev = names[name]['entry'] if prev.get('mac_address') != 
entry.get('mac_address'): raise ValueError( - "device '%s' was defined multiple times (%s)" - " but had differing mac addresses: %s -> %s.", - (name, ' '.join(names[name]['files']), - prev.get('mac_address'), entry.get('mac_address'))) + "device '{name}' was defined multiple times ({files})" + " but had differing mac addresses: {old} -> {new}.".format( + name=name, files=' '.join(names[name]['files']), + old=prev.get('mac_address'), + new=entry.get('mac_address'))) prev['subnets'].extend(entry['subnets']) names[name]['files'].append(cfg_file) else: diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index e9e2cf4e..31738c73 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -474,8 +474,9 @@ class NetworkStateInterpreter(object): elif bridge_stp in ['off', '0', 0]: bridge_stp = False else: - raise ValueError("Cannot convert bridge_stp value" - "(%s) to boolean", bridge_stp) + raise ValueError( + 'Cannot convert bridge_stp value ({stp}) to' + ' boolean'.format(stp=bridge_stp)) iface.update({'bridge_stp': bridge_stp}) interfaces.update({iface['name']: iface}) @@ -692,7 +693,8 @@ class NetworkStateInterpreter(object): elif cmd_type == "bond": self.handle_bond(v1_cmd) else: - raise ValueError('Unknown command type: %s', cmd_type) + raise ValueError('Unknown command type: {cmd_type}'.format( + cmd_type=cmd_type)) def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index be2d6cf8..e1d0055b 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -145,7 +145,7 @@ class DataSourceAltCloud(sources.DataSource): else: cloud_type = self.get_cloud_type() - LOG.debug('cloud_type: ' + str(cloud_type)) + LOG.debug('cloud_type: %s', str(cloud_type)) if 'RHEV' in cloud_type: if self.user_data_rhevm(): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6978d4e5..e73b57b9 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -582,12 +582,12 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, if os.path.exists(sempath): try: os.unlink(sempath) - LOG.debug(bmsg + " removed.") + LOG.debug('%s removed.', bmsg) except Exception as e: # python3 throws FileNotFoundError, python2 throws OSError - LOG.warning(bmsg + ": remove failed! (%s)", e) + LOG.warning('%s: remove failed! 
(%s)', bmsg, e) else: - LOG.debug(bmsg + " did not exist.") + LOG.debug('%s did not exist.', bmsg) return diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 5da11847..f66c95d7 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -332,8 +332,9 @@ def read_context_disk_dir(source_dir, asuser=None): try: pwd.getpwnam(asuser) except KeyError as e: - raise BrokenContextDiskDir("configured user '%s' " - "does not exist", asuser) + raise BrokenContextDiskDir( + "configured user '{user}' does not exist".format( + user=asuser)) try: path = os.path.join(source_dir, 'context.sh') content = util.load_file(path) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 959b1bda..6cda5721 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -274,7 +274,8 @@ class WALinuxAgentShim(object): name = os.path.basename(hook_file).replace('.json', '') dhcp_options[name] = json.loads(util.load_file((hook_file))) except ValueError: - raise ValueError("%s is not valid JSON data", hook_file) + raise ValueError( + '{_file} is not valid JSON data'.format(_file=hook_file)) return dhcp_options @staticmethod diff --git a/cloudinit/util.py b/cloudinit/util.py index 11e96a77..8a9f1ab2 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2327,7 +2327,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): missing.append(f) if len(missing): - raise ValueError("Missing required files: %s", ','.join(missing)) + raise ValueError( + 'Missing required files: {files}'.format(files=','.join(missing))) return ret diff --git a/tox.ini b/tox.ini index 0802072a..fdc8a665 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,7 @@ setenv = basepython = python3 deps = # requirements - pylint==1.7.5 + pylint==1.8.1 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt commands = {envpython} -m pylint {posargs:cloudinit tests tools} -- cgit v1.2.3 From b05b9972d20ec3ea699d1691b67314d04e852d2f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 20 Dec 2017 12:46:49 -0700 Subject: Azure: Only bounce network when necessary. This fixes a traceback when attempting to bounce the network after hostname resets. In artful and bionic ifupdown package is no longer installed in default cloud images. As such, Azure can't use those tools to bounce the network informing DDNS about hostname changes. This doesn't affect DDNS updates though because systemd-networkd is now watching hostname deltas and with default behavior to SendHostname=True over dhcp for all hostname updates which publishes DDNS for us. 
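The shape of the fix, as a sketch (shutil.which from Python 3 stands in here
for cloud-init's util.which, and choose_bounce_command is a hypothetical
helper): pick the bounce command per platform, and return nothing when
ifupdown is absent so the caller skips the bounce entirely.

import shutil

FREEBSD_BOUNCE = ['sh', '-xc',
                  'i=$interface; ifconfig down $i; ifconfig up $i']
IFUP_BOUNCE = ['sh', '-xc', 'i=$interface; ifdown $i; ifup $i']

def choose_bounce_command(is_freebsd=False):
    if is_freebsd:
        return FREEBSD_BOUNCE
    if shutil.which('ifup'):
        return IFUP_BOUNCE
    # No ifupdown (the default on artful/bionic images): skip the bounce
    # and let systemd-networkd publish the hostname change over DHCP.
    return None
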
LP: #1722668 --- cloudinit/sources/DataSourceAzure.py | 25 +++++++++++++++--------- tests/unittests/test_datasource/test_azure.py | 28 +++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 13 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e73b57b9..d1d09757 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,10 +26,16 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] AGENT_START_BUILTIN = "__builtin__" -BOUNCE_COMMAND = [ +BOUNCE_COMMAND_IFUP = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" ] +BOUNCE_COMMAND_FREEBSD = [ + 'sh', '-xc', + ("i=$interface; x=0; ifconfig down $i || x=$?; " + "ifconfig up $i || x=$?; exit $x") +] + # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' @@ -177,11 +183,6 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") - BOUNCE_COMMAND = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") - ] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -190,7 +191,7 @@ BUILTIN_DS_CONFIG = { 'hostname_bounce': { 'interface': DEFAULT_PRIMARY_NIC, 'policy': True, - 'command': BOUNCE_COMMAND, + 'command': 'builtin', 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, @@ -606,8 +607,14 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): env['old_hostname'] = prev_hostname if command == "builtin": - command = BOUNCE_COMMAND - + if util.is_FreeBSD(): + command = BOUNCE_COMMAND_FREEBSD + elif util.which('ifup'): + command = BOUNCE_COMMAND_IFUP + else: + LOG.debug( + "Skipping network bounce: ifupdown utils aren't present.") + return # Don't bounce as networkd handles hostname DDNS updates LOG.debug("pubhname: publishing hostname [%s]", msg) shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5ab48897..6341e1e8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -174,6 +174,7 @@ scbus-1 on xpt0 bus 0 (dsaz, 'get_hostname', mock.MagicMock()), (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'which', lambda x: True), (dsaz.util, 'read_dmi_data', mock.MagicMock( side_effect=_dmi_mocks)), (dsaz.util, 'wait_for_files', mock.MagicMock( @@ -642,6 +643,8 @@ fdescfs /dev/fd fdescfs rw 0 0 class TestAzureBounce(CiTestCase): + with_logs = True + def mock_out_azure_moving_parts(self): self.patches.enter_context( mock.patch.object(dsaz, 'invoke_agent')) @@ -653,6 +656,8 @@ class TestAzureBounce(CiTestCase): self.patches.enter_context( mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz.util, 'which', lambda x: True)) def _dmi_mocks(key): if key == 'system-uuid': @@ -753,6 +758,22 @@ class TestAzureBounce(CiTestCase): self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) + def test_bounce_skipped_on_ifupdown_absent(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), + agent_command=['not', '__builtin__']) + patch_path = 'cloudinit.sources.DataSourceAzure.util.which' + with mock.patch(patch_path) as m_which: + m_which.return_value = None + ret = self._get_and_setup(dsrc) + self.assertEqual([mock.call('ifup')], m_which.call_args_list) + self.assertTrue(ret) + self.assertIn( + "Skipping network bounce: ifupdown utils aren't present.", + self.logs.getvalue()) + def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' @@ -817,9 +838,7 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_used_by_default(self): - cmd = 'default-bounce-command' - dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + def test_default_bounce_command_ifup_used_by_default(self): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) @@ -827,7 +846,8 @@ class TestAzureBounce(CiTestCase): self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) bounce_args = self.subp.call_args[1]['args'] - self.assertEqual(cmd, bounce_args) + self.assertEqual( + dsaz.BOUNCE_COMMAND_IFUP, bounce_args) @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_set_hostname_option_can_disable_bounce( -- cgit v1.2.3 From 0b5bacb1761aefa74adb79bd1683d614bdf8c998 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 3 Jan 2018 12:56:22 -0700 Subject: cli: cloud-init clean handles symlinks Fix cloud-init clean subcommand to unlink symlinks instead of calling del_dir. 
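The underlying gotcha: os.path.isdir() follows symlinks, so a symlink
pointing at a directory was handed to del_dir() and removal traced back.
A tiny sketch of the corrected branch (shutil.rmtree and os.unlink stand in
for cloud-init's del_dir and del_file):

import os
import shutil

def remove_path(path):
    # os.path.isdir() is True for a symlink pointing at a directory, so
    # check islink() first and unlink it rather than recursing into it.
    if os.path.isdir(path) and not os.path.islink(path):
        shutil.rmtree(path)   # stands in for util.del_dir
    else:
        os.unlink(path)       # stands in for util.del_file
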
LP: #1741093 --- cloudinit/cmd/clean.py | 5 +++-- cloudinit/cmd/tests/test_clean.py | 19 ++++++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 81797b1c..de22f7f2 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,7 +10,8 @@ import sys from cloudinit.stages import Init from cloudinit.util import ( - ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, subp) + ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, + is_link, subp) def error(msg): @@ -65,7 +66,7 @@ def remove_artifacts(remove_logs, remove_seed=False): if path == 'seed' and not remove_seed: continue try: - if os.path.isdir(path): + if os.path.isdir(path) and not is_link(path): del_dir(path) else: del_file(path) diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 1379740b..6713af4f 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.cmd import clean -from cloudinit.util import ensure_dir, write_file +from cloudinit.util import ensure_dir, sym_link, write_file from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock from collections import namedtuple import os @@ -60,6 +60,23 @@ class TestClean(CiTestCase): self.assertTrue(os.path.exists(self.log2), 'Missing expected file') self.assertEqual(0, retcode) + def test_remove_artifacts_removes_unlinks_symlinks(self): + """remove_artifacts cleans artifacts dir unlinking any symlinks.""" + dir1 = os.path.join(self.artifact_dir, 'dir1') + ensure_dir(dir1) + symlink = os.path.join(self.artifact_dir, 'mylink') + sym_link(dir1, symlink) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for path in (dir1, symlink): + self.assertFalse( + os.path.exists(path), + 'Unexpected {0} dir'.format(path)) + def test_remove_artifacts_removes_artifacts_skipping_seed(self): """remove_artifacts cleans artifacts dir with exception of seed dir.""" dirs = [ -- cgit v1.2.3 From 5f550420d2ed9d9ef024293f33d33f0f2fc04ee5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 10 Jan 2018 13:53:17 -0700 Subject: MAAS: add check_instance_id based off oauth tokens. This stores a hash of the OAuth tokens as an 'id' for the maas datasource. Since new instances get new tokens created and those tokens are written by curtin into datasource system config this will provide a way to identify a new "instance" (install). 
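In miniature, the identifier introduced below is a versioned SHA-256 over the NUL-joined OAuth fields:

    import hashlib

    def get_id_from_ds_cfg(ds_cfg):
        # Join the OAuth fields with NUL and hash them; the 'v1:' prefix
        # versions the scheme so older ids stay distinguishable if the
        # encoding ever changes.
        fields = ('consumer_key', 'token_key', 'token_secret')
        idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
        return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()

The TestGetIdHash case added below pins the v1 hash for a known config, so the scheme cannot drift silently.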
LP: #1712680 --- cloudinit/sources/DataSourceMAAS.py | 54 ++++++++++++++++++++-------- tests/unittests/test_datasource/test_maas.py | 53 +++++++++++++++++++++++---- 2 files changed, 86 insertions(+), 21 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 496bd06a..6ac88635 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -8,6 +8,7 @@ from __future__ import print_function +import hashlib import os import time @@ -41,25 +42,20 @@ class DataSourceMAAS(sources.DataSource): """ dsname = "MAAS" + id_hash = None + _oauth_helper = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None self.seed_dir = os.path.join(paths.seed_dir, 'maas') - self.oauth_helper = self._get_helper() + self.id_hash = get_id_from_ds_cfg(self.ds_cfg) - def _get_helper(self): - mcfg = self.ds_cfg - # If we are missing token_key, token_secret or consumer_key - # then just do non-authed requests - for required in ('token_key', 'token_secret', 'consumer_key'): - if required not in mcfg: - return url_helper.OauthUrlHelper() - - return url_helper.OauthUrlHelper( - consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], - token_secret=mcfg['token_secret'], - consumer_secret=mcfg.get('consumer_secret')) + @property + def oauth_helper(self): + if not self._oauth_helper: + self._oauth_helper = get_oauth_helper(self.ds_cfg) + return self._oauth_helper def __str__(self): root = sources.DataSource.__str__(self) @@ -147,6 +143,36 @@ class DataSourceMAAS(sources.DataSource): return bool(url) + def check_instance_id(self, sys_cfg): + """locally check if the current system is the same instance. + + MAAS doesn't provide a real instance-id, and if it did, it is + still only available over the network. We need to check based + only on local resources. So compute a hash based on Oauth tokens.""" + if self.id_hash is None: + return False + ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {}) + return (self.id_hash == get_id_from_ds_cfg(ncfg)) + + +def get_oauth_helper(cfg): + """Return an oauth helper instance for values in cfg. + + @raises ValueError from OauthUrlHelper if some required fields have + true-ish values but others do not.""" + keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret') + kwargs = dict([(r, cfg.get(r)) for r in keys]) + return url_helper.OauthUrlHelper(**kwargs) + + +def get_id_from_ds_cfg(ds_cfg): + """Given a config, generate a unique identifier for this node.""" + fields = ('consumer_key', 'token_key', 'token_secret') + idstr = '\0'.join([ds_cfg.get(f, "") for f in fields]) + # store the encoding version as part of the hash in the event + # that it ever changed we can compute older versions. 
+ return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest() + def read_maas_seed_dir(seed_d): if seed_d.startswith("file://"): @@ -322,7 +348,7 @@ if __name__ == "__main__": sys.stderr.write("Must provide a url or a config with url.\n") sys.exit(1) - oauth_helper = url_helper.OauthUrlHelper(**creds) + oauth_helper = get_oauth_helper(creds) def geturl(url): # the retry is to ensure that oauth timestamp gets fixed diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 289c6a40..6e4031cf 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from copy import copy +import mock import os import shutil import tempfile @@ -8,15 +9,10 @@ import yaml from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper -from cloudinit.tests.helpers import TestCase, populate_dir +from cloudinit.tests.helpers import CiTestCase, populate_dir -try: - from unittest import mock -except ImportError: - import mock - -class TestMAASDataSource(TestCase): +class TestMAASDataSource(CiTestCase): def setUp(self): super(TestMAASDataSource, self).setUp() @@ -159,4 +155,47 @@ class TestMAASDataSource(TestCase): self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) self.assertEqual(expected_vd, vd) + +@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") +class TestGetOauthHelper(CiTestCase): + with_logs = True + base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', + 'token_key': 'FAKE_TOKEN_KEY', + 'token_secret': 'FAKE_TOKEN_SECRET', + 'consumer_secret': None} + + def test_all_required(self, m_helper): + """Valid config as expected.""" + DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + def test_other_fields_not_passed_through(self, m_helper): + """Only relevant fields are passed through.""" + mycfg = self.base_cfg.copy() + mycfg['unrelated_field'] = 'unrelated' + DataSourceMAAS.get_oauth_helper(mycfg) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + +class TestGetIdHash(CiTestCase): + v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', + 'token_secret': 'TSEC'} + v1_id = ( + 'v1:' + '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + + def test_v1_expected(self): + """Test v1 id generated as expected working behavior from config.""" + result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) + self.assertEqual(self.v1_id, result) + + def test_v1_extra_fields_are_ignored(self): + """Test v1 id ignores unused entries in config.""" + cfg = self.v1_cfg.copy() + cfg['consumer_secret'] = "BOO" + cfg['unrelated'] = "HI MOM" + result = DataSourceMAAS.get_id_from_ds_cfg(cfg) + self.assertEqual(self.v1_id, result) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 6299e8d0cc230b0c9b41a69a5963bcd2c252c337 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 12 Jan 2018 14:23:26 -0700 Subject: Do not log warning on config files that represent None. This issue was first identified when manual_cache_clean was set, as ds-identify would write /run/cloud-init/cloud.cfg with # manual_cache_clean that would generate a warning as cloud-init expected to load a dict. Any other "empty" config would also log such a warning. Also fix reading of di_report to allow it to be None, as ds-identify would write: di_report: # manual_cache_clean which reads as 'di_report: None' rather than di_report: {}. 
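The parsing quirk described above can be reproduced with plain PyYAML; a small sketch of the behavior the fix below settles on, with yaml.safe_load standing in for cloud-init's safeyaml wrapper:

    import yaml

    def load_yaml_or_default(blob, default=None):
        # "", "   " and "# comment\n" all parse to None; treat that the
        # same as an absent config instead of warning about a bad type.
        loaded = yaml.safe_load(blob)
        return default if loaded is None else loaded

    assert load_yaml_or_default("# manual_cache_clean", default={}) == {}

The same rule is why 'di_report:' followed only by a comment must be read as "no report config" rather than as a malformed dictionary.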
LP: #1742479 --- cloudinit/cmd/main.py | 8 +++++++- cloudinit/util.py | 10 +++++----- tests/unittests/test_util.py | 8 ++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 30b37fe1..d2f1b778 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -421,7 +421,13 @@ def di_report_warn(datasource, cfg): LOG.debug("no di_report found in config.") return - dicfg = cfg.get('di_report', {}) + dicfg = cfg['di_report'] + if dicfg is None: + # ds-identify may write 'di_report:\n #comment\n' + # which reads as {'di_report': None} + LOG.debug("di_report was None.") + return + if not isinstance(dicfg, dict): LOG.warning("di_report config not a dictionary: %s", dicfg) return diff --git a/cloudinit/util.py b/cloudinit/util.py index 8a9f1ab2..e42498d9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -891,17 +891,17 @@ def load_yaml(blob, default=None, allowed=(dict,)): "of length %s with allowed root types %s", len(blob), allowed) converted = safeyaml.load(blob) - if not isinstance(converted, allowed): + if converted is None: + LOG.debug("loaded blob returned None, returning default.") + converted = default + elif not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... raise TypeError(("Yaml load allows %s root types," " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): - if len(blob) == 0: - LOG.debug("load_yaml given empty string, returning default") - else: - logexc(LOG, "Failed loading yaml blob") + logexc(LOG, "Failed loading yaml blob") return loaded diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 787ca208..d63b760e 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -299,6 +299,14 @@ class TestLoadYaml(helpers.TestCase): default=self.mydefault), myobj) + def test_none_returns_default(self): + """If yaml.load returns None, then default should be returned.""" + blobs = ("", " ", "# foo\n", "#") + mdef = self.mydefault + self.assertEqual( + [(b, self.mydefault) for b in blobs], + [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs]) + class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): -- cgit v1.2.3 From eb70975eaf37cf9549949f72e7647addb81a52ac Mon Sep 17 00:00:00 2001 From: James Penick Date: Tue, 23 Jan 2018 14:22:54 -0700 Subject: Recognize uppercase vfat disk labels New mkfs.vfat and fatlabel tools included in the dosfsutils package no longer support creating vfat disks with lowercase labels. They silently default to an all uppercase label eg CONFIG-2 instead of config-2. This change makes cloud-init handle either upper or lower case. 
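The accepted labels simply grow an uppercase variant; the matching in the diff below amounts to:

    FS_TYPES = ('vfat', 'iso9660')
    LABEL_TYPES = ('config-2', 'CONFIG-2')

    def is_config_drive(fs_type, label):
        # Newer dosfstools may force the label to uppercase, so accept
        # both spellings rather than lower-casing blkid output globally.
        return fs_type in FS_TYPES and label in LABEL_TYPES

    assert is_config_drive('vfat', 'CONFIG-2')

Matching the two exact spellings, rather than comparing case-insensitively, keeps mixed-case labels like 'Config-2' rejected as before.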
LP: #1598783 --- cloudinit/sources/DataSourceConfigDrive.py | 4 ++-- tests/unittests/test_datasource/test_configdrive.py | 6 ++++++ tests/unittests/test_ds_identify.py | 17 +++++++++++++++++ tools/ds-identify | 4 +++- 4 files changed, 28 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 870b3688..b8db6267 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -25,7 +25,7 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } FS_TYPES = ('vfat', 'iso9660') -LABEL_TYPES = ('config-2',) +LABEL_TYPES = ('config-2', 'CONFIG-2') POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))) @@ -224,7 +224,7 @@ def find_candidate_devs(probe_optical=True): config drive v2: Disk should be: * either vfat or iso9660 formated - * labeled with 'config-2' + * labeled with 'config-2' or 'CONFIG-2' """ # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6ef5a35c..68400f22 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -458,6 +458,12 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs()) + # Verify that uppercase labels are also found. + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + finally: util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index c9234edd..ad6c5cf4 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -232,6 +232,11 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDrive') return + def test_config_drive_upper(self): + """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" + self._test_ds_found('ConfigDriveUpper') + return + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. @@ -503,6 +508,18 @@ VALID_CFG = { }, ], }, + 'ConfigDriveUpper': { + 'ds': 'ConfigDrive', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}]) + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 5893a761..374c3ad1 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -579,6 +579,8 @@ dscheck_NoCloud() { check_configdrive_v2() { if has_fs_with_label "config-2"; then return ${DS_FOUND} + elif has_fs_with_label "CONFIG-2"; then + return ${DS_FOUND} fi # look in /config-drive /seed/config_drive for a directory # openstack/YYYY-MM-DD format with a file meta_data.json @@ -666,7 +668,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. 
case "$label" in - config-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 183d5785954af3a1e7603798d4a91ab126eb7bb9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Jan 2018 18:08:14 -0700 Subject: subp: make ProcessExecutionError have expected types in stderr, stdout. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When subp raised a ProcessExecutionError, that exception's stderr and stdout might end up being the string '-' rather than bytes. This mean that:    try:        subp(mycommand, decode=False)    except ProcessExecutionError as e:        pass Would have 'e.stdout' set to '-' while the caller would expect bytes. Also reduce the try/except block in subp to a specifically the two lines that may raise an OSError. --- cloudinit/util.py | 94 ++++++++++++++++++++++++-------------------- tests/unittests/test_util.py | 15 +++++++ 2 files changed, 66 insertions(+), 43 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index e42498d9..df0aa5db 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -253,12 +253,18 @@ class ProcessExecutionError(IOError): self.exit_code = exit_code if not stderr: - self.stderr = self.empty_attr + if stderr is None: + self.stderr = self.empty_attr + else: + self.stderr = stderr else: self.stderr = self._indent_text(stderr) if not stdout: - self.stdout = self.empty_attr + if stdout is None: + self.stdout = self.empty_attr + else: + self.stdout = stdout else: self.stdout = self._indent_text(stdout) @@ -1829,58 +1835,60 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, env = env.copy() env.update(update_env) - try: - if target_path(target) != "/": - args = ['chroot', target] + list(args) + if target_path(target) != "/": + args = ['chroot', target] + list(args) - if not logstring: - LOG.debug(("Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)"), args, rcs, shell, capture) - else: - LOG.debug(("Running hidden command to protect sensitive " - "input/output logstring: %s"), logstring) - - stdin = None - stdout = None - stderr = None - if capture: - stdout = subprocess.PIPE - stderr = subprocess.PIPE - if data is None: - # using devnull assures any reads get null, rather - # than possibly waiting on input. - devnull_fp = open(os.devnull) - stdin = devnull_fp - else: - stdin = subprocess.PIPE - if not isinstance(data, bytes): - data = data.encode() + if not logstring: + LOG.debug(("Running command %s with allowed return codes %s" + " (shell=%s, capture=%s)"), args, rcs, shell, capture) + else: + LOG.debug(("Running hidden command to protect sensitive " + "input/output logstring: %s"), logstring) + + stdin = None + stdout = None + stderr = None + if capture: + stdout = subprocess.PIPE + stderr = subprocess.PIPE + if data is None: + # using devnull assures any reads get null, rather + # than possibly waiting on input. + devnull_fp = open(os.devnull) + stdin = devnull_fp + else: + stdin = subprocess.PIPE + if not isinstance(data, bytes): + data = data.encode() + try: sp = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) - - # Just ensure blank instead of none. 
- if not out and capture: - out = b'' - if not err and capture: - err = b'' - if decode: - def ldecode(data, m='utf-8'): - if not isinstance(data, bytes): - return data - return data.decode(m, decode) - - out = ldecode(out) - err = ldecode(err) except OSError as e: - raise ProcessExecutionError(cmd=args, reason=e, - errno=e.errno) + raise ProcessExecutionError( + cmd=args, reason=e, errno=e.errno, + stdout="-" if decode else b"-", + stderr="-" if decode else b"-") finally: if devnull_fp: devnull_fp.close() + # Just ensure blank instead of none. + if not out and capture: + out = b'' + if not err and capture: + err = b'' + if decode: + def ldecode(data, m='utf-8'): + if not isinstance(data, bytes): + return data + return data.decode(m, decode) + + out = ldecode(out) + err = ldecode(err) + rc = sp.returncode if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d63b760e..4a92e741 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -623,6 +623,7 @@ class TestSubp(helpers.CiTestCase): utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + bogus_command = 'this-is-not-expected-to-be-a-program-name' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf @@ -712,6 +713,20 @@ class TestSubp(helpers.CiTestCase): self.assertIsNone(err) self.assertIsNone(out) + def test_exception_has_out_err_are_bytes_if_decode_false(self): + """Raised exc should have stderr, stdout as bytes if no decode.""" + with self.assertRaises(util.ProcessExecutionError) as cm: + util.subp([self.bogus_command], decode=False) + self.assertTrue(isinstance(cm.exception.stdout, bytes)) + self.assertTrue(isinstance(cm.exception.stderr, bytes)) + + def test_exception_has_out_err_are_bytes_if_decode_true(self): + """Raised exc should have stderr, stdout as string if no decode.""" + with self.assertRaises(util.ProcessExecutionError) as cm: + util.subp([self.bogus_command], decode=True) + self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) + self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) + def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", util.target_path("/target/", "//my/path/")) -- cgit v1.2.3 From 2d781c6a3e27433b7fa993cd54b269ceb74e10b2 Mon Sep 17 00:00:00 2001 From: Max Illfelder Date: Tue, 23 Jan 2018 18:12:32 -0700 Subject: GCE: Improvements and changes to ssh key behavior for default user. The behavior changes and improvements include: - Only import keys into the default user that contain the name of the default user ('ubuntu', or 'centos') or that contain 'cloudinit'. - Use instance or project level keys based on GCE convention. - Respect expiration time when keys are set. Do not import expired keys. - Support ssh-keys in project level metadata (the GCE default). 
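The expiry rule in that list hinges on GCE's google-ssh key format, where the key's comment field is a JSON blob carrying an expireOn timestamp. A sketch of the check, with an illustrative helper name and the format taken from the diff below:

    import datetime
    import json

    def key_has_expired(key):
        # key looks like: 'ssh-rsa AAAA... google-ssh {"expireOn":"..."}'
        try:
            schema, json_str = key.split(None, 3)[2:]
        except ValueError:
            return False
        if schema != 'google-ssh':
            return False  # only google-ssh keys carry an expiry
        try:
            expire_on = json.loads(json_str)['expireOn']
            expire_time = datetime.datetime.strptime(
                expire_on, '%Y-%m-%dT%H:%M:%S+0000')
        except (ValueError, KeyError):
            return False
        return datetime.datetime.utcnow() > expire_time

    assert key_has_expired(
        'ssh-rsa K google-ssh {"expireOn":"2000-01-01T00:00:00+0000"}')

Any malformed input deliberately reads as "not expired": a key is only dropped when it parses cleanly and its timestamp is in the past.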
As part of this change, we also update the request header when talking to the metadata server based on the documentation: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying LP: #1670456, #1707033, #1707037, #1707039 --- cloudinit/sources/DataSourceGCE.py | 134 +++++++++++++------ tests/unittests/test_datasource/test_gce.py | 193 +++++++++++++++++++++++++--- 2 files changed, 267 insertions(+), 60 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index ad6dae37..2da34a99 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -2,8 +2,12 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import datetime +import json + from base64 import b64decode +from cloudinit.distros import ug_util from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper @@ -17,16 +21,18 @@ REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') class GoogleMetadataFetcher(object): - headers = {'X-Google-Metadata-Request': 'True'} + headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address - def get_value(self, path, is_text): + def get_value(self, path, is_text, is_recursive=False): value = None try: - resp = url_helper.readurl(url=self.metadata_address + path, - headers=self.headers) + url = self.metadata_address + path + if is_recursive: + url += '/?recursive=True' + resp = url_helper.readurl(url=url, headers=self.headers) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -35,7 +41,7 @@ class GoogleMetadataFetcher(object): if is_text: value = util.decode_binary(resp.contents) else: - value = resp.contents + value = resp.contents.decode('utf-8') else: LOG.debug("url %s returned code %s", path, resp.code) return value @@ -47,6 +53,10 @@ class DataSourceGCE(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.default_user = None + if distro: + (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro) + (self.default_user, _user_config) = ug_util.extract_default(users) self.metadata = dict() self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}), @@ -70,17 +80,18 @@ class DataSourceGCE(sources.DataSource): @property def launch_index(self): - # GCE does not provide lauch_index property + # GCE does not provide lauch_index property. return None def get_instance_id(self): return self.metadata['instance-id'] def get_public_ssh_keys(self): - return self.metadata['public-keys'] + public_keys_data = self.metadata['public-keys-data'] + return _parse_public_keys(public_keys_data, self.default_user) def get_hostname(self, fqdn=False, resolve_ip=False): - # GCE has long FDQN's and has asked for short hostnames + # GCE has long FDQN's and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @property @@ -92,15 +103,58 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] -def _trim_key(public_key): - # GCE takes sshKeys attribute in the format of ':' - # so we have to trim each key to remove the username part +def _has_expired(public_key): + # Check whether an SSH key is expired. 
Public key input is a single SSH + # public key in the GCE specific key format documented here: + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat + try: + # Check for the Google-specific schema identifier. + schema, json_str = public_key.split(None, 3)[2:] + except (ValueError, AttributeError): + return False + + # Do not expire keys if they do not have the expected schema identifier. + if schema != 'google-ssh': + return False + + try: + json_obj = json.loads(json_str) + except ValueError: + return False + + # Do not expire keys if there is no expriation timestamp. + if 'expireOn' not in json_obj: + return False + + expire_str = json_obj['expireOn'] + format_str = '%Y-%m-%dT%H:%M:%S+0000' try: - index = public_key.index(':') - if index > 0: - return public_key[(index + 1):] - except Exception: - return public_key + expire_time = datetime.datetime.strptime(expire_str, format_str) + except ValueError: + return False + + # Expire the key if and only if we have exceeded the expiration timestamp. + return datetime.datetime.utcnow() > expire_time + + +def _parse_public_keys(public_keys_data, default_user=None): + # Parse the SSH key data for the default user account. Public keys input is + # a list containing SSH public keys in the GCE specific key format + # documented here: + # https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#sshkeyformat + public_keys = [] + if not public_keys_data: + return public_keys + for public_key in public_keys_data: + if not public_key or not all(ord(c) < 128 for c in public_key): + continue + split_public_key = public_key.split(':', 1) + if len(split_public_key) != 2: + continue + user, key = split_public_key + if user in ('cloudinit', default_user) and not _has_expired(key): + public_keys.append(key) + return public_keys def read_md(address=None, platform_check=True): @@ -116,31 +170,28 @@ def read_md(address=None, platform_check=True): ret['reason'] = "Not running on GCE." return ret - # if we cannot resolve the metadata server, then no point in trying + # If we cannot resolve the metadata server, then no point in trying. if not util.is_resolvable_url(address): LOG.debug("%s is not resolvable", address) ret['reason'] = 'address "%s" is not resolvable' % address return ret - # url_map: (our-key, path, required, is_text) + # url_map: (our-key, path, required, is_text, is_recursive) url_map = [ - ('instance-id', ('instance/id',), True, True), - ('availability-zone', ('instance/zone',), True, True), - ('local-hostname', ('instance/hostname',), True, True), - ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/ssh-keys'), False, True), - ('user-data', ('instance/attributes/user-data',), False, False), - ('user-data-encoding', ('instance/attributes/user-data-encoding',), - False, True), + ('instance-id', ('instance/id',), True, True, False), + ('availability-zone', ('instance/zone',), True, True, False), + ('local-hostname', ('instance/hostname',), True, True, False), + ('instance-data', ('instance/attributes',), False, False, True), + ('project-data', ('project/attributes',), False, False, True), ] metadata_fetcher = GoogleMetadataFetcher(address) md = {} - # iterate over url_map keys to get metadata items - for (mkey, paths, required, is_text) in url_map: + # Iterate over url_map keys to get metadata items. 
+ for (mkey, paths, required, is_text, is_recursive) in url_map: value = None for path in paths: - new_value = metadata_fetcher.get_value(path, is_text) + new_value = metadata_fetcher.get_value(path, is_text, is_recursive) if new_value is not None: value = new_value if required and value is None: @@ -149,17 +200,23 @@ def read_md(address=None, platform_check=True): return ret md[mkey] = value - if md['public-keys']: - lines = md['public-keys'].splitlines() - md['public-keys'] = [_trim_key(k) for k in lines] + instance_data = json.loads(md['instance-data'] or '{}') + project_data = json.loads(md['project-data'] or '{}') + valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')] + block_project = instance_data.get('block-project-ssh-keys', '').lower() + if block_project != 'true' and not instance_data.get('sshKeys'): + valid_keys.append(project_data.get('ssh-keys')) + valid_keys.append(project_data.get('sshKeys')) + public_keys_data = '\n'.join([key for key in valid_keys if key]) + md['public-keys-data'] = public_keys_data.splitlines() if md['availability-zone']: md['availability-zone'] = md['availability-zone'].split('/')[-1] - encoding = md.get('user-data-encoding') + encoding = instance_data.get('user-data-encoding') if encoding: if encoding == 'base64': - md['user-data'] = b64decode(md['user-data']) + md['user-data'] = b64decode(instance_data.get('user-data')) else: LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) @@ -188,20 +245,19 @@ def platform_reports_gce(): return False -# Used to match classes to dependencies +# Used to match classes to dependencies. datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] -# Return a list of data sources that match this set of dependencies +# Return a list of data sources that match this set of dependencies. def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) if __name__ == "__main__": import argparse - import json import sys from base64 import b64encode @@ -217,7 +273,7 @@ if __name__ == "__main__": data = read_md(address=args.endpoint, platform_check=args.platform_check) if 'user-data' in data: # user-data is bytes not string like other things. Handle it specially. - # if it can be represented as utf-8 then do so. Otherwise print base64 + # If it can be represented as utf-8 then do so. Otherwise print base64 # encoded value in the key user-data-b64. try: data['user-data'] = data['user-data'].decode() @@ -225,7 +281,7 @@ if __name__ == "__main__": sys.stderr.write("User-data cannot be decoded. " "Writing as base64\n") del data['user-data'] - # b64encode returns a bytes value. decode to get the string. + # b64encode returns a bytes value. Decode to get the string. data['user-data-b64'] = b64encode(data['user-data']).decode() print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': '))) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 82c788dc..12d68009 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -4,13 +4,16 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+import datetime import httpretty +import json import mock import re from base64 import b64encode, b64decode from six.moves.urllib_parse import urlparse +from cloudinit import distros from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceGCE @@ -21,10 +24,7 @@ from cloudinit.tests import helpers as test_helpers GCE_META = { 'instance/id': '123', 'instance/zone': 'foo/bar', - 'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server', 'instance/hostname': 'server.project-foo.local', - # UnicodeDecodeError below if set to ds.userdata instead of userdata_raw - 'instance/attributes/user-data': b'/bin/echo \xff\n', } GCE_META_PARTIAL = { @@ -37,11 +37,13 @@ GCE_META_ENCODING = { 'instance/id': '12345', 'instance/hostname': 'server.project-baz.local', 'instance/zone': 'baz/bang', - 'instance/attributes/user-data': b64encode(b'/bin/echo baz\n'), - 'instance/attributes/user-data-encoding': 'base64', + 'instance/attributes': { + 'user-data': b64encode(b'/bin/echo baz\n').decode('utf-8'), + 'user-data-encoding': 'base64', + } } -HEADERS = {'X-Google-Metadata-Request': 'True'} +HEADERS = {'Metadata-Flavor': 'Google'} MD_URL_RE = re.compile( r'http://metadata.google.internal/computeMetadata/v1/.*') @@ -54,10 +56,15 @@ def _set_mock_metadata(gce_meta=None): url_path = urlparse(uri).path if url_path.startswith('/computeMetadata/v1/'): path = url_path.split('/computeMetadata/v1/')[1:][0] + recursive = path.endswith('/') + path = path.rstrip('/') else: path = None if path in gce_meta: - return (200, headers, gce_meta.get(path)) + response = gce_meta.get(path) + if recursive: + response = json.dumps(response) + return (200, headers, response) else: return (404, headers, '') @@ -69,6 +76,16 @@ def _set_mock_metadata(gce_meta=None): @httpretty.activate class TestDataSourceGCE(test_helpers.HttprettyTestCase): + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + def setUp(self): tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( @@ -90,6 +107,10 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertDictContainsSubset(HEADERS, req_header) def test_metadata(self): + # UnicodeDecodeError if set to ds.userdata instead of userdata_raw + meta = GCE_META.copy() + meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + _set_mock_metadata() self.ds.get_data() @@ -118,8 +139,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): _set_mock_metadata(GCE_META_ENCODING) self.ds.get_data() - decoded = b64decode( - GCE_META_ENCODING.get('instance/attributes/user-data')) + instance_data = GCE_META_ENCODING.get('instance/attributes') + decoded = b64decode(instance_data.get('user-data')) self.assertEqual(decoded, self.ds.get_userdata_raw()) def test_missing_required_keys_return_false(self): @@ -131,33 +152,124 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(False, self.ds.get_data()) httpretty.reset() - def test_project_level_ssh_keys_are_used(self): + def test_no_ssh_keys_metadata(self): _set_mock_metadata() self.ds.get_data() + self.assertEqual([], self.ds.get_public_ssh_keys()) + + def test_cloudinit_ssh_keys(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + 
project_attributes = { + 'sshKeys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") + def test_default_user_ssh_keys(self, mock_ug_util): + mock_ug_util.normalize_users_groups.return_value = None, None + mock_ug_util.extract_default.return_value = 'ubuntu', None + ubuntu_ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, self._make_distro('ubuntu'), + helpers.Paths({})) + + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } - # we expect a list of public ssh keys with user names stripped - self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'], - self.ds.get_public_ssh_keys()) + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + ubuntu_ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) + + def test_instance_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), + 'block-project-ssh-keys': 'False', + } - def test_instance_level_ssh_keys_are_used(self): - key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() - self.assertIn(key_content, self.ds.get_public_ssh_keys()) + expected = [valid_key.format(key) for key in range(2)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_block_project_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'block-project-ssh-keys': 'True', + } - def 
test_instance_level_keys_replace_project_level_keys(self): - key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() - self.assertEqual([key_content], self.ds.get_public_ssh_keys()) + expected = [valid_key.format(0)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) def test_only_last_part_of_zone_used_for_availability_zone(self): _set_mock_metadata() @@ -172,5 +284,44 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(False, ret) m_fetcher.assert_not_called() + def test_has_expired(self): + + def _get_timestamp(days): + format_str = '%Y-%m-%dT%H:%M:%S+0000' + today = datetime.datetime.now() + timestamp = today + datetime.timedelta(days=days) + return timestamp.strftime(format_str) + + past = _get_timestamp(-1) + future = _get_timestamp(1) + ssh_keys = { + None: False, + '': False, + 'Invalid': False, + 'user:ssh-rsa key user@domain.com': False, + 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, + 'user:ssh-rsa key google-ssh': False, + 'user:ssh-rsa key google-ssh {invalid:json}': False, + 'user:ssh-rsa key google-ssh {"userName":"user"}': False, + 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, + } + + for key, expired in ssh_keys.items(): + self.assertEqual(DataSourceGCE._has_expired(key), expired) + + def test_parse_public_keys_non_ascii(self): + public_key_data = [ + 'cloudinit:rsa ssh-ke%s invalid' % chr(165), + 'use%sname:rsa ssh-key' % chr(174), + 'cloudinit:test 1', + 'default:test 2', + 'user:test 3', + ] + expected = ['test 1', 'test 2'] + found = DataSourceGCE._parse_public_keys( + public_key_data, default_user='default') + self.assertEqual(sorted(found), sorted(expected)) # vi: ts=4 expandtab -- cgit v1.2.3 From 8a9421421497b3e7c05589c62389745d565c6633 Mon Sep 17 00:00:00 2001 From: Akihiko Ota Date: Wed, 13 Dec 2017 23:46:02 +0900 Subject: OpenNebula: Improve network configuration support. Network configuration in OpenNebula would only work if the host correctly guessed the names of the devices in the guest. OpenNebula provided data in its context.sh like 'ETH0_NETWORK', but if the guest named devices differently then results were not predictable. This would occur with Predictable Network Interface Names. To address this, newer versions (of OpenNebula provide the mac address ETH0_MAC. This function is present in 4.14 and documented officially in 5.0 docs. This provides support for reading the mac addresses from the context.sh. It also fixes cases where context.sh provided a field (ETH0_NETWORK or ETH0_MASK) with a empty string. Previously the empty string would be used rather than falling back to the default. 
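The heart of the change is an inverted lookup built from the ETHx_MAC context variables, so configuration can be found by the MAC the guest actually reports instead of a guessed device name. In miniature:

    import re

    def context_devname_by_mac(context):
        # Invert ETHX_MAC entries into {'02:00:0a:12:01:01': 'ETH0', ...}
        # so a system NIC's MAC can be mapped back to its context prefix.
        devmap = {}
        for key, value in context.items():
            m = re.match(r'^(.+)_MAC$', key)
            if m:
                devmap[value.lower()] = m.group(1)
        return devmap

    ctx = {'ETH0_MAC': '02:00:0A:12:01:01', 'ETH0_IP': '10.18.1.1'}
    assert context_devname_by_mac(ctx) == {'02:00:0a:12:01:01': 'ETH0'}

When no ETHx_MAC is present (older OpenNebula), the lookup misses and the code falls back to the system device name, which is the pre-existing behavior.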
LP: #1719157, #1716397, #1736750 --- cloudinit/net/__init__.py | 4 +- cloudinit/sources/DataSourceOpenNebula.py | 112 ++++++----- tests/unittests/test_datasource/test_opennebula.py | 223 ++++++++++++++++----- tests/unittests/test_net.py | 6 +- 4 files changed, 241 insertions(+), 104 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a1b0db10..c015e793 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -18,7 +18,7 @@ SYS_CLASS_NET = "/sys/class/net/" DEFAULT_PRIMARY_INTERFACE = 'eth0' -def _natural_sort_key(s, _nsre=re.compile('([0-9]+)')): +def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): """Sorting for Humans: natural sort order. Can be use as the key to sort functions. This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as @@ -224,7 +224,7 @@ def find_fallback_nic(blacklist_drivers=None): # if eth0 exists use it above anything else, otherwise get the interface # that we can read 'first' (using the sorted defintion of first). - names = list(sorted(potential_interfaces, key=_natural_sort_key)) + names = list(sorted(potential_interfaces, key=natural_sort_key)) if DEFAULT_PRIMARY_INTERFACE in names: names.remove(DEFAULT_PRIMARY_INTERFACE) names.insert(0, DEFAULT_PRIMARY_INTERFACE) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index f66c95d7..ce47b6bd 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -12,6 +12,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import collections import os import pwd import re @@ -19,6 +20,7 @@ import string from cloudinit import log as logging from cloudinit import net +from cloudinit.net import eni from cloudinit import sources from cloudinit import util @@ -89,11 +91,18 @@ class DataSourceOpenNebula(sources.DataSource): return False self.seed = seed - self.network_eni = results.get("network_config") + self.network_eni = results.get('network-interfaces') self.metadata = md self.userdata_raw = results.get('userdata') return True + @property + def network_config(self): + if self.network_eni is not None: + return eni.convert_eni_data(self.network_eni) + else: + return None + def get_hostname(self, fqdn=False, resolve_ip=None): if resolve_ip is None: if self.dsmode == sources.DSMODE_NETWORK: @@ -116,58 +125,53 @@ class OpenNebulaNetwork(object): self.context = context if system_nics_by_mac is None: system_nics_by_mac = get_physical_nics_by_mac() - self.ifaces = system_nics_by_mac + self.ifaces = collections.OrderedDict( + [k for k in sorted(system_nics_by_mac.items(), + key=lambda k: net.natural_sort_key(k[1]))]) + + # OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC. 
+ # context_devname provides {mac.lower():ETHX, mac2.lower():ETHX} + self.context_devname = {} + for k, v in context.items(): + m = re.match(r'^(.+)_MAC$', k) + if m: + self.context_devname[v.lower()] = m.group(1) def mac2ip(self, mac): - components = mac.split(':')[2:] - return [str(int(c, 16)) for c in components] + return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]]) - def get_ip(self, dev, components): - var_name = dev.upper() + '_IP' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components) + def mac2network(self, mac): + return self.mac2ip(mac).rpartition(".")[0] + ".0" - def get_mask(self, dev): - var_name = dev.upper() + '_MASK' - if var_name in self.context: - return self.context[var_name] - else: - return '255.255.255.0' + def get_dns(self, dev): + return self.get_field(dev, "dns", "").split() - def get_network(self, dev, components): - var_name = dev.upper() + '_NETWORK' - if var_name in self.context: - return self.context[var_name] - else: - return '.'.join(components[:-1]) + '.0' + def get_domain(self, dev): + return self.get_field(dev, "domain") + + def get_ip(self, dev, mac): + return self.get_field(dev, "ip", self.mac2ip(mac)) def get_gateway(self, dev): - var_name = dev.upper() + '_GATEWAY' - if var_name in self.context: - return self.context[var_name] - else: - return None + return self.get_field(dev, "gateway") - def get_dns(self, dev): - var_name = dev.upper() + '_DNS' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_mask(self, dev): + return self.get_field(dev, "mask", "255.255.255.0") - def get_domain(self, dev): - var_name = dev.upper() + '_DOMAIN' - if var_name in self.context: - return self.context[var_name] - else: - return None + def get_network(self, dev, mac): + return self.get_field(dev, "network", self.mac2network(mac)) + + def get_field(self, dev, name, default=None): + """return the field name in context for device dev. + + context stores _ (example: eth0_DOMAIN). + an empty string for value will return default.""" + val = self.context.get('_'.join((dev, name,)).upper()) + # allow empty string to return the default. + return default if val in (None, "") else val def gen_conf(self): - global_dns = [] - if 'DNS' in self.context: - global_dns.append(self.context['DNS']) + global_dns = self.context.get('DNS', "").split() conf = [] conf.append('auto lo') @@ -175,29 +179,31 @@ class OpenNebulaNetwork(object): conf.append('') for mac, dev in self.ifaces.items(): - ip_components = self.mac2ip(mac) + mac = mac.lower() + + # c_dev stores name in context 'ETHX' for this device. + # dev stores the current system name. 
+ c_dev = self.context_devname.get(mac, dev) conf.append('auto ' + dev) conf.append('iface ' + dev + ' inet static') - conf.append(' address ' + self.get_ip(dev, ip_components)) - conf.append(' network ' + self.get_network(dev, ip_components)) - conf.append(' netmask ' + self.get_mask(dev)) + conf.append(' #hwaddress %s' % mac) + conf.append(' address ' + self.get_ip(c_dev, mac)) + conf.append(' network ' + self.get_network(c_dev, mac)) + conf.append(' netmask ' + self.get_mask(c_dev)) - gateway = self.get_gateway(dev) + gateway = self.get_gateway(c_dev) if gateway: conf.append(' gateway ' + gateway) - domain = self.get_domain(dev) + domain = self.get_domain(c_dev) if domain: conf.append(' dns-search ' + domain) # add global DNS servers to all interfaces - dns = self.get_dns(dev) + dns = self.get_dns(c_dev) if global_dns or dns: - all_dns = global_dns - if dns: - all_dns.append(dns) - conf.append(' dns-nameservers ' + ' '.join(all_dns)) + conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) conf.append('') diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 2326dd58..5c3ba012 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -4,6 +4,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util from cloudinit.tests.helpers import mock, populate_dir, CiTestCase +from textwrap import dedent import os import pwd @@ -30,6 +31,8 @@ USER_DATA = '#cloud-config\napt_upgrade: true' SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" @@ -195,24 +198,96 @@ class TestOpenNebulaDataSource(CiTestCase): @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_hostname(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d) - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname']) + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + PUBLIC_IP, results['metadata']['local-hostname']) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} - populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'}) - results = ds.read_context_disk_dir(self.seed_dir) - - self.assertTrue('network-interfaces' in results) - self.assertTrue('1.2.3.4' in results['network-interfaces']) + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for Older OpenNebula? 
+ populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of using Virtual Network contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + + # ETH0_NETWORK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '10.18.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.0.0' in results['network-interfaces']) + + # ETH0_NETWORK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_NETWORK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('10.18.1.0' in results['network-interfaces']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.0.0' in results['network-interfaces']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue('255.255.255.0' in results['network-interfaces']) def test_find_candidates(self): def my_devs_with(criteria): @@ -233,7 +308,7 @@ class TestOpenNebulaDataSource(CiTestCase): class TestOpenNebulaNetwork(unittest.TestCase): - system_nics = {'02:00:0a:12:01:01': 'eth0'} + system_nics = ('eth0', 'ens3') def test_lo(self): net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) @@ -244,45 +319,101 @@ iface lo inet loopback @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): - m_get_phys_by_mac.return_value = self.system_nics - net = ds.OpenNebulaNetwork({}) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 10.18.1.1 - network 10.18.1.0 - netmask 255.255.255.0 -''') + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}) + self.assertEqual(net.gen_conf(), dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.1.0 + netmask 255.255.255.0 + """.format(dev=nic, macaddr=MACADDR))) def test_eth0_override(self): context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '1.2.3.4', - 'ETH0_NETWORK': '1.2.3.0', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', 'ETH0_MASK': '255.255.0.0', 'ETH0_GATEWAY': '1.2.3.5', 'ETH0_DOMAIN': 'example.com', - 'ETH0_DNS': 
'1.2.3.6 1.2.3.7' + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': '02:00:0a:12:01:01' } - - net = ds.OpenNebulaNetwork(context, - system_nics_by_mac=self.system_nics) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 1.2.3.4 - network 1.2.3.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 -''') + for nic in self.system_nics: + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto {dev} + iface {dev} inet static + #hwaddress {macaddr} + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """).format(dev=nic, macaddr=MACADDR) + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac={MACADDR: nic}) + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_IP': '10.18.1.1', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_DOMAIN': 'example.com', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_MAC': MAC_2, + 'ETH3_IP': '10.3.1.3', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_MASK': '255.255.0.0', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_DOMAIN': 'third.example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_MAC': MAC_1, + } + net = ds.OpenNebulaNetwork( + context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}) + + expected = dedent("""\ + auto lo + iface lo inet loopback + + auto enp0s25 + iface enp0s25 inet static + #hwaddress 02:00:0a:12:01:01 + address 10.3.1.3 + network 10.3.0.0 + netmask 255.255.0.0 + gateway 10.3.0.1 + dns-search third.example.com + dns-nameservers 1.2.3.8 10.3.1.2 + + auto enp1s2 + iface enp1s2 inet static + #hwaddress 02:00:0a:12:01:02 + address 10.18.1.1 + network 10.18.0.0 + netmask 255.255.0.0 + gateway 1.2.3.5 + dns-search example.com + dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 + """) + + self.assertEqual(expected, net.gen_conf()) class TestParseShellConfig(unittest.TestCase): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f3fa2a30..ddea13d7 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,9 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net -from cloudinit.net import _natural_sort_key from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import natural_sort_key from cloudinit.net import netplan from cloudinit.net import network_state from cloudinit.net import renderers @@ -2708,11 +2708,11 @@ class TestInterfacesSorting(CiTestCase): def test_natural_order(self): data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'] self.assertEqual( - sorted(data, key=_natural_sort_key), + sorted(data, key=natural_sort_key), ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']) data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2'] self.assertEqual( - sorted(data2, key=_natural_sort_key), + sorted(data2, key=natural_sort_key), ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) -- cgit v1.2.3 From b28ab78089d362c5c6cab985feee0f5f84c9db44 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 27 Nov 2017 19:05:52 -0500 Subject: btrfs: support resizing if root is mounted ro. 
Resizing a btrfs filesystem fails if the mount point for the filesystem we
are trying to resize, i.e. the root of the filesystem, is read-only. With
this change we use a known (currently snapper-specific) rw location to work
around a flaw that blocks resizing of the ro filesystem.

LP: #1734787
---
 cloudinit/config/cc_resizefs.py | 12 +++++-
 cloudinit/tests/test_util.py | 46 ++++++++++++++++++++++
 cloudinit/util.py | 23 ++++++++---
 .../test_handler/test_handler_resizefs.py | 22 ++++++++++-
 4 files changed, 95 insertions(+), 8 deletions(-)
 create mode 100644 cloudinit/tests/test_util.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 0d282e63..cec22bb7 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -59,7 +59,17 @@ __doc__ = get_schema_doc(schema)  # Supplement python help()
 
 
 def _resize_btrfs(mount_point, devpth):
-    return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+    # If "/" is ro, resize will fail. However it should be allowed since
+    # resize makes everything bigger and subvolumes that are not ro benefit.
+    # Use a subvolume that is not ro to trick the resize operation into doing
+    # the "right" thing. The use of ".snapshots" is specific to "snapper"; a
+    # generic solution would be to walk the subvolumes and find a rw one.
+    if (not util.mount_is_read_write(mount_point) and
+            os.path.isdir("%s/.snapshots" % mount_point)):
+        return ('btrfs', 'filesystem', 'resize', 'max',
+                '%s/.snapshots' % mount_point)
+    else:
+        return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
 
 
 def _resize_ext(mount_point, devpth):
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
new file mode 100644
index 00000000..ba6bf699
--- /dev/null
+++ b/cloudinit/tests/test_util.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+ +"""Tests for cloudinit.util""" + +import logging + +import cloudinit.util as util + +from cloudinit.tests.helpers import CiTestCase, mock + +LOG = logging.getLogger(__name__) + +MOUNT_INFO = [ + '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', + '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' +] + + +class TestUtil(CiTestCase): + + def test_parse_mount_info_no_opts_no_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_no_opts_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_with_opts(self): + result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) + self.assertEqual( + ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), + result + ) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_rw(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, True) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_ro(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, False) diff --git a/cloudinit/util.py b/cloudinit/util.py index df0aa5db..9976400f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2059,7 +2059,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def parse_mount_info(path, mountinfo_lines, log=LOG): +def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): """Return the mount information for PATH given the lines from /proc/$$/mountinfo.""" @@ -2121,11 +2121,16 @@ def parse_mount_info(path, mountinfo_lines, log=LOG): match_mount_point = mount_point match_mount_point_elements = mount_point_elements + mount_options = parts[5] - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) + if get_mnt_opts: + if devpth and fs_type and match_mount_point and mount_options: + return (devpth, fs_type, match_mount_point, mount_options) else: - return None + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + + return None def parse_mtab(path): @@ -2195,7 +2200,7 @@ def parse_mount(path): return None -def get_mount_info(path, log=LOG): +def get_mount_info(path, log=LOG, get_mnt_opts=False): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. 
@@ -2227,7 +2232,7 @@ def get_mount_info(path, log=LOG):
     mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
     if os.path.exists(mountinfo_path):
         lines = load_file(mountinfo_path).splitlines()
-        return parse_mount_info(path, lines, log)
+        return parse_mount_info(path, lines, log, get_mnt_opts)
     elif os.path.exists("/etc/mtab"):
         return parse_mtab(path)
     else:
@@ -2613,4 +2618,10 @@ def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
     return need
 
 
+def mount_is_read_write(mount_point):
+    """Check whether the given mount point is mounted rw"""
+    result = get_mount_info(mount_point, get_mnt_opts=True)
+    mount_opts = result[-1].split(',')
+    return mount_opts[0] == 'rw'
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index 29d5574d..5aa3c498 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -1,7 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 from cloudinit.config.cc_resizefs import (
-    can_skip_resize, handle, maybe_get_writable_device_path)
+    can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs)
 
 from collections import namedtuple
 import logging
@@ -293,5 +293,25 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
             " per kernel cmdline",
             self.logs.getvalue())
 
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+        """Do not resize / directly if it is read-only. (LP: #1734787)."""
+        m_is_rw.return_value = False
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
+            _resize_btrfs("/", "/dev/sda1"))
+
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+        """Resize / directly if it is mounted read-write. (LP: #1734787)."""
+        m_is_rw.return_value = True
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '/'),
+            _resize_btrfs("/", "/dev/sda1"))
+
 # vi: ts=4 expandtab
--
cgit v1.2.3


From df182de41fd75bc0cf2b1de2b511c37527d45475 Mon Sep 17 00:00:00 2001
From: aRkadeFR
Date: Tue, 23 Jan 2018 15:48:56 -0500
Subject: docs: Fix typos in docs and one debug message.

Fix obvious typos. Replace 'for for' with a 'for'.
---
 cloudinit/config/cc_rsyslog.py | 10 +++++-----
 cloudinit/sources/__init__.py | 2 +-
 doc/rtd/topics/network-config-format-v1.rst | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 50ff9e35..af08788c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -20,15 +20,15 @@ which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
 write config files to may be specified in ``config_dir``, which defaults to
 ``/etc/rsyslog.d``.
 
-A list of configurations for for rsyslog can be specified under the ``configs``
-key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or
-a dictionary. Each config entry contains a configuration string and a file to
+A list of configurations for rsyslog can be specified under the ``configs`` key
+in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a
+dictionary. Each config entry contains a configuration string and a file to
 write it to. For config entries that are a dictionary, ``filename`` sets the
 target filename and ``content`` specifies the config string to write. For
 config entries that are only a string, the string is used as the config string
 to write. If the filename to write the config to is not specified, the value of
-the ``config_filename`` key is used. A file with the selected filename will
-be written inside the directory specified by ``config_dir``.
+the ``config_filename`` key is used. A file with the selected filename will be
+written inside the directory specified by ``config_dir``.
 
 The command to use to reload the rsyslog service after the config has been
 updated can be specified in ``service_reload_command``. If this is set to
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 4b819ce6..a05ca2f6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -448,7 +448,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
 
 # Return an ordered list of classes that match (if any)
 def list_sources(cfg_list, depends, pkg_list):
     src_list = []
-    LOG.debug(("Looking for for data source in: %s,"
+    LOG.debug(("Looking for data source in: %s,"
               " via packages %s that matches dependencies %s"),
              cfg_list, pkg_list, depends)
     for ds_name in cfg_list:
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index ce3a1bde..2f8ab54c 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -349,7 +349,7 @@ For any network device (one of the Config Types) users can define a list of
 entries will create interface alias allowing a single interface to use
 different ip configurations.
 
-Valid keys for for ``subnets`` include the following:
+Valid keys for ``subnets`` include the following:
 
 - ``type``: Specify the subnet type.
 - ``control``: Specify manual, auto or hotplug. Indicates how the interface
--
cgit v1.2.3


From c03bdd3d8ed762cada813c5e95a40b14d2047b57 Mon Sep 17 00:00:00 2001
From: Douglas Jordan
Date: Wed, 24 Jan 2018 16:10:08 -0700
Subject: Azure VM Preprovisioning support.

This change enables Azure VMs to report that provisioning has completed
twice: first to tell the fabric it has completed, then a second time to
enable customer settings. The datasource for the second provisioning is
the Instance Metadata Service (IMDS), and the VM will poll indefinitely
for the new ovf-env.xml from IMDS.

This branch introduces EphemeralDHCPv4, which encapsulates common logic
used by both DataSourceEc2 and DataSourceAzure for temporary DHCP
interactions without side-effects.
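
A minimal caller-side sketch of that pattern (illustrative only; the
EphemeralDHCPv4 and NoDHCPLeaseError names and the lease keys come from
the diff below, while fetch_metadata is a hypothetical placeholder for
whatever work the caller does while the lease is held):

    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    try:
        with EphemeralDHCPv4() as lease:
            # The fallback nic holds the leased address only inside this
            # block; the ephemeral network is torn down again on exit.
            fetch_metadata(lease['fixed-address'])  # hypothetical caller
    except NoDHCPLeaseError:
        pass  # no lease obtained; fall back (e.g. to init-network stage)
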
LP: #1734991 --- .gitignore | 1 + cloudinit/net/dhcp.py | 43 ++++++- cloudinit/net/network_state.py | 12 ++ cloudinit/sources/DataSourceAzure.py | 138 ++++++++++++++++++++-- cloudinit/sources/DataSourceEc2.py | 23 ++-- cloudinit/sources/helpers/azure.py | 22 ++-- cloudinit/temp_utils.py | 11 +- cloudinit/url_helper.py | 29 +++-- tests/unittests/test_datasource/test_azure.py | 157 +++++++++++++++++++++++++- tests/unittests/test_datasource/test_ec2.py | 2 +- tests/unittests/test_net.py | 12 ++ 11 files changed, 397 insertions(+), 53 deletions(-) (limited to 'cloudinit') diff --git a/.gitignore b/.gitignore index b0500a68..75565ed4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ parts prime stage *.snap +*.cover diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 875a4609..087c0c03 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -10,7 +10,9 @@ import os import re import signal -from cloudinit.net import find_fallback_nic, get_devicelist +from cloudinit.net import ( + EphemeralIPv4Network, find_fallback_nic, get_devicelist) +from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util from six import StringIO @@ -29,6 +31,45 @@ class InvalidDHCPLeaseFileError(Exception): pass +class NoDHCPLeaseError(Exception): + """Raised when unable to get a DHCP lease.""" + pass + + +class EphemeralDHCPv4(object): + def __init__(self, iface=None): + self.iface = iface + self._ephipv4 = None + + def __enter__(self): + try: + leases = maybe_perform_dhcp_discovery(self.iface) + except InvalidDHCPLeaseFileError: + raise NoDHCPLeaseError() + if not leases: + raise NoDHCPLeaseError() + lease = leases[-1] + LOG.debug("Received dhcp lease on %s for %s/%s", + lease['interface'], lease['fixed-address'], + lease['subnet-mask']) + nmap = {'interface': 'interface', 'ip': 'fixed-address', + 'prefix_or_mask': 'subnet-mask', + 'broadcast': 'broadcast-address', + 'router': 'routers'} + kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) + if not kwargs['broadcast']: + kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + ephipv4 = EphemeralIPv4Network(**kwargs) + ephipv4.__enter__() + self._ephipv4 = ephipv4 + return lease + + def __exit__(self, excp_type, excp_value, excp_traceback): + if not self._ephipv4: + return + self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) + + def maybe_perform_dhcp_discovery(nic=None): """Perform dhcp discovery if nic valid and dhclient command exists. diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 31738c73..fe667d88 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -961,4 +961,16 @@ def mask_to_net_prefix(mask): return ipv4_mask_to_net_prefix(mask) +def mask_and_ipv4_to_bcast_addr(mask, ip): + """Calculate the broadcast address from the subnet mask and ip addr. 
+ + Supports ipv4 only.""" + ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2) + mask_dec = ipv4_mask_to_net_prefix(mask) + bcast_bin = ip_bin | (2**(32 - mask_dec) - 1) + bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF) + for i in range(4)[::-1]]) + return bcast_str + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d1d09757..4bcbf3a4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -11,13 +11,16 @@ from functools import partial import os import os.path import re +from time import time from xml.dom import minidom import xml.etree.ElementTree as ET from cloudinit import log as logging from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric +from cloudinit.url_helper import readurl, wait_for_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -44,6 +47,9 @@ LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' +REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" +IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" +IMDS_RETRIES = 5 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -276,19 +282,20 @@ class DataSourceAzure(sources.DataSource): with temporary_hostname(azure_hostname, self.ds_cfg, hostname_command=hostname_command) \ - as previous_hostname: - if (previous_hostname is not None and + as previous_hn: + if (previous_hn is not None and util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] # "Bouncing" the network try: - perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hostname) + return perform_hostname_bounce(hostname=azure_hostname, + cfg=cfg, + prev_hostname=previous_hn) except Exception as e: LOG.warning("Failed publishing hostname: %s", e) util.logexc(LOG, "handling set_hostname failed") + return False def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') @@ -345,15 +352,20 @@ class DataSourceAzure(sources.DataSource): ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] + if os.path.isfile(REPROVISION_MARKER_FILE): + candidates.insert(0, "IMDS") candidates.extend(list_possible_azure_ds_devs()) if ddir: candidates.append(ddir) found = None - + reprovision = False for cdev in candidates: try: - if cdev.startswith("/dev/"): + if cdev == "IMDS": + ret = None + reprovision = True + elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, mtype="udf", sync=False) @@ -370,6 +382,8 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue + if reprovision or self._should_reprovision(ret): + ret = self._reprovision() (md, self.userdata_raw, cfg, files) = ret self.seed = cdev self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) @@ -428,6 +442,83 @@ class DataSourceAzure(sources.DataSource): LOG.debug("negotiating already done for %s", self.get_instance_id()) + def _poll_imds(self, report_ready=True): + """Poll IMDS for the new provisioning data until we get a valid + response. 
Then return the returned JSON object.""" + url = IMDS_URL + "?api-version=2017-04-02" + headers = {"Metadata": "true"} + LOG.debug("Start polling IMDS") + + def sleep_cb(response, loop_n): + return 1 + + def exception_cb(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return + LOG.warning("Exception during polling. Will try DHCP.", + exc_info=True) + + # If we get an exception while trying to call IMDS, we + # call DHCP and setup the ephemeral network to acquire the new IP. + raise exception + + need_report = report_ready + for i in range(IMDS_RETRIES): + try: + with EphemeralDHCPv4() as lease: + if need_report: + self._report_ready(lease=lease) + need_report = False + wait_for_url([url], max_wait=None, timeout=60, + status_cb=LOG.info, + headers_cb=lambda url: headers, sleep_time=1, + exception_cb=exception_cb, + sleep_time_cb=sleep_cb) + return str(readurl(url, headers=headers)) + except Exception: + LOG.debug("Exception during polling-retrying dhcp" + + " %d more time(s).", (IMDS_RETRIES - i), + exc_info=True) + + def _report_ready(self, lease): + """Tells the fabric provisioning has completed + before we go into our polling loop.""" + try: + get_metadata_from_fabric(None, lease['unknown-245']) + except Exception as exc: + LOG.warning( + "Error communicating with Azure fabric; You may experience." + "connectivity issues.", exc_info=True) + + def _should_reprovision(self, ret): + """Whether or not we should poll IMDS for reprovisioning data. + Also sets a marker file to poll IMDS. + + The marker file is used for the following scenario: the VM boots into + this polling loop, which we expect to be proceeding infinitely until + the VM is picked. If for whatever reason the platform moves us to a + new host (for instance a hardware issue), we need to keep polling. + However, since the VM reports ready to the Fabric, we will not attach + the ISO, thus cloud-init needs to have a way of knowing that it should + jump back into the polling loop in order to retrieve the ovf_env.""" + if not ret: + return False + (md, self.userdata_raw, cfg, files) = ret + path = REPROVISION_MARKER_FILE + if (cfg.get('PreprovisionedVm') is True or + os.path.isfile(path)): + if not os.path.isfile(path): + LOG.info("Creating a marker file to poll imds") + util.write_file(path, "%s: %s\n" % (os.getpid(), time())) + return True + return False + + def _reprovision(self): + """Initiate the reprovisioning workflow.""" + contents = self._poll_imds() + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + def _negotiate(self): """Negotiate with fabric and return data from it. @@ -453,7 +544,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False - + util.del_file(REPROVISION_MARKER_FILE) return fabric_data def activate(self, cfg, is_new_instance): @@ -595,6 +686,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command + # Returns True if the network was bounced, False otherwise. 
command = cfg['command'] interface = cfg['interface'] policy = cfg['policy'] @@ -614,7 +706,8 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): else: LOG.debug( "Skipping network bounce: ifupdown utils aren't present.") - return # Don't bounce as networkd handles hostname DDNS updates + # Don't bounce as networkd handles hostname DDNS updates + return False LOG.debug("pubhname: publishing hostname [%s]", msg) shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. @@ -622,6 +715,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): get_uptime=True, func=util.subp, kwargs={'args': command, 'shell': shell, 'capture': False, 'env': env}) + return True def crtfile_to_pubkey(fname, data=None): @@ -838,9 +932,35 @@ def read_azure_ovf(contents): if 'ssh_pwauth' not in cfg and password: cfg['ssh_pwauth'] = True + cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom) + return (md, ud, cfg) +def _extract_preprovisioned_vm_setting(dom): + """Read the preprovision flag from the ovf. It should not + exist unless true.""" + platform_settings_section = find_child( + dom.documentElement, + lambda n: n.localName == "PlatformSettingsSection") + if not platform_settings_section or len(platform_settings_section) == 0: + LOG.debug("PlatformSettingsSection not found") + return False + platform_settings = find_child( + platform_settings_section[0], + lambda n: n.localName == "PlatformSettings") + if not platform_settings or len(platform_settings) == 0: + LOG.debug("PlatformSettings not found") + return False + preprovisionedVm = find_child( + platform_settings[0], + lambda n: n.localName == "PreprovisionedVm") + if not preprovisionedVm or len(preprovisionedVm) == 0: + LOG.debug("PreprovisionedVm not found") + return False + return util.translate_bool(preprovisionedVm[0].firstChild.nodeValue) + + def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0f89f34d..e14553b3 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -14,7 +14,7 @@ import time from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import net -from cloudinit.net import dhcp +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util @@ -102,22 +102,13 @@ class DataSourceEc2(sources.DataSource): if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False - dhcp_leases = dhcp.maybe_perform_dhcp_discovery( - self.fallback_interface) - if not dhcp_leases: - # DataSourceEc2Local failed in init-local stage. DataSourceEc2 - # will still run in init-network stage. 
+ try: + with EphemeralDHCPv4(self.fallback_interface): + return util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except NoDHCPLeaseError: return False - dhcp_opts = dhcp_leases[-1] - net_params = {'interface': dhcp_opts.get('interface'), - 'ip': dhcp_opts.get('fixed-address'), - 'prefix_or_mask': dhcp_opts.get('subnet-mask'), - 'broadcast': dhcp_opts.get('broadcast-address'), - 'router': dhcp_opts.get('routers')} - with net.EphemeralIPv4Network(**net_params): - return util.log_time( - logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) else: return self._crawl_metadata() diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6cda5721..90c12df1 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -199,10 +199,10 @@ class WALinuxAgentShim(object): ' ', '']) - def __init__(self, fallback_lease_file=None): + def __init__(self, fallback_lease_file=None, dhcp_options=None): LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s', fallback_lease_file) - self.dhcpoptions = None + self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None self.values = {} @@ -220,7 +220,8 @@ class WALinuxAgentShim(object): @property def endpoint(self): if self._endpoint is None: - self._endpoint = self.find_endpoint(self.lease_file) + self._endpoint = self.find_endpoint(self.lease_file, + self.dhcpoptions) return self._endpoint @staticmethod @@ -292,10 +293,14 @@ class WALinuxAgentShim(object): return _value @staticmethod - def find_endpoint(fallback_lease_file=None): + def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None - LOG.debug('Finding Azure endpoint from networkd...') - value = WALinuxAgentShim._networkd_get_value_from_leases() + if dhcp245 is not None: + value = dhcp245 + LOG.debug("Using Azure Endpoint from dhcp options") + if value is None: + LOG.debug('Finding Azure endpoint from networkd...') + value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook @@ -367,8 +372,9 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None): - shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file) +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) try: return shim.register_with_azure_and_fetch_data() finally: diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index 5d7adf70..c98a1b53 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -28,13 +28,18 @@ def _tempfile_dir_arg(odir=None, needs_exe=False): if odir is not None: return odir + if needs_exe: + tdir = _EXE_ROOT_TMPDIR + if not os.path.isdir(tdir): + os.makedirs(tdir) + os.chmod(tdir, 0o1777) + return tdir + global _TMPDIR if _TMPDIR: return _TMPDIR - if needs_exe: - tdir = _EXE_ROOT_TMPDIR - elif os.getuid() == 0: + if os.getuid() == 0: tdir = _ROOT_TMPDIR else: tdir = os.environ.get('TMPDIR', '/tmp') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0e0f5b4c..0a5be0b3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -273,7 +273,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def wait_for_url(urls, max_wait=None, timeout=None, 
status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None): + exception_cb=None, sleep_time_cb=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -286,6 +286,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, for request. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. + sleep_time_cb: call method with 2 arguments (response, loop_n) that + generates the next sleep time. the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where @@ -301,6 +303,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, service but is not going to find one. It is possible that the instance data host (169.254.169.254) may be firewalled off Entirely for a sytem, meaning that the connection will block forever unless a timeout is set. + + A value of None for max_wait will retry indefinitely. """ start_time = time.time() @@ -311,18 +315,24 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb = log_status_cb def timeup(max_wait, start_time): - return ((max_wait <= 0 or max_wait is None) or - (time.time() - start_time > max_wait)) + if (max_wait is None): + return False + return ((max_wait <= 0) or (time.time() - start_time > max_wait)) loop_n = 0 + response = None while True: - sleep_time = int(loop_n / 5) + 1 + if sleep_time_cb is not None: + sleep_time = sleep_time_cb(response, loop_n) + else: + sleep_time = int(loop_n / 5) + 1 for url in urls: now = time.time() if loop_n != 0: if timeup(max_wait, start_time): break - if timeout and (now + timeout > (start_time + max_wait)): + if (max_wait is not None and + timeout and (now + timeout > (start_time + max_wait))): # shorten timeout to not run way over max_time timeout = int((start_time + max_wait) - now) @@ -354,10 +364,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = e time_taken = int(time.time() - start_time) - status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url, - time_taken, - max_wait, - reason) + max_wait_str = "%ss" % max_wait if max_wait else "unlimited" + status_msg = "Calling '%s' failed [%s/%s]: %s" % (url, + time_taken, + max_wait_str, + reason) status_cb(status_msg) if exception_cb: # This can be used to alter the headers that will be sent diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6341e1e8..254e9876 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -5,7 +5,7 @@ from cloudinit.util import b64e, decode_binary, load_file, write_file from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd - +from cloudinit.version import version_string as vs from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, ExitStack, PY26, SkipTest) @@ -16,7 +16,8 @@ import xml.etree.ElementTree as ET import yaml -def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): +def construct_valid_ovf_env(data=None, pubkeys=None, + userdata=None, platform_settings=None): if data is None: data = {'HostName': 'FOOHOST'} if pubkeys is None: @@ -66,10 +67,12 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> kms.core.windows.net false - - - - """ + """ + if platform_settings: + for k, v in platform_settings.items(): + content += 
"<%s>%s\n" % (k, v, k) + content += """ +""" return content @@ -1107,4 +1110,146 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch.object(dsaz, 'get_hostname') +@mock.patch.object(dsaz, 'set_hostname') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_read_azure_ovf_with_true_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag if the proper setting is present.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertTrue(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_with_false_flag(self, *args): + """The read_azure_ovf method should set the PreprovisionedVM + cfg flag to false if the proper setting is false.""" + content = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "False"}) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + def test_read_azure_ovf_without_flag(self, *args): + """The read_azure_ovf method should not set the + PreprovisionedVM cfg flag.""" + content = construct_valid_ovf_env() + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + self.assertFalse(cfg['PreprovisionedVm']) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _poll_imds method should return the ovf_env.xml.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' + host = "169.254.169.254" + full_url = url.format(host) + fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf") + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(fake_resp.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', timeout=60.0, + url=full_url), + mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', url=full_url)]) + self.assertEqual(m_dhcp.call_count, 1) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') + self.assertEqual(m_net.call_count, 1) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('requests.Session.request') + def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + m_is_bsd, *args): + """The _reprovision method should call poll IMDS.""" + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02'
+        host = "169.254.169.254"
+        full_url = url.format(host)
+        hostname = "myhost"
+        username = "myuser"
+        odata = {'HostName': hostname, 'UserName': username}
+        content = construct_valid_ovf_env(data=odata)
+        fake_resp.return_value = mock.MagicMock(status_code=200, text=content)
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        md, ud, cfg, d = dsa._reprovision()
+        self.assertEqual(md['local-hostname'], hostname)
+        self.assertEqual(cfg['system_info']['default_user']['name'], username)
+        self.assertEqual(fake_resp.call_args_list,
+                         [mock.call(allow_redirects=True,
+                                    headers={'Metadata': 'true',
+                                             'User-Agent':
+                                             'Cloud-Init/%s' % vs()},
+                                    method='GET', timeout=60.0, url=full_url),
+                          mock.call(allow_redirects=True,
+                                    headers={'Metadata': 'true',
+                                             'User-Agent':
+                                             'Cloud-Init/%s' % vs()},
+                                    method='GET', url=full_url)])
+        self.assertEqual(m_dhcp.call_count, 1)
+        m_net.assert_any_call(
+            broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
+            prefix_or_mask='255.255.255.0', router='192.168.2.1')
+        self.assertEqual(m_net.call_count, 1)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch('os.path.isfile')
+    def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args):
+        """The _should_reprovision method should return True when the config
+           flag is present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'PreprovisionedVm': True}, None)))
+
+    @mock.patch('os.path.isfile')
+    def test__should_reprovision_with_file_existing(self, isfile, *args):
+        """The _should_reprovision method should return True if the sentinel
+           exists."""
+        isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'preprovisionedvm': False}, None)))
+
+    @mock.patch('os.path.isfile')
+    def test__should_reprovision_returns_false(self, isfile, *args):
+        """The _should_reprovision method should return False
+           if config and sentinel are not present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index f0dc8338..0f7267bb 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -425,7 +425,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
             self.logs.getvalue())
 
     @httpretty.activate
-    @mock.patch('cloudinit.net.EphemeralIPv4Network')
+    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
     @mock.patch('cloudinit.net.find_fallback_nic')
     @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
     @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index ddea13d7..ac33e8ef 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -2948,4 +2948,16 @@ class TestRenameInterfaces(CiTestCase):
             mock_subp.assert_has_calls(expected)
 
 
+class TestNetworkState(CiTestCase):
+
+    def test_bcast_addr(self):
+        """Test mask_and_ipv4_to_bcast_addr proper execution."""
+        bcast_addr = network_state.mask_and_ipv4_to_bcast_addr
self.assertEqual("192.168.1.255", + bcast_addr("255.255.255.0", "192.168.1.1")) + self.assertEqual("128.42.7.255", + bcast_addr("255.255.248.0", "128.42.5.4")) + self.assertEqual("10.1.21.255", + bcast_addr("255.255.255.0", "10.1.21.4")) + # vi: ts=4 expandtab -- cgit v1.2.3 From 1d8c327139a8c291eeb244ee1a6a8badd83e9e72 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 26 Jan 2018 13:36:30 -0700 Subject: Fix potential cases of uninitialized variables. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While addressing undeclared variable in 'cloud-init status', I also fixed the errors raised by automated code reviews against cloud-init master at https://lgtm.com/projects/g/cloud-init/cloud-init/alerts The following items are addressed:  * Fix 'cloud-init status':     * Only report 'running' state when any stage in /run/cloud-init/status.json has a start time but no finished time. Default start time to 0 if null.     * undeclared variable 'reason' now reports 'Cloud-init enabled by systemd cloud-init-generator' when systemd enables cloud-init  * cc_rh_subscription.py util.subp return values aren't set during if an exception is raised, use ProcessExecution as e instead.  * distros/freebsd.py:    * Drop repetitive looping over ipv4 and ipv6 nic lists.    * Initialize bsddev to 'NOTFOUND' in the event that no devs are discovered    * declare nics_with_addresses = set() in broader scope outside check_downable conditional  * cloudinit/util.py: Raise TypeError if mtype parameter isn't string, iterable or None. LP: #1744796 --- cloudinit/cmd/status.py | 7 +++++-- cloudinit/cmd/tests/test_status.py | 21 ++++++++++++++++++--- cloudinit/config/cc_power_state_change.py | 1 + cloudinit/config/cc_rh_subscription.py | 5 ++--- cloudinit/distros/freebsd.py | 11 +++-------- cloudinit/util.py | 4 ++++ 6 files changed, 33 insertions(+), 16 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index 3e5d0d07..d7aaee9d 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -93,6 +93,8 @@ def _is_cloudinit_disabled(disable_file, paths): elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): is_disabled = True reason = 'Cloud-init disabled by cloud-init-generator' + else: + reason = 'Cloud-init enabled by systemd cloud-init-generator' return (is_disabled, reason) @@ -127,10 +129,11 @@ def _get_status_details(paths): status_detail = value elif isinstance(value, dict): errors.extend(value.get('errors', [])) + start = value.get('start') or 0 finished = value.get('finished') or 0 - if finished == 0: + if finished == 0 and start != 0: status = STATUS_RUNNING - event_time = max(value.get('start', 0), finished) + event_time = max(start, finished) if event_time > latest_event: latest_event = event_time if errors: diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 6d4a11e8..a7c0a91a 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -93,6 +93,19 @@ class TestStatus(CiTestCase): self.assertTrue(is_disabled, 'expected disabled cloud-init') self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) + def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): + '''Report enabled when systemd generator creates the enabled file.''' + enabled_file = os.path.join(self.paths.run_dir, 'enabled') + write_file(enabled_file, '') + (is_disabled, reason) = wrap_and_call( + 'cloudinit.cmd.status', + {'uses_systemd': True, + 
'get_cmdline': 'something ignored'}, + status._is_cloudinit_disabled, self.disable_file, self.paths) + self.assertFalse(is_disabled, 'expected enabled cloud-init') + self.assertEqual( + 'Cloud-init enabled by systemd cloud-init-generator', reason) + def test_status_returns_not_run(self): '''When status.json does not exist yet, return 'not run'.''' self.assertFalse( @@ -137,8 +150,9 @@ class TestStatus(CiTestCase): self.assertEqual(expected, m_stdout.getvalue()) def test_status_returns_running(self): - '''Report running when status file exists but isn't finished.''' - write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + '''Report running when status exists with an unfinished stage.''' + write_json(self.status_file, + {'v1': {'init': {'start': 1, 'finished': None}}}) cmdargs = myargs(long=False, wait=False) with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: retcode = wrap_and_call( @@ -338,7 +352,8 @@ class TestStatus(CiTestCase): def test_status_main(self): '''status.main can be run as a standalone script.''' - write_json(self.status_file, {'v1': {'init': {'finished': None}}}) + write_json(self.status_file, + {'v1': {'init': {'start': 1, 'finished': None}}}) with self.assertRaises(SystemExit) as context_manager: with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: wrap_and_call( diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index eba58b02..4da3a588 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -194,6 +194,7 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): + ret = 1 try: proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index a9d21e78..530808ce 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -276,9 +276,8 @@ class SubscriptionManager(object): cmd = ['attach', '--auto'] try: return_out, return_err = self._sub_man_cli(cmd) - except util.ProcessExecutionError: - self.log_warn("Auto-attach failed with: " - "{0}]".format(return_err.strip())) + except util.ProcessExecutionError as e: + self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): if line is not "": diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index bad112fe..aa468bca 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -116,6 +116,7 @@ class Distro(distros.Distro): (out, err) = util.subp(['ifconfig', '-a']) ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] + bsddev = 'NOT_FOUND' for line in ifconfigoutput: m = re.match('^\w+', line) if m: @@ -347,15 +348,9 @@ class Distro(distros.Distro): bymac[Distro.get_interface_mac(n)] = { 'name': n, 'up': self.is_up(n), 'downable': None} + nics_with_addresses = set() if check_downable: - nics_with_addresses = set() - ipv6 = self.get_ipv6() - ipv4 = self.get_ipv4() - for bytes_out in (ipv6, ipv4): - for i in ipv6: - nics_with_addresses.update(i) - for i in ipv4: - nics_with_addresses.update(i) + nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) for d in bymac.values(): d['downable'] = (d['up'] is False or diff --git a/cloudinit/util.py b/cloudinit/util.py index 9976400f..338fb971 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1587,6 +1587,10 @@ def mount_cb(device, callback, 
data=None, rw=False, mtype=None, sync=True): mtypes = list(mtype) elif mtype is None: mtypes = None + else: + raise TypeError( + 'Unsupported type provided for mtype parameter: {_type}'.format( + _type=type(mtype))) # clean up 'mtype' input a bit based on platform. platsys = platform.system().lower() -- cgit v1.2.3
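
A worked sketch of the corrected 'running' rule above (a standalone
illustration, not the code in cloudinit/cmd/status.py; the stage dict
mirrors one hypothetical entry of /run/cloud-init/status.json):

    # A stage reports 'running' only when it has a start time but no
    # finished time; missing values default to 0, as in the fix.
    stage = {'start': 1516124800.0, 'finished': None}  # hypothetical entry
    start = stage.get('start') or 0
    finished = stage.get('finished') or 0
    if finished == 0 and start != 0:
        status = 'running'
    # A stage with neither timestamp ({'start': None, 'finished': None})
    # no longer reports 'running'; it simply has not run yet.
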