From 41152f10ddbd8681cdac44b408038a4f23ab02df Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 17 Oct 2017 16:12:59 -0400 Subject: schema: Log debug instead of warning when jsonschema is not available. When operating in the expected path, cloud-init should avoid logging at warning level. That causes 'WARNING' messages in /var/log/cloud-init.log. By default, warnings also go to the console. Since jsonschema is an optional dependency, and not present on xenial and zesty, cloud-init should not warn there. Also here: * Add an integration test to assert that there are no warnings in /var/log/cloud-init.log. * Update one integration test that did show a warning, and the related documentation and examples. LP: #1724354 --- cloudinit/config/cc_users_groups.py | 3 ++- cloudinit/config/schema.py | 2 +- doc/examples/cloud-config-user-groups.txt | 6 +++--- tests/cloud_tests/testcases/base.py | 4 ++++ tests/cloud_tests/testcases/examples/including_user_groups.py | 6 ++++++ tests/cloud_tests/testcases/examples/including_user_groups.yaml | 7 +++++-- tests/cloud_tests/testcases/modules/user_groups.py | 6 ++++++ tests/cloud_tests/testcases/modules/user_groups.yaml | 7 +++++-- 8 files changed, 32 insertions(+), 9 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index b80d1d36..f363000d 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -15,7 +15,8 @@ options, see the ``Including users and groups`` config example. Groups to add to the system can be specified as a list under the ``groups`` key. Each entry in the list should either contain a the group name as a string, or a dictionary with the group name as the key and a list of users who should -be members of the group as the value. +be members of the group as the value. **Note**: Groups are added before users, +so any users in a group list must already exist on the system. The ``users`` config key takes a list of users to configure. The first entry in this list is used as the default user for the system. To preserve the standard diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index bb291ff8..ca7d0d5b 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -74,7 +74,7 @@ def validate_cloudconfig_schema(config, schema, strict=False): try: from jsonschema import Draft4Validator, FormatChecker except ImportError: - logging.warning( + logging.debug( 'Ignoring schema validation. python-jsonschema is not present') return validator = Draft4Validator(schema, format_checker=FormatChecker()) diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 9c5202f5..0554d1f7 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -1,8 +1,8 @@ # Add groups to the system -# The following example adds the ubuntu group with members foo and bar and -# the group cloud-users. +# The following example adds the ubuntu group with members 'root' and 'sys' +# and the empty group cloud-users. groups: - - ubuntu: [foo,bar] + - ubuntu: [root,sys] - cloud-users # Add users to the system. Users are added after groups are added.
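A minimal cloud-config sketch of the ordering rule documented above: groups are created before users, so a group's member list may only name accounts that already exist, while new users join groups through their own 'groups:' key. The group and user names here are illustrative, condensed from the updated example and test configs in this patch.

  #cloud-config
  groups:
    # 'root' already exists, so it can be listed as a group member here.
    - secret: [root]
    - cloud-users
  users:
    - default
    # 'barfoo' is created after the groups above, so it joins them via its
    # own 'groups:' key rather than being listed under a group definition.
    - name: barfoo
      groups: [cloud-users, secret]
      lock_passwd: true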
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index bb545ab9..b2b5b4b1 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -72,6 +72,10 @@ class CloudTestCase(unittest.TestCase): result = self.get_status_data(self.get_data_file('result.json')) self.assertEqual(len(result['errors']), 0) + def test_no_warnings_in_log(self): + """Warnings should not be found in the log.""" + self.assertNotIn("WARN", self.get_data_file('cloud-init.log')) + class PasswordListTest(CloudTestCase): """Base password test case class.""" diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py index 67af527b..93b7a82d 100644 --- a/tests/cloud_tests/testcases/examples/including_user_groups.py +++ b/tests/cloud_tests/testcases/examples/including_user_groups.py @@ -40,4 +40,10 @@ class TestUserGroups(base.CloudTestCase): out = self.get_data_file('user_cloudy') self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:') + def test_user_root_in_secret(self): + """Test root user is in 'secret' group.""" + user, _, groups = self.get_data_file('root_groups').partition(":") + self.assertIn("secret", groups.split(), + msg="User root is not in group 'secret'") + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.yaml b/tests/cloud_tests/testcases/examples/including_user_groups.yaml index 0aa7ad21..469d03c3 100644 --- a/tests/cloud_tests/testcases/examples/including_user_groups.yaml +++ b/tests/cloud_tests/testcases/examples/including_user_groups.yaml @@ -8,7 +8,7 @@ cloud_config: | #cloud-config # Add groups to the system groups: - - secret: [foobar,barfoo] + - secret: [root] - cloud-users # Add users to the system. Users are added after groups are added. @@ -24,7 +24,7 @@ cloud_config: | - name: barfoo gecos: Bar B. Foo sudo: ALL=(ALL) NOPASSWD:ALL - groups: cloud-users + groups: [cloud-users, secret] lock_passwd: true - name: cloudy gecos: Magic Cloud App Daemon User @@ -49,5 +49,8 @@ collect_scripts: user_cloudy: | #!/bin/bash getent passwd cloudy + root_groups: | + #!/bin/bash + groups root # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py index 67af527b..93b7a82d 100644 --- a/tests/cloud_tests/testcases/modules/user_groups.py +++ b/tests/cloud_tests/testcases/modules/user_groups.py @@ -40,4 +40,10 @@ class TestUserGroups(base.CloudTestCase): out = self.get_data_file('user_cloudy') self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:') + def test_user_root_in_secret(self): + """Test root user is in 'secret' group.""" + user, _, groups = self.get_data_file('root_groups').partition(":") + self.assertIn("secret", groups.split(), + msg="User root is not in group 'secret'") + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/user_groups.yaml b/tests/cloud_tests/testcases/modules/user_groups.yaml index 71cc9da3..22b5d706 100644 --- a/tests/cloud_tests/testcases/modules/user_groups.yaml +++ b/tests/cloud_tests/testcases/modules/user_groups.yaml @@ -7,7 +7,7 @@ cloud_config: | #cloud-config # Add groups to the system groups: - - secret: [foobar,barfoo] + - secret: [root] - cloud-users # Add users to the system. Users are added after groups are added. @@ -23,7 +23,7 @@ cloud_config: | - name: barfoo gecos: Bar B. 
Foo sudo: ALL=(ALL) NOPASSWD:ALL - groups: cloud-users + groups: [cloud-users, secret] lock_passwd: true - name: cloudy gecos: Magic Cloud App Daemon User @@ -48,5 +48,8 @@ collect_scripts: user_cloudy: | #!/bin/bash getent passwd cloudy + root_groups: | + #!/bin/bash + groups root # vi: ts=4 expandtab -- cgit v1.2.3 From ee90a6cda4083479d5e9e2aa26887d3db91a3785 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 19 Oct 2017 12:58:12 -0600 Subject: tools: disable fastestmirror if using proxy Per CentOS documentation, using the fastestmirror plugin is effective at finding the fastest mirror, unless you are behind a proxy. In that case you should disable it. Therefore, in our tests, if we are setting the proxy we should also disable the fastestmirror plugin. --- tools/run-centos | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/run-centos b/tools/run-centos index d44d5145..e87b2025 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -153,6 +153,7 @@ start_container() { if [ ! -z "${http_proxy-}" ]; then debug 1 "configuring proxy ${http_proxy}" inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" + inside "$name" sed -i s/enabled=1/enabled=0/ /etc/yum/pluginconf.d/fastestmirror.conf fi } -- cgit v1.2.3 From 6bc504e41666329631cdfd5b947ed5b0e2529a76 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 20 Oct 2017 13:24:22 -0600 Subject: ntp: fix config module schema to allow empty ntp config Fix three things related to the ntp module: 1. Fix invalid cloud-config schema in the integration test, which provided empty dicts instead of empty lists for pools and servers 2. Correct logic in the ntp module to allow support for the minimal cloud-config 'ntp:' without raising a RuntimeError. Docs and schema definitions already describe that cloud-config's ntp can be empty. An ntp configuration with neither pools nor servers will be configured with a default set of ntp pools. As such, the ntp module now officially allows the following ntp cloud-configs: - ntp: - ntp: {} - ntp: servers: [] pools: [] 3. Add a simple unit test which validates all cloud-config provided to our integration tests to ensure it adheres to any defined module schema, so that as more jsonschema definitions are added, we validate our integration test configs.
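Written out as separate cloud-config files, the three minimal ntp forms listed above look as follows; per the commit message, each is a standalone config and each results in the default set of ntp pools being configured.

  # form 1: bare key
  #cloud-config
  ntp:

  # form 2: empty mapping
  #cloud-config
  ntp: {}

  # form 3: explicit empty lists
  #cloud-config
  ntp:
    servers: []
    pools: []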
LP: #1724951 --- cloudinit/config/cc_ntp.py | 4 ++- tests/cloud_tests/testcases/modules/ntp.yaml | 4 +-- tests/unittests/test_handler/test_handler_ntp.py | 23 ++++++++------- tests/unittests/test_handler/test_schema.py | 37 +++++++++++++++++++++++- 4 files changed, 53 insertions(+), 15 deletions(-) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 15ae1ecd..d43d060c 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -100,7 +100,9 @@ def handle(name, cfg, cloud, log, _args): LOG.debug( "Skipping module named %s, not present or disabled by cfg", name) return - ntp_cfg = cfg.get('ntp', {}) + ntp_cfg = cfg['ntp'] + if ntp_cfg is None: + ntp_cfg = {} # Allow empty config which will install the package # TODO drop this when validate_cloudconfig_schema is strict=True if not isinstance(ntp_cfg, (dict)): diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml index fbef431b..2530d72e 100644 --- a/tests/cloud_tests/testcases/modules/ntp.yaml +++ b/tests/cloud_tests/testcases/modules/ntp.yaml @@ -4,8 +4,8 @@ cloud_config: | #cloud-config ntp: - pools: {} - servers: {} + pools: [] + servers: [] collect_scripts: ntp_installed: | #!/bin/bash diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 4f291248..3abe5786 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -293,23 +293,24 @@ class TestNtp(FilesystemMockingTestCase): def test_ntp_handler_schema_validation_allows_empty_ntp_config(self): """Ntp schema validation allows for an empty ntp: configuration.""" - invalid_config = {'ntp': {}} + valid_empty_configs = [{'ntp': {}}, {'ntp': None}] distro = 'ubuntu' cc = self._get_cloud(distro) ntp_conf = os.path.join(self.new_root, 'ntp.conf') with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + for valid_empty_config in valid_empty_configs: + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, []) + with open(ntp_conf) as stream: + content = stream.read() + default_pools = [ + "{0}.{1}.pool.ntp.org".format(x, distro) + for x in range(0, cc_ntp.NR_POOL_SERVERS)] + self.assertEqual( + "servers []\npools {0}\n".format(default_pools), + content) self.assertNotIn('Invalid config:', self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content) @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency") def test_ntp_handler_schema_validation_warns_non_string_item_type(self): diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index b8fc8930..648573f6 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -4,11 +4,12 @@ from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, get_schema_doc, get_schema, validate_cloudconfig_file, validate_cloudconfig_schema, main) -from cloudinit.util import write_file +from cloudinit.util import subp, write_file from cloudinit.tests.helpers import CiTestCase, mock, 
skipIf from copy import copy +import os from six import StringIO from textwrap import dedent from yaml import safe_load @@ -364,4 +365,38 @@ class MainTest(CiTestCase): self.assertIn( 'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue()) + +class CloudTestsIntegrationTest(CiTestCase): + """Validate all cloud-config yaml schema provided in integration tests. + + It is less expensive to have unittests validate schema of all cloud-config + yaml provided to integration tests, than to run an integration test which + raises Warnings or errors on invalid cloud-config schema. + """ + + @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency") + def test_all_integration_test_cloud_config_schema(self): + """Validate schema of cloud_tests yaml files looking for warnings.""" + schema = get_schema() + testsdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + integration_testdir = os.path.sep.join( + [testsdir, 'cloud_tests', 'testcases']) + errors = [] + out, _ = subp(['find', integration_testdir, '-name', '*yaml']) + for filename in out.splitlines(): + test_cfg = safe_load(open(filename)) + cloud_config = test_cfg.get('cloud_config') + if cloud_config: + cloud_config = safe_load( + cloud_config.replace("#cloud-config\n", "")) + try: + validate_cloudconfig_schema( + cloud_config, schema, strict=True) + except SchemaValidationError as e: + errors.append( + '{0}: {1}'.format( + filename, e)) + if errors: + raise AssertionError(', '.join(errors)) + # vi: ts=4 expandtab syntax=python -- cgit v1.2.3 From a51968d50e7cdecabaf255c26386fd82f6640cc3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Oct 2017 14:58:24 -0400 Subject: citest: show the actual class name in results. Tests are currently run by creating a temporary subclass of each class and then executing it (in get_suites). When running the test suite, the output would contain the temporary class name. That was less than useful, and in batch runs made it almost impossible to identify which test case had an error.
This change goes from output of: FAIL: test_no_warnings_in_log \ (tests.cloud_tests.testcases.get_suite..tmp) To FAIL: test_no_warnings_in_log \ (tests.cloud_tests.testcases.modules.ntp.TestNtp) --- tests/cloud_tests/testcases/__init__.py | 7 +++++++ tests/cloud_tests/testcases/base.py | 4 ---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py index 47217ce6..a29a0928 100644 --- a/tests/cloud_tests/testcases/__init__.py +++ b/tests/cloud_tests/testcases/__init__.py @@ -5,6 +5,7 @@ import importlib import inspect import unittest +from unittest.util import strclass from tests.cloud_tests import config from tests.cloud_tests.testcases.base import CloudTestCase as base_test @@ -37,6 +38,12 @@ def get_suite(test_name, data, conf): class tmp(test_class): + _realclass = test_class + + def __str__(self): + return "%s (%s)" % (self._testMethodName, + strclass(self._realclass)) + @classmethod def setUpClass(cls): cls.data = data diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index b2b5b4b1..d3586e3c 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -16,10 +16,6 @@ class CloudTestCase(unittest.TestCase): conf = None _cloud_config = None - def shortDescription(self): - """Prevent nose from using docstrings.""" - return None - @property def cloud_config(self): """Get the cloud-config used by the test.""" -- cgit v1.2.3 From c06eea972eb4b7bfa68f4f542f2fb67ea1d455ac Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Oct 2017 16:05:04 -0600 Subject: citest: fix remaining warnings raised by integration tests. There was fallout in a full integration test run from my adding of test_no_warnings_in_log which asserted that there could not be a WARNING found in the /var/log/cloud-init.log This fixes 2 of the cases: * TestCommandOutputSimple had a valid WARNING written, so adjust its test case to allow for that. * TestLxdDir had a valid config in the test but the module would log a WARNING, so fix the module. Also updates lxd unit tests to look for WARN themselves. --- cloudinit/config/cc_lxd.py | 2 +- tests/cloud_tests/testcases/base.py | 6 +++++- .../cloud_tests/testcases/main/command_output_simple.py | 16 ++++++++++++++++ tests/unittests/test_handler/test_handler_lxd.py | 16 ++++++++-------- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index e6262f8c..09374d2e 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -72,7 +72,7 @@ def handle(name, cfg, cloud, log, args): type(init_cfg)) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge') + bridge_cfg = lxd_cfg.get('bridge', {}) if not isinstance(bridge_cfg, dict): log.warn("lxd/bridge config must be a dictionary. 
found a '%s'", type(bridge_cfg)) diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index d3586e3c..1706f59b 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -70,7 +70,11 @@ class CloudTestCase(unittest.TestCase): def test_no_warnings_in_log(self): """Warnings should not be found in the log.""" - self.assertNotIn("WARN", self.get_data_file('cloud-init.log')) + self.assertEqual( + [], + [l for l in self.get_data_file('cloud-init.log').splitlines() + if 'WARN' in l], + msg="'WARN' found inside cloud-init.log") class PasswordListTest(CloudTestCase): diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py index fe4c7670..857881cb 100644 --- a/tests/cloud_tests/testcases/main/command_output_simple.py +++ b/tests/cloud_tests/testcases/main/command_output_simple.py @@ -15,4 +15,20 @@ class TestCommandOutputSimple(base.CloudTestCase): data.splitlines()[-1].strip()) # TODO: need to test that all stages redirected here + def test_no_warnings_in_log(self): + """Warnings should not be found in the log. + + This class redirected stderr and stdout, so it expects to find + a warning in cloud-init.log to that effect.""" + redirect_msg = 'Stdout, stderr changing to' + warnings = [ + l for l in self.get_data_file('cloud-init.log').splitlines() + if 'WARN' in l] + self.assertEqual( + [], [w for w in warnings if redirect_msg not in w], + msg="'WARN' found inside cloud-init.log") + self.assertEqual( + 1, len(warnings), + msg="Did not find %s in cloud-init.log" % redirect_msg) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index f132a778..e0d9ab6c 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -5,17 +5,16 @@ from cloudinit.sources import DataSourceNoCloud from cloudinit import (distros, helpers, cloud) from cloudinit.tests import helpers as t_help -import logging - try: from unittest import mock except ImportError: import mock -LOG = logging.getLogger(__name__) +class TestLxd(t_help.CiTestCase): + + with_logs = True -class TestLxd(t_help.TestCase): lxd_cfg = { 'lxd': { 'init': { @@ -41,7 +40,7 @@ class TestLxd(t_help.TestCase): def test_lxd_init(self, mock_util): cc = self._get_cloud('ubuntu') mock_util.which.return_value = True - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, []) + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertTrue(mock_util.which.called) init_call = mock_util.subp.call_args_list[0][0][0] self.assertEqual(init_call, @@ -55,7 +54,8 @@ class TestLxd(t_help.TestCase): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() mock_util.which.return_value = None - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, []) + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + self.assertNotIn('WARN', self.logs.getvalue()) self.assertTrue(cc.distro.install_packages.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) @@ -64,7 +64,7 @@ class TestLxd(t_help.TestCase): def test_no_init_does_nothing(self, mock_util): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, LOG, []) + cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) @@ -72,7 +72,7 @@ 
class TestLxd(t_help.TestCase): def test_no_lxd_does_nothing(self, mock_util): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'package_update': True}, cc, LOG, []) + cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) -- cgit v1.2.3 From 5b6fd3ae353dd65e57ab138d7dca640d1c88d32c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 23 Oct 2017 14:11:38 -0400 Subject: tools: make yum package installation more reliable During continuous integration tests, we're seeing quite a lot of unreliability when running 'yum install'. The change here is to move to retrying a run of 'yum install --downloadonly' up to 10 times or until it succeeds, and then running yum install from the cache. This seems safer in general than just retrying an install operation, since we are specifically affected by the download phase failing. Also present are some flake8 fixes to tools/read-dependencies. --- tools/read-dependencies | 41 ++++++++++++++++++++++++++++++++++++----- tools/run-centos | 17 ++++++++++++++++- 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/tools/read-dependencies b/tools/read-dependencies index 2a648680..421f470a 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -30,9 +30,35 @@ DISTRO_PKG_TYPE_MAP = { 'suse': 'suse' } -DISTRO_INSTALL_PKG_CMD = { +MAYBE_RELIABLE_YUM_INSTALL = [ + 'sh', '-c', + """ + error() { echo "$@" 1>&2; } + n=0; max=10; + bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1" + while n=$(($n+1)); do + error ":: running $bcmd $* [$n/$max]" + $bcmd "$@" + r=$? + [ $r -eq 0 ] && break + [ $n -ge $max ] && { error "gave up on $bcmd"; exit $r; } + nap=$(($n*5)) + error ":: failed [$r] ($n/$max). sleeping $nap." + sleep $nap + done + error ":: running yum install --cacheonly --assumeyes $*" + yum install --cacheonly --assumeyes "$@" + """, + 'reliable-yum-install'] + +DRY_DISTRO_INSTALL_PKG_CMD = { 'centos': ['yum', 'install', '--assumeyes'], 'redhat': ['yum', 'install', '--assumeyes'], +} + +DISTRO_INSTALL_PKG_CMD = { + 'centos': MAYBE_RELIABLE_YUM_INSTALL, + 'redhat': MAYBE_RELIABLE_YUM_INSTALL, 'debian': ['apt', 'install', '-y'], 'ubuntu': ['apt', 'install', '-y'], 'opensuse': ['zypper', 'install'], @@ -80,8 +106,8 @@ def get_parser(): help='Additionally install continuous integration system packages ' 'required for build and test automation.') parser.add_argument( - '-v', '--python-version', type=str, dest='python_version', default=None, - choices=["2", "3"], + '-v', '--python-version', type=str, dest='python_version', + default=None, choices=["2", "3"], help='Override the version of python we want to generate system ' 'package dependencies for.
Defaults to the version of python ' 'this script is called with') @@ -219,10 +245,15 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False): '(dryrun)' if dry_run else '', ' '.join(pkg_list))) install_cmd = [] if dry_run: - install_cmd.append('echo') + install_cmd.append('echo') if os.geteuid() != 0: install_cmd.append('sudo') - install_cmd.extend(DISTRO_INSTALL_PKG_CMD[distro]) + + cmd = DISTRO_INSTALL_PKG_CMD[distro] + if dry_run and distro in DRY_DISTRO_INSTALL_PKG_CMD: + cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro] + install_cmd.extend(cmd) + if distro in ['centos', 'redhat']: # CentOS and Redhat need epel-release to access oauthlib and jsonschema subprocess.check_call(install_cmd + ['epel-release']) diff --git a/tools/run-centos b/tools/run-centos index e87b2025..d58ef3e8 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -123,7 +123,22 @@ prep() { return 0 fi error "Installing prep packages: ${needed}" - yum install --assumeyes ${needed} + set -- $needed + local n max r + n=0; max=10; + bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1" + while n=$(($n+1)); do + error ":: running $bcmd $* [$n/$max]" + $bcmd "$@" + r=$? + [ $r -eq 0 ] && break + [ $n -ge $max ] && { error "gave up on $bcmd"; exit $r; } + nap=$(($n*5)) + error ":: failed [$r] ($n/$max). sleeping $nap." + sleep $nap + done + error ":: running yum install --cacheonly --assumeyes $*" + yum install --cacheonly --assumeyes "$@" } start_container() { -- cgit v1.2.3 From 17a15f9e0ae78e4fc4e24fab0caebdf78f06ef66 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 23 Oct 2017 14:34:23 -0600 Subject: resizefs: Fix regression when system booted with root=PARTUUID= A recent cleanup of the resizefs module broke resizing when a system was booted with root=PARTUUID= and the device /dev/root does not exist. This path is exposed with the Ubuntu 16.04 but not with Ubuntu 17.10. A recreate exists under bug 1684869. LP: #1725067 --- cloudinit/config/cc_resizefs.py | 43 ++++------ .../test_handler/test_handler_resizefs.py | 91 ++++++++++++++-------- 2 files changed, 70 insertions(+), 64 deletions(-) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index f774baa3..0d282e63 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -145,25 +145,6 @@ RESIZE_FS_PRECHECK_CMDS = { } -def rootdev_from_cmdline(cmdline): - found = None - for tok in cmdline.split(): - if tok.startswith("root="): - found = tok[5:] - break - if found is None: - return None - - if found.startswith("/dev/"): - return found - if found.startswith("LABEL="): - return "/dev/disk/by-label/" + found[len("LABEL="):] - if found.startswith("UUID="): - return "/dev/disk/by-uuid/" + found[len("UUID="):] - - return "/dev/" + found - - def can_skip_resize(fs_type, resize_what, devpth): fstype_lc = fs_type.lower() for i, func in RESIZE_FS_PRECHECK_CMDS.items(): @@ -172,14 +153,15 @@ def can_skip_resize(fs_type, resize_what, devpth): return False -def is_device_path_writable_block(devpath, info, log): - """Return True if devpath is a writable block device. +def maybe_get_writable_device_path(devpath, info, log): + """Return updated devpath if the devpath is a writable block device. - @param devpath: Path to the root device we want to resize. + @param devpath: Requested path to the root device we want to resize. @param info: String representing information about the requested device. @param log: Logger to which logs will be added upon error. 
- @returns Boolean True if block device is writable + @returns devpath or updated devpath per kernel commandline if the device + path is a writable block device, returns None otherwise. """ container = util.is_container() @@ -189,12 +171,12 @@ def is_device_path_writable_block(devpath, info, log): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warn("Unable to find device '/dev/root'") - return False + return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) if devpath == 'overlayroot': log.debug("Not attempting to resize devpath '%s': %s", devpath, info) - return False + return None try: statret = os.stat(devpath) @@ -207,7 +189,7 @@ def is_device_path_writable_block(devpath, info, log): devpath, info) else: raise exc - return False + return None if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): if container: @@ -216,8 +198,8 @@ def is_device_path_writable_block(devpath, info, log): else: log.warn("device '%s' not a block device. cannot resize: %s" % (devpath, info)) - return False - return True + return None + return devpath # The writable block devpath def handle(name, cfg, _cloud, log, args): @@ -242,8 +224,9 @@ def handle(name, cfg, _cloud, log, args): info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) log.debug("resize_info: %s" % info) - if not is_device_path_writable_block(devpth, info, log): - return + devpth = maybe_get_writable_device_path(devpth, info, log) + if not devpth: + return # devpath was not a writable block device resizer = None if can_skip_resize(fs_type, resize_what, devpth): diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 3e5d436c..29d5574d 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -1,9 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from cloudinit.config.cc_resizefs import ( - can_skip_resize, handle, is_device_path_writable_block, - rootdev_from_cmdline) + can_skip_resize, handle, maybe_get_writable_device_path) +from collections import namedtuple import logging import textwrap @@ -138,47 +138,48 @@ class TestRootDevFromCmdline(CiTestCase): invalid_cases = [ 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', ''] for case in invalid_cases: - self.assertIsNone(rootdev_from_cmdline(case)) + self.assertIsNone(util.rootdev_from_cmdline(case)) def test_rootdev_from_cmdline_with_root_startswith_dev(self): """Return the cmdline root when the path starts with /dev.""" self.assertEqual( - '/dev/this', rootdev_from_cmdline('asdf root=/dev/this')) + '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this')) def test_rootdev_from_cmdline_with_root_without_dev_prefix(self): """Add /dev prefix to cmdline root when the path lacks the prefix.""" - self.assertEqual('/dev/this', rootdev_from_cmdline('asdf root=this')) + self.assertEqual( + '/dev/this', util.rootdev_from_cmdline('asdf root=this')) def test_rootdev_from_cmdline_with_root_with_label(self): """When cmdline root contains a LABEL, our root is disk/by-label.""" self.assertEqual( '/dev/disk/by-label/unique', - rootdev_from_cmdline('asdf root=LABEL=unique')) + util.rootdev_from_cmdline('asdf root=LABEL=unique')) def test_rootdev_from_cmdline_with_root_with_uuid(self): """When cmdline root contains a UUID, our root is disk/by-uuid.""" self.assertEqual( '/dev/disk/by-uuid/adsfdsaf-adsf', - rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) + util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) -class TestIsDevicePathWritableBlock(CiTestCase): +class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): with_logs = True - def test_is_device_path_writable_block_false_on_overlayroot(self): + def test_maybe_get_writable_device_path_none_on_overlayroot(self): """When devpath is overlayroot (on MAAS), is_dev_writable is False.""" info = 'does not matter' - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': False}}, - is_device_path_writable_block, 'overlayroot', info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, 'overlayroot', info, LOG) + self.assertIsNone(devpath) self.assertIn( "Not attempting to resize devpath 'overlayroot'", self.logs.getvalue()) - def test_is_device_path_writable_block_warns_missing_cmdline_root(self): + def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self): """When root does not exist isn't in the cmdline, log warning.""" info = 'does not matter' @@ -190,43 +191,43 @@ class TestIsDevicePathWritableBlock(CiTestCase): exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' with mock.patch(exists_mock_path) as m_exists: m_exists.return_value = False - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': False}, 'get_mount_info': {'side_effect': fake_mount_info}, 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - is_device_path_writable_block, '/dev/root', info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, '/dev/root', info, LOG) + self.assertIsNone(devpath) logs = self.logs.getvalue() self.assertIn("WARNING: Unable to find device '/dev/root'", logs) - def test_is_device_path_writable_block_does_not_exist(self): + def test_maybe_get_writable_device_path_does_not_exist(self): """When devpath does not exist, 
a warning is logged.""" info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': False}}, - is_device_path_writable_block, '/I/dont/exist', info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, '/I/dont/exist', info, LOG) + self.assertIsNone(devpath) self.assertIn( "WARNING: Device '/I/dont/exist' did not exist." ' cannot resize: %s' % info, self.logs.getvalue()) - def test_is_device_path_writable_block_does_not_exist_in_container(self): + def test_maybe_get_writable_device_path_does_not_exist_in_container(self): """When devpath does not exist in a container, log a debug message.""" info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': True}}, - is_device_path_writable_block, '/I/dont/exist', info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, '/I/dont/exist', info, LOG) + self.assertIsNone(devpath) self.assertIn( "DEBUG: Device '/I/dont/exist' did not exist in container." ' cannot resize: %s' % info, self.logs.getvalue()) - def test_is_device_path_writable_block_raises_oserror(self): + def test_maybe_get_writable_device_path_raises_oserror(self): """When unexpected OSError is raises by os.stat it is reraised.""" info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' with self.assertRaises(OSError) as context_manager: @@ -234,41 +235,63 @@ class TestIsDevicePathWritableBlock(CiTestCase): 'cloudinit.config.cc_resizefs', {'util.is_container': {'return_value': True}, 'os.stat': {'side_effect': OSError('Something unexpected')}}, - is_device_path_writable_block, '/I/dont/exist', info, LOG) + maybe_get_writable_device_path, '/I/dont/exist', info, LOG) self.assertEqual( 'Something unexpected', str(context_manager.exception)) - def test_is_device_path_writable_block_non_block(self): + def test_maybe_get_writable_device_path_non_block(self): """When device is not a block device, emit warning return False.""" fake_devpath = self.tmp_path('dev/readwrite') util.write_file(fake_devpath, '', mode=0o600) # read-write info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': False}}, - is_device_path_writable_block, fake_devpath, info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, fake_devpath, info, LOG) + self.assertIsNone(devpath) self.assertIn( "WARNING: device '{0}' not a block device. cannot resize".format( fake_devpath), self.logs.getvalue()) - def test_is_device_path_writable_block_non_block_on_container(self): + def test_maybe_get_writable_device_path_non_block_on_container(self): """When device is non-block device in container, emit debug log.""" fake_devpath = self.tmp_path('dev/readwrite') util.write_file(fake_devpath, '', mode=0o600) # read-write info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - is_writable = wrap_and_call( + devpath = wrap_and_call( 'cloudinit.config.cc_resizefs.util', {'is_container': {'return_value': True}}, - is_device_path_writable_block, fake_devpath, info, LOG) - self.assertFalse(is_writable) + maybe_get_writable_device_path, fake_devpath, info, LOG) + self.assertIsNone(devpath) self.assertIn( "DEBUG: device '{0}' not a block device in container." 
' cannot resize'.format(fake_devpath), self.logs.getvalue()) + def test_maybe_get_writable_device_path_returns_cmdline_root(self): + """When root device is UUID in kernel commandline, update devpath.""" + # XXX Long-term we want to use FilesystemMocking test to avoid + # touching os.stat. + FakeStat = namedtuple( + 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. + info = 'dev=/dev/root mnt_point=/ path=/does/not/matter' + devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs', + {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'}, + 'util.is_container': False, + 'os.path.exists': False, # /dev/root doesn't exist + 'os.stat': { + 'return_value': FakeStat(25008, 0, 1)} # char block device + }, + maybe_get_writable_device_path, '/dev/root', info, LOG) + self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath) + self.assertIn( + "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'" + " per kernel cmdline", + self.logs.getvalue()) + # vi: ts=4 expandtab -- cgit v1.2.3
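The root= mapping that the updated tests exercise through util.rootdev_from_cmdline is the same logic removed from cc_resizefs.py above; the helper now lives in cloudinit.util. A rough standalone sketch of that mapping, for reference (simplified, not the exact cloudinit.util implementation):

    def rootdev_from_cmdline(cmdline):
        """Map the kernel command line's root= token to a device path."""
        found = None
        for tok in cmdline.split():
            if tok.startswith("root="):
                found = tok[len("root="):]
                break
        if found is None:
            return None
        if found.startswith("/dev/"):
            return found
        if found.startswith("LABEL="):
            return "/dev/disk/by-label/" + found[len("LABEL="):]
        if found.startswith("UUID="):
            return "/dev/disk/by-uuid/" + found[len("UUID="):]
        # Anything else (a bare device name) is assumed to live under /dev.
        return "/dev/" + found

    # The case exercised by the new resizefs unit test above:
    assert rootdev_from_cmdline(
        "asdf root=UUID=my-uuid") == "/dev/disk/by-uuid/my-uuid"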