| author | Chad Smith <chad.smith@canonical.com> | 2018-10-03 12:10:23 -0600 |
|---|---|---|
| committer | Chad Smith <chad.smith@canonical.com> | 2018-10-03 12:10:23 -0600 |
| commit | d6347e1c439eda7f43d9620dac2b461e980e1ae9 (patch) | |
| tree | 08410263488d11a2a29edcc620575ed1b028100e /tests | |
| parent | 564793a76b9c9add1ee81bab4919c8dccd45a33d (diff) | |
| parent | e28000457591bde9f22d6b7a538b1fc33349d780 (diff) | |
merge from master at 18.4
Diffstat (limited to 'tests')
45 files changed, 3066 insertions, 942 deletions
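Before the diff itself, the most visible structural change in this merge is worth noting: tests/cloud_tests/testcases/__init__.py replaces the anonymous class built inside get_suite() with a named get_test_class() factory, and the base verifier gains a maybeSkipTest() classmethod so that collect.py can skip a test (for example the LXD tests on cosmic, LP: #1795036) before spending time booting an instance. The sketch below is a simplified illustration of that pattern, not the exact cloud-init code — it uses stdlib unittest instead of unittest2 and passes the test class in directly rather than discovering it from a module name:

```python
# Simplified sketch of the dynamic test-subclass pattern this merge introduces:
# a factory builds a subclass carrying per-run data, and exposes a classmethod
# hook that the collect stage can call before provisioning an instance.
import unittest


class CloudTestCase(unittest.TestCase):
    """Stand-in for the real base verifier class."""
    data = {}
    conf = None

    @classmethod
    def maybeSkipTest(cls):
        """Subclasses override this to raise unittest.SkipTest."""
        pass


def get_test_class(test_class, test_data, test_conf):
    """Return a dynamic subclass bound to this run's data and config."""

    class DynamicTestSubclass(test_class):
        _realclass = test_class
        data = test_data
        conf = test_conf

        @classmethod
        def setUpClass(cls):
            # Skip checks also apply at verify time, not only at collect time.
            cls.maybeSkipTest()

    return DynamicTestSubclass


class TestLxdBridge(CloudTestCase):
    @classmethod
    def maybeSkipTest(cls):
        if cls.data.get('os_name') == 'cosmic':
            raise unittest.SkipTest('LP: #1795036')

    def test_lxd(self):
        self.assertIn('/lxd', '/snap/bin/lxd')


if __name__ == '__main__':
    cls = get_test_class(TestLxdBridge, {'os_name': 'cosmic'}, {})
    try:
        cls.maybeSkipTest()  # what collect.py does before booting an instance
    except unittest.SkipTest as e:
        print('skipping: %s' % e)
```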
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 75b50616..642745d8 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -9,6 +9,7 @@ from cloudinit import util as c_util from tests.cloud_tests import (config, LOG, setup_image, util) from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single) from tests.cloud_tests import platforms +from tests.cloud_tests.testcases import base, get_test_class def collect_script(instance, base_dir, script, script_name): @@ -63,6 +64,7 @@ def collect_test_data(args, snapshot, os_name, test_name): res = ({}, 1) # load test config + test_name_in = test_name test_name = config.path_to_name(test_name) test_config = config.load_test_config(test_name) user_data = test_config['cloud_config'] @@ -75,6 +77,16 @@ def collect_test_data(args, snapshot, os_name, test_name): LOG.warning('test config %s is not enabled, skipping', test_name) return ({}, 0) + test_class = get_test_class( + config.name_to_module(test_name_in), + test_data={'platform': snapshot.platform_name, 'os_name': os_name}, + test_conf=test_config['cloud_config']) + try: + test_class.maybeSkipTest() + except base.SkipTest as s: + LOG.warning('skipping test config %s: %s', test_name, s) + return ({}, 0) + # if testcase requires a feature flag that the image does not support, # skip the testcase with a warning req_features = test_config.get('required_features', []) diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index 95bc3b16..529e79cd 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -97,7 +97,8 @@ class Instance(TargetBase): return self._ssh_client if not self.ssh_ip or not self.ssh_port: - raise ValueError + raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" % + (self.ssh_ip, self.ssh_port)) client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index d396519f..83c97ab4 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -12,6 +12,8 @@ from tests.cloud_tests.util import PlatformError from ..instances import Instance +from pylxd import exceptions as pylxd_exc + class LXDInstance(Instance): """LXD container backed instance.""" @@ -30,6 +32,9 @@ class LXDInstance(Instance): @param config: image config @param features: supported feature flags """ + if not pylxd_container: + raise ValueError("Invalid value pylxd_container: %s" % + pylxd_container) self._pylxd_container = pylxd_container super(LXDInstance, self).__init__( platform, name, properties, config, features) @@ -40,9 +45,19 @@ class LXDInstance(Instance): @property def pylxd_container(self): """Property function.""" + if self._pylxd_container is None: + raise RuntimeError( + "%s: Attempted use of pylxd_container after deletion." 
% self) self._pylxd_container.sync() return self._pylxd_container + def __str__(self): + return ( + '%s(name=%s) status=%s' % + (self.__class__.__name__, self.name, + ("deleted" if self._pylxd_container is None else + self.pylxd_container.status))) + def _execute(self, command, stdin=None, env=None): if env is None: env = {} @@ -165,10 +180,27 @@ class LXDInstance(Instance): self.shutdown(wait=wait) self.start(wait=wait) - def shutdown(self, wait=True): + def shutdown(self, wait=True, retry=1): """Shutdown instance.""" - if self.pylxd_container.status != 'Stopped': + if self.pylxd_container.status == 'Stopped': + return + + try: + LOG.debug("%s: shutting down (wait=%s)", self, wait) self.pylxd_container.stop(wait=wait) + except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e: + # An exception happens here sometimes (LP: #1783198) + # LOG it, and try again. + LOG.warning( + ("%s: shutdown(retry=%d) caught %s in shutdown " + "(response=%s): %s"), + self, retry, e.__class__.__name__, e.response, e) + if isinstance(e, pylxd_exc.NotFound): + LOG.debug("container_exists(%s) == %s", + self.name, self.platform.container_exists(self.name)) + if retry == 0: + raise e + return self.shutdown(wait=wait, retry=retry - 1) def start(self, wait=True, wait_for_cloud_init=False): """Start instance.""" @@ -189,12 +221,14 @@ class LXDInstance(Instance): def destroy(self): """Clean up instance.""" + LOG.debug("%s: deleting container.", self) self.unfreeze() self.shutdown() self.pylxd_container.delete(wait=True) + self._pylxd_container = None + if self.platform.container_exists(self.name): - raise OSError('container {} was not properly removed' - .format(self.name)) + raise OSError('%s: container was not properly removed' % self) if self._console_log_file and os.path.exists(self._console_log_file): os.unlink(self._console_log_file) shutil.rmtree(self.tmpd) diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 4e195709..39f4517f 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -4,6 +4,7 @@ from functools import partial import os +import yaml from tests.cloud_tests import LOG from tests.cloud_tests import stage, util @@ -220,7 +221,14 @@ def setup_image(args, image): calls = [partial(stage.run_single, desc, partial(func, args, image)) for name, func, desc in handlers if getattr(args, name, None)] - LOG.info('setting up %s', image) + try: + data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True)) + info = ' '.join(["%s=%s" % (k, data.get(k)) + for k in ("build_name", "serial") if k in data]) + except Exception as e: + info = "N/A (%s)" % e + + LOG.info('setting up %s (%s)', image, info) res = stage.run_stage( 'set up for {}'.format(image), calls, continue_after_error=False) return res diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml index a16d1ddf..fb9a5d27 100644 --- a/tests/cloud_tests/testcases.yaml +++ b/tests/cloud_tests/testcases.yaml @@ -27,6 +27,10 @@ base_test_data: package-versions: | #!/bin/sh dpkg-query --show + build.info: | + #!/bin/sh + binfo=/etc/cloud/build.info + [ -f "$binfo" ] && cat "$binfo" || echo "N/A" system.journal.gz: | #!/bin/sh [ -d /run/systemd ] || { echo "not systemd."; exit 0; } diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py index bd548f5a..6bb39f77 100644 --- a/tests/cloud_tests/testcases/__init__.py +++ b/tests/cloud_tests/testcases/__init__.py @@ -4,8 +4,7 @@ import importlib import inspect -import unittest 
-from unittest.util import strclass +import unittest2 from cloudinit.util import read_conf @@ -13,7 +12,7 @@ from tests.cloud_tests import config from tests.cloud_tests.testcases.base import CloudTestCase as base_test -def discover_tests(test_name): +def discover_test(test_name): """Discover tests in test file for 'testname'. @return_value: list of test classes @@ -25,35 +24,48 @@ def discover_tests(test_name): except NameError: raise ValueError('no test verifier found at: {}'.format(testmod_name)) - return [mod for name, mod in inspect.getmembers(testmod) - if inspect.isclass(mod) and base_test in inspect.getmro(mod) and - getattr(mod, '__test__', True)] + found = [mod for name, mod in inspect.getmembers(testmod) + if (inspect.isclass(mod) + and base_test in inspect.getmro(mod) + and getattr(mod, '__test__', True))] + if len(found) != 1: + raise RuntimeError( + "Unexpected situation, multiple tests for %s: %s" % ( + test_name, found)) + return found -def get_suite(test_name, data, conf): - """Get test suite with all tests for 'testname'. - @return_value: a test suite - """ - suite = unittest.TestSuite() - for test_class in discover_tests(test_name): +def get_test_class(test_name, test_data, test_conf): + test_class = discover_test(test_name)[0] + + class DynamicTestSubclass(test_class): - class tmp(test_class): + _realclass = test_class + data = test_data + conf = test_conf + release_conf = read_conf(config.RELEASES_CONF)['releases'] - _realclass = test_class + def __str__(self): + return "%s (%s)" % (self._testMethodName, + unittest2.util.strclass(self._realclass)) - def __str__(self): - return "%s (%s)" % (self._testMethodName, - strclass(self._realclass)) + @classmethod + def setUpClass(cls): + cls.maybeSkipTest() - @classmethod - def setUpClass(cls): - cls.data = data - cls.conf = conf - cls.release_conf = read_conf(config.RELEASES_CONF)['releases'] + return DynamicTestSubclass - suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp)) +def get_suite(test_name, data, conf): + """Get test suite with all tests for 'testname'. + + @return_value: a test suite + """ + suite = unittest2.TestSuite() + suite.addTest( + unittest2.defaultTestLoader.loadTestsFromTestCase( + get_test_class(test_name, data, conf))) return suite # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 696db8dd..e18d601c 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -5,15 +5,15 @@ import crypt import json import re -import unittest +import unittest2 from cloudinit import util as c_util -SkipTest = unittest.SkipTest +SkipTest = unittest2.SkipTest -class CloudTestCase(unittest.TestCase): +class CloudTestCase(unittest2.TestCase): """Base test class for verifiers.""" # data gets populated in get_suite.setUpClass @@ -31,6 +31,11 @@ class CloudTestCase(unittest.TestCase): def is_distro(self, distro_name): return self.os_cfg['os'] == distro_name + @classmethod + def maybeSkipTest(cls): + """Present to allow subclasses to override and raise a skipTest.""" + pass + def assertPackageInstalled(self, name, version=None): """Check dpkg-query --show output for matching package name. @@ -167,11 +172,12 @@ class CloudTestCase(unittest.TestCase): 'Skipping instance-data.json test.' 
' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) - self.assertEqual( - ['ds/user-data'], instance_data['base64-encoded-keys']) + self.assertItemsEqual( + [], + instance_data['base64_encoded_keys']) ds = instance_data.get('ds', {}) v1_data = instance_data.get('v1', {}) - metadata = ds.get('meta-data', {}) + metadata = ds.get('meta_data', {}) macs = metadata.get( 'network', {}).get('interfaces', {}).get('macs', {}) if not macs: @@ -187,10 +193,10 @@ class CloudTestCase(unittest.TestCase): metadata.get('placement', {}).get('availability-zone'), 'Could not determine EC2 Availability zone placement') self.assertIsNotNone( - v1_data['availability-zone'], 'expected ec2 availability-zone') - self.assertEqual('aws', v1_data['cloud-name']) - self.assertIn('i-', v1_data['instance-id']) - self.assertIn('ip-', v1_data['local-hostname']) + v1_data['availability_zone'], 'expected ec2 availability_zone') + self.assertEqual('aws', v1_data['cloud_name']) + self.assertIn('i-', v1_data['instance_id']) + self.assertIn('ip-', v1_data['local_hostname']) self.assertIsNotNone(v1_data['region'], 'expected ec2 region') def test_instance_data_json_lxd(self): @@ -213,16 +219,14 @@ class CloudTestCase(unittest.TestCase): ' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) - self.assertEqual( - ['ds/user-data', 'ds/vendor-data'], - sorted(instance_data['base64-encoded-keys'])) - self.assertEqual('nocloud', v1_data['cloud-name']) + self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys'])) + self.assertEqual('nocloud', v1_data['cloud_name']) self.assertIsNone( - v1_data['availability-zone'], - 'found unexpected lxd availability-zone %s' % - v1_data['availability-zone']) - self.assertIn('cloud-test', v1_data['instance-id']) - self.assertIn('cloud-test', v1_data['local-hostname']) + v1_data['availability_zone'], + 'found unexpected lxd availability_zone %s' % + v1_data['availability_zone']) + self.assertIn('cloud-test', v1_data['instance_id']) + self.assertIn('cloud-test', v1_data['local_hostname']) self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) @@ -248,18 +252,17 @@ class CloudTestCase(unittest.TestCase): ' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) - self.assertEqual( - ['ds/user-data'], instance_data['base64-encoded-keys']) - self.assertEqual('nocloud', v1_data['cloud-name']) + self.assertItemsEqual([], instance_data['base64_encoded_keys']) + self.assertEqual('nocloud', v1_data['cloud_name']) self.assertIsNone( - v1_data['availability-zone'], - 'found unexpected kvm availability-zone %s' % - v1_data['availability-zone']) + v1_data['availability_zone'], + 'found unexpected kvm availability_zone %s' % + v1_data['availability_zone']) self.assertIsNotNone( re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', - v1_data['instance-id']), - 'kvm instance-id is not a UUID: %s' % v1_data['instance-id']) - self.assertIn('ubuntu', v1_data['local-hostname']) + v1_data['instance_id']), + 'kvm instance_id is not a UUID: %s' % v1_data['instance_id']) + self.assertIn('ubuntu', v1_data['local_hostname']) self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py index c0262ba3..ea545e0a 100644 --- a/tests/cloud_tests/testcases/modules/lxd_bridge.py +++ 
b/tests/cloud_tests/testcases/modules/lxd_bridge.py @@ -7,15 +7,25 @@ from tests.cloud_tests.testcases import base class TestLxdBridge(base.CloudTestCase): """Test LXD module.""" + @classmethod + def maybeSkipTest(cls): + """Skip on cosmic for two reasons: + a.) LP: #1795036 - 'lxd init' fails on cosmic kernel. + b.) apt install lxd installs via snap which can be slow + as that will download core snap and lxd.""" + os_name = cls.data.get('os_name', 'UNKNOWN') + if os_name == "cosmic": + raise base.SkipTest('Skipping test on cosmic (LP: #1795036).') + def test_lxd(self): """Test lxd installed.""" out = self.get_data_file('lxd') - self.assertIn('/usr/bin/lxd', out) + self.assertIn('/lxd', out) def test_lxc(self): """Test lxc installed.""" out = self.get_data_file('lxc') - self.assertIn('/usr/bin/lxc', out) + self.assertIn('/lxc', out) def test_bridge(self): """Test bridge config.""" diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py index 1495674e..797bafed 100644 --- a/tests/cloud_tests/testcases/modules/lxd_dir.py +++ b/tests/cloud_tests/testcases/modules/lxd_dir.py @@ -7,14 +7,24 @@ from tests.cloud_tests.testcases import base class TestLxdDir(base.CloudTestCase): """Test LXD module.""" + @classmethod + def maybeSkipTest(cls): + """Skip on cosmic for two reasons: + a.) LP: #1795036 - 'lxd init' fails on cosmic kernel. + b.) apt install lxd installs via snap which can be slow + as that will download core snap and lxd.""" + os_name = cls.data.get('os_name', 'UNKNOWN') + if os_name == "cosmic": + raise base.SkipTest('Skipping test on cosmic (LP: #1795036).') + def test_lxd(self): """Test lxd installed.""" out = self.get_data_file('lxd') - self.assertIn('/usr/bin/lxd', out) + self.assertIn('/lxd', out) def test_lxc(self): """Test lxc installed.""" out = self.get_data_file('lxc') - self.assertIn('/usr/bin/lxc', out) + self.assertIn('/lxc', out) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py index 7d341773..0f4c3d08 100644 --- a/tests/cloud_tests/testcases/modules/ntp_chrony.py +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. """cloud-init Integration Test Verify Script.""" -import unittest +import unittest2 from tests.cloud_tests.testcases import base @@ -13,7 +13,7 @@ class TestNtpChrony(base.CloudTestCase): """Skip this suite of tests on lxd and artful or older.""" if self.platform == 'lxd': if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0: - raise unittest.SkipTest( + raise unittest2.SkipTest( 'No support for chrony on containers <= artful.' ' LP: #1589780') return super(TestNtpChrony, self).setUp() diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml index 44043f31..322199c3 100644 --- a/tests/cloud_tests/testcases/modules/snap.yaml +++ b/tests/cloud_tests/testcases/modules/snap.yaml @@ -1,6 +1,9 @@ # # Install snappy # +# Aug 23, 2018: Disabled due to requiring a proxy for testing +# tests do not handle the proxy well at this time. 
+enabled: False required_features: - snap cloud_config: | diff --git a/tests/cloud_tests/testcases/modules/snappy.yaml b/tests/cloud_tests/testcases/modules/snappy.yaml index 43f93295..8ac322ae 100644 --- a/tests/cloud_tests/testcases/modules/snappy.yaml +++ b/tests/cloud_tests/testcases/modules/snappy.yaml @@ -1,6 +1,9 @@ # # Install snappy # +# Aug 17, 2018: Disabled due to requiring a proxy for testing +# tests do not handle the proxy well at this time. +enabled: False required_features: - snap cloud_config: | diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py index 7bd520f6..526a2ebd 100644 --- a/tests/cloud_tests/testcases/modules/write_files.py +++ b/tests/cloud_tests/testcases/modules/write_files.py @@ -14,8 +14,11 @@ class TestWriteFiles(base.CloudTestCase): def test_binary(self): """Test binary file reads as executable.""" - out = self.get_data_file('file_binary') - self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out) + out = self.get_data_file('file_binary').strip() + md5 = "3801184b97bb8c6e63fa0e1eae2920d7" + sha256 = ("2c791c4037ea5bd7e928d6a87380f8ba7a803cd83d" + "5e4f269e28f5090f0f2c9a") + self.assertIn(out, (md5 + " -", sha256 + " -")) def test_gzip(self): """Test gzip file shows up as a shell script.""" diff --git a/tests/cloud_tests/testcases/modules/write_files.yaml b/tests/cloud_tests/testcases/modules/write_files.yaml index ce936b7b..cc7ea4bd 100644 --- a/tests/cloud_tests/testcases/modules/write_files.yaml +++ b/tests/cloud_tests/testcases/modules/write_files.yaml @@ -3,6 +3,13 @@ # # NOTE: on trusty 'file' has an output formatting error for binary files and # has 2 spaces in 'LSB executable', which causes a failure here +# +# NOTE: the binary data can be any binary data, not only executables +# and can be generated via the base 64 command as such: +# $ base64 < hello > hello.txt +# the opposite is running: +# $ base64 -d < hello.txt > hello +# required_features: - no_file_fmt_e cloud_config: | @@ -19,9 +26,7 @@ cloud_config: | SMBDOPTIONS="-D" path: /root/file_text - content: !!binary | - f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI - AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA - AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA + /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK path: /root/file_binary permissions: '0555' - encoding: gzip @@ -38,7 +43,9 @@ collect_scripts: file /root/file_text file_binary: | #!/bin/bash - file /root/file_binary + for hasher in md5sum sha256sum; do + $hasher </root/file_binary && break + done file_gzip: | #!/bin/bash file /root/file_gzip diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index bfb27444..9911ecf2 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -3,7 +3,7 @@ """Verify test results.""" import os -import unittest +import unittest2 from tests.cloud_tests import (config, LOG, util, testcases) @@ -18,7 +18,7 @@ def verify_data(data_dir, platform, os_name, tests): @return_value: {<test_name>: {passed: True/False, failures: []}} """ base_dir = os.sep.join((data_dir, platform, os_name)) - runner = unittest.TextTestRunner(verbosity=util.current_verbosity()) + runner = unittest2.TextTestRunner(verbosity=util.current_verbosity()) res = {} for test_name in tests: LOG.debug('verifying test data for %s', test_name) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 
9751ed95..abe820e1 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -2,27 +2,34 @@ """Tests of the built-in user data handlers.""" +import copy import os import shutil import tempfile +from textwrap import dedent -try: - from unittest import mock -except ImportError: - import mock -from cloudinit.tests import helpers as test_helpers +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja) from cloudinit import handlers from cloudinit import helpers from cloudinit import util -from cloudinit.handlers import upstart_job +from cloudinit.handlers.cloud_config import CloudConfigPartHandler +from cloudinit.handlers.jinja_template import ( + JinjaTemplatePartHandler, convert_jinja_instance_data, + render_jinja_payload) +from cloudinit.handlers.shell_script import ShellScriptPartHandler +from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE) -class TestBuiltins(test_helpers.FilesystemMockingTestCase): +class TestUpstartJobPartHandler(FilesystemMockingTestCase): + + mpath = 'cloudinit.handlers.upstart_job.' + def test_upstart_frequency_no_out(self): c_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, c_root) @@ -32,14 +39,13 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): 'cloud_dir': c_root, 'upstart_dir': up_root, }) - freq = PER_ALWAYS - h = upstart_job.UpstartJobPartHandler(paths) + h = UpstartJobPartHandler(paths) # No files should be written out when # the frequency is ! per-instance h.handle_part('', handlers.CONTENT_START, None, None, None) h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', freq) + 'test.conf', 'blah', frequency=PER_ALWAYS) h.handle_part('', handlers.CONTENT_END, None, None, None) self.assertEqual(0, len(os.listdir(up_root))) @@ -48,7 +54,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): # files should be written out when frequency is ! per-instance new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, new_root) - freq = PER_INSTANCE self.patchOS(new_root) self.patchUtils(new_root) @@ -56,22 +61,297 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): 'upstart_dir': "/etc/upstart", }) - upstart_job.SUITABLE_UPSTART = True util.ensure_dir("/run") util.ensure_dir("/etc/upstart") - with mock.patch.object(util, 'subp') as mockobj: - h = upstart_job.UpstartJobPartHandler(paths) - h.handle_part('', handlers.CONTENT_START, - None, None, None) - h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', freq) - h.handle_part('', handlers.CONTENT_END, - None, None, None) + with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True): + with mock.patch.object(util, 'subp') as m_subp: + h = UpstartJobPartHandler(paths) + h.handle_part('', handlers.CONTENT_START, + None, None, None) + h.handle_part('blah', 'text/upstart-job', + 'test.conf', 'blah', frequency=PER_INSTANCE) + h.handle_part('', handlers.CONTENT_END, + None, None, None) - self.assertEqual(len(os.listdir('/etc/upstart')), 1) + self.assertEqual(len(os.listdir('/etc/upstart')), 1) - mockobj.assert_called_once_with( + m_subp.assert_called_once_with( ['initctl', 'reload-configuration'], capture=False) + +class TestJinjaTemplatePartHandler(CiTestCase): + + with_logs = True + + mpath = 'cloudinit.handlers.jinja_template.' 
+ + def setUp(self): + super(TestJinjaTemplatePartHandler, self).setUp() + self.tmp = self.tmp_dir() + self.run_dir = os.path.join(self.tmp, 'run_dir') + util.ensure_dir(self.run_dir) + self.paths = helpers.Paths({ + 'cloud_dir': self.tmp, 'run_dir': self.run_dir}) + + def test_jinja_template_part_handler_defaults(self): + """On init, paths are saved and subhandler types are empty.""" + h = JinjaTemplatePartHandler(self.paths) + self.assertEqual(['## template: jinja'], h.prefixes) + self.assertEqual(3, h.handler_version) + self.assertEqual(self.paths, h.paths) + self.assertEqual({}, h.sub_handlers) + + def test_jinja_template_part_handler_looks_up_sub_handler_types(self): + """When sub_handlers are passed, init lists types of subhandlers.""" + script_handler = ShellScriptPartHandler(self.paths) + cloudconfig_handler = CloudConfigPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.assertItemsEqual( + ['text/cloud-config', 'text/cloud-config-jsonp', + 'text/x-shellscript'], + h.sub_handlers) + + def test_jinja_template_part_handler_looks_up_subhandler_types(self): + """When sub_handlers are passed, init lists types of subhandlers.""" + script_handler = ShellScriptPartHandler(self.paths) + cloudconfig_handler = CloudConfigPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.assertItemsEqual( + ['text/cloud-config', 'text/cloud-config-jsonp', + 'text/x-shellscript'], + h.sub_handlers) + + def test_jinja_template_handle_noop_on_content_signals(self): + """Perform no part handling when content type is CONTENT_SIGNALS.""" + script_handler = ShellScriptPartHandler(self.paths) + + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, 'handle_part') as m_handle_part: + h.handle_part( + data='data', ctype=handlers.CONTENT_START, filename='part-1', + payload='## template: jinja\n#!/bin/bash\necho himom', + frequency='freq', headers='headers') + m_handle_part.assert_not_called() + + @skipUnlessJinja() + def test_jinja_template_handle_subhandler_v2_with_clean_payload(self): + """Call version 2 subhandler.handle_part with stripped payload.""" + script_handler = ShellScriptPartHandler(self.paths) + self.assertEqual(2, script_handler.handler_version) + + # Create required instance-data.json file + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': 'echo himom'} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, 'handle_part') as m_part: + # ctype with leading '!' not in handlers.CONTENT_SIGNALS + h.handle_part( + data='data', ctype="!" 
+ handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}', + frequency='freq', headers='headers') + m_part.assert_called_once_with( + 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq') + + @skipUnlessJinja() + def test_jinja_template_handle_subhandler_v3_with_clean_payload(self): + """Call version 3 subhandler.handle_part with stripped payload.""" + cloudcfg_handler = CloudConfigPartHandler(self.paths) + self.assertEqual(3, cloudcfg_handler.handler_version) + + # Create required instance-data.json file + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[cloudcfg_handler]) + with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part: + # ctype with leading '!' not in handlers.CONTENT_SIGNALS + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_END, + filename='part01', + payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}', + frequency='freq', headers='headers') + m_part.assert_called_once_with( + 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]', + 'freq', 'headers') + + def test_jinja_template_handle_errors_on_missing_instance_data_json(self): + """If instance-data is absent, raise an error from handle_part.""" + script_handler = ShellScriptPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with self.assertRaises(RuntimeError) as context_manager: + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \n#!/bin/bash\necho himom', + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertEqual( + 'Cannot render jinja template vars. Instance data not yet present' + ' at {}/instance-data.json'.format( + self.run_dir), str(context_manager.exception)) + self.assertFalse( + os.path.exists(script_file), + 'Unexpected file created %s' % script_file) + + @skipUnlessJinja() + def test_jinja_template_handle_renders_jinja_content(self): + """When present, render jinja variables from instance-data.json.""" + script_handler = ShellScriptPartHandler(self.paths) + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'subkey': 'echo himom'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + h.handle_part( + data='data', ctype="!" 
+ handlers.CONTENT_START, + filename='part01', + payload=( + '## template: jinja \n' + '#!/bin/bash\n' + '{{ topkey.subkey|default("nosubkey") }}'), + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertNotIn( + 'Instance data not yet present at {}/instance-data.json'.format( + self.run_dir), + self.logs.getvalue()) + self.assertEqual( + '#!/bin/bash\necho himom', util.load_file(script_file)) + + @skipUnlessJinja() + def test_jinja_template_handle_renders_jinja_content_missing_keys(self): + """When specified jinja variable is undefined, log a warning.""" + script_handler = ShellScriptPartHandler(self.paths) + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'subkey': 'echo himom'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}', + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertTrue( + os.path.exists(script_file), + 'Missing expected file %s' % script_file) + self.assertIn( + "WARNING: Could not render jinja template variables in file" + " 'part01': 'goodtry'\n", + self.logs.getvalue()) + + +class TestConvertJinjaInstanceData(CiTestCase): + + def test_convert_instance_data_hyphens_to_underscores(self): + """Replace hyphenated keys with underscores in instance-data.""" + data = {'hyphenated-key': 'hyphenated-val', + 'underscore_delim_key': 'underscore_delimited_val'} + expected_data = {'hyphenated_key': 'hyphenated-val', + 'underscore_delim_key': 'underscore_delimited_val'} + self.assertEqual( + expected_data, + convert_jinja_instance_data(data=data)) + + def test_convert_instance_data_promotes_versioned_keys_to_top_level(self): + """Any versioned keys are promoted as top-level keys + + This provides any cloud-init standardized keys up at a top-level to + allow ease of reference for users. Intsead of v1.availability_zone, + the name availability_zone can be used in templates. 
+ """ + data = {'ds': {'dskey1': 1, 'dskey2': 2}, + 'v1': {'v1key1': 'v1.1'}, + 'v2': {'v2key1': 'v2.1'}} + expected_data = copy.deepcopy(data) + expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'}) + + converted_data = convert_jinja_instance_data(data=data) + self.assertItemsEqual( + ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys()) + self.assertEqual( + expected_data, + converted_data) + + def test_convert_instance_data_most_recent_version_of_promoted_keys(self): + """The most-recent versioned key value is promoted to top-level.""" + data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'}, + 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'}, + 'v3': {'key1': 'newest v3 key1'}} + expected_data = copy.deepcopy(data) + expected_data.update( + {'key1': 'newest v3 key1', 'key2': 'old v1 key2', + 'key3': 'newer v2 key3'}) + + converted_data = convert_jinja_instance_data(data=data) + self.assertEqual( + expected_data, + converted_data) + + def test_convert_instance_data_decodes_decode_paths(self): + """Any decode_paths provided are decoded by convert_instance_data.""" + data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'} + expected_data = copy.deepcopy(data) + expected_data['key1']['subkey1'] = 'hi mom' + + converted_data = convert_jinja_instance_data( + data=data, decode_paths=('key1/subkey1',)) + self.assertEqual( + expected_data, + converted_data) + + +class TestRenderJinjaPayload(CiTestCase): + + with_logs = True + + @skipUnlessJinja() + def test_render_jinja_payload_logs_jinja_vars_on_debug(self): + """When debug is True, log jinja varables available.""" + payload = ( + '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}') + instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} + expected_log = dedent("""\ + DEBUG: Converted jinja variables + { + "hostname": "foo", + "instance_id": "iid", + "v1": { + "hostname": "foo" + } + } + """) + self.assertEqual( + render_jinja_payload( + payload=payload, payload_fn='myfile', + instance_data=instance_data, debug=True), + '#!/bin/sh\necho hi from foo') + self.assertEqual(expected_log, self.logs.getvalue()) + + @skipUnlessJinja() + def test_render_jinja_payload_replaces_missing_variables_and_warns(self): + """Warn on missing jinja variables and replace the absent variable.""" + payload = ( + '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}') + instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} + self.assertEqual( + render_jinja_payload( + payload=payload, payload_fn='myfile', + instance_data=instance_data), + '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE') + expected_log = ( + 'WARNING: Could not render jinja template variables in file' + " 'myfile': 'NOTHERE'") + self.assertIn(expected_log, self.logs.getvalue()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 0c0f427a..199d69b0 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -208,8 +208,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): for subcommand in expected_subcommands: self.assertIn(subcommand, error) - @mock.patch('cloudinit.config.schema.handle_schema_args') - def test_wb_devel_schema_subcommand_parser(self, m_schema): + def test_wb_devel_schema_subcommand_parser(self): """The subcommand cloud-init schema calls the correct subparser.""" exit_code = self._call_main(['cloud-init', 'devel', 'schema']) self.assertEqual(1, exit_code) diff --git a/tests/unittests/test_datasource/test_altcloud.py 
b/tests/unittests/test_datasource/test_altcloud.py index 3253f3ad..ff35904e 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -262,64 +262,56 @@ class TestUserDataRhevm(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' - cmd_pass = ['true'] - cmd_fail = ['false'] - cmd_not_found = ['bogus bad command'] - def setUp(self): '''Set up.''' self.paths = helpers.Paths({'cloud_dir': '/tmp'}) - self.mount_dir = tempfile.mkdtemp() + self.mount_dir = self.tmp_dir() _write_user_data_files(self.mount_dir, 'test user data') - - def tearDown(self): - # Reset - - _remove_user_data_files(self.mount_dir) - - # Attempt to remove the temp dir ignoring errors - try: - shutil.rmtree(self.mount_dir) - except OSError: - pass - - dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' - dsac.CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] - dsac.CMD_UDEVADM_SETTLE = ['udevadm', 'settle', - '--quiet', '--timeout=5'] + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', + 'm_modprobe_floppy', return_value=None) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', + 'm_udevadm_settle', return_value=('', '')) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', + 'm_mount_cb') def test_mount_cb_fails(self): '''Test user_data_rhevm() where mount_cb fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_pass + self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): '''Test user_data_rhevm() where modprobe fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_fail + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "Failed modprobe") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): '''Test user_data_rhevm() with no modprobe command.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_not_found + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): '''Test user_data_rhevm() where udevadm fails.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_fail + self.m_udevadm_settle.side_effect = util.ProcessExecutionError( + "Failed settle.") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): '''Test user_data_rhevm() with no udevadm command.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found + self.m_udevadm_settle.side_effect = OSError("No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e82716eb..4e428b71 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,15 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from cloudinit import distros from cloudinit import helpers -from cloudinit.sources import DataSourceAzure as dsaz +from cloudinit import url_helper +from cloudinit.sources import ( + UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, MountFailedError) from cloudinit.version import version_string as vs -from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, - ExitStack, PY26, SkipTest) +from cloudinit.tests.helpers import ( + HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, + ExitStack, PY26, SkipTest) import crypt +import httpretty +import json import os import stat import xml.etree.ElementTree as ET @@ -77,6 +83,106 @@ def construct_valid_ovf_env(data=None, pubkeys=None, return content +NETWORK_METADATA = { + "network": { + "interface": [ + { + "macAddress": "000D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.0.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81" + } + ] + } + } + ] + } +} + + +class TestGetMetadataFromIMDS(HttprettyTestCase): + + with_logs = True + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() + self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01" + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_does_not_dhcp_if_network_is_up( + self, m_net_is_up, m_dhcp, m_readurl): + """Do not perform DHCP setup when nic is already up.""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_not_called() + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_performs_dhcp_when_network_is_down( + self, m_net_is_up, m_dhcp, m_readurl): + """Perform DHCP setup when nic is not up.""" + m_net_is_up.return_value = False + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_called_with('eth9') + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, timeout=1) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_from_imds_empty_when_no_imds_present( + self, m_net_is_up, m_sleep): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + 
+ m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + class TestAzureDataSource(CiTestCase): with_logs = True @@ -95,8 +201,19 @@ class TestAzureDataSource(CiTestCase): self.patches = ExitStack() self.addCleanup(self.patches.close) - self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) - + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + self.m_get_metadata_from_imds = self.patches.enter_context( + mock.patch.object( + dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value=NETWORK_METADATA))) + self.m_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.sources.net.find_fallback_nic', + return_value='eth9')) + self.m_remove_ubuntu_network_scripts = self.patches.enter_context( + mock.patch.object( + dsaz, 'maybe_remove_ubuntu_network_config_scripts', + mock.MagicMock())) super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -137,7 +254,7 @@ scbus-1 on xpt0 bus 0 ]) return dsaz - def _get_ds(self, data, agent_command=None): + def _get_ds(self, data, agent_command=None, distro=None): def dsdevs(): return data.get('dsdevs', []) @@ -186,8 +303,11 @@ scbus-1 on xpt0 bus 0 side_effect=_wait_for_files)), ]) + if distro is not None: + distro_cls = distros.fetch(distro) + distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=None, paths=self.paths) + data.get('sys_cfg', {}), distro=distro, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -260,29 +380,20 @@ fdescfs /dev/fd fdescfs rw 0 0 res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) - @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') - def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): - """Report non-azure when DMI's chassis asset tag doesn't match. - - Return False when the asset tag doesn't match Azure's static - AZURE_CHASSIS_ASSET_TAG. 
- """ + @mock.patch('cloudinit.sources.DataSourceAzure._is_platform_viable') + def test_call_is_platform_viable_seed(self, m_is_platform_viable): + """Check seed_dir using _is_platform_viable and return False.""" # Return a non-matching asset tag value - nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' - m_read_dmi_data.return_value = nonazure_tag + m_is_platform_viable.return_value = False dsrc = dsaz.DataSourceAzure( {}, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) - self.assertEqual( - "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( - nonazure_tag), - self.logs.getvalue()) + m_is_platform_viable.assert_called_with(dsrc.seed_dir) def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} - dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) @@ -291,6 +402,82 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(os.path.isfile( os.path.join(self.waagent_d, 'ovf-env.xml'))) + def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): + """get_data on non-Ubuntu will not remove ubuntu net scripts.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='debian') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_not_called() + + def test_get_data_on_ubuntu_will_remove_network_scripts(self): + """get_data will remove ubuntu net scripts on Ubuntu distro.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='ubuntu') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_called_once_with() + + def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): + """Return all structured metadata and cache no class attributes.""" + yaml_cfg = "{agent_command: my_command}\n" + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, + 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + dsrc = self._get_ds(data) + expected_cfg = { + 'PreprovisionedVm': False, + 'datasource': {'Azure': {'agent_command': 'my_command'}}, + 'system_info': {'default_user': {'name': u'myuser'}}} + expected_metadata = { + 'azure_data': { + 'configurationsettype': 'LinuxProvisioningConfiguration'}, + 'imds': {'network': {'interface': [{ + 'ipv4': {'ipAddress': [ + {'privateIpAddress': '10.0.0.4', + 'publicIpAddress': '104.46.124.81'}], + 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, + 'ipv6': {'ipAddress': []}, + 'macAddress': '000D3A047598'}]}}, + 'instance-id': 'test-instance-id', + 'local-hostname': u'myhost', + 'random_seed': 'wild'} + + crawled_metadata = dsrc.crawl_metadata() + + self.assertItemsEqual( + crawled_metadata.keys(), + ['cfg', 'files', 'metadata', 'userdata_raw']) + self.assertEqual(crawled_metadata['cfg'], expected_cfg) + self.assertEqual( + list(crawled_metadata['files'].keys()), ['ovf-env.xml']) + self.assertIn( + b'<HostName>myhost</HostName>', + crawled_metadata['files']['ovf-env.xml']) + self.assertEqual(crawled_metadata['metadata'], expected_metadata) + self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') + self.assertEqual(dsrc.userdata_raw, None) + self.assertEqual(dsrc.metadata, {}) + self.assertEqual(dsrc._metadata_imds, UNSET) + 
self.assertFalse(os.path.isfile( + os.path.join(self.waagent_d, 'ovf-env.xml'))) + + def test_crawl_metadata_raises_invalid_metadata_on_error(self): + """crawl_metadata raises an exception on invalid ovf-env.xml.""" + data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} + dsrc = self._get_ds(data) + error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' + ' syntax error: line 1, column 0') + with self.assertRaises(InvalidMetaDataException) as cm: + dsrc.crawl_metadata() + self.assertEqual(str(cm.exception), error_msg) + def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -314,6 +501,20 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual(data['agent_invoked'], cfg['agent_command']) + def test_network_config_set_from_imds(self): + """Datasource.network_config returns IMDS network data.""" + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} @@ -579,12 +780,34 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual( [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_config(self, mock_fallback): + """Network config is generated from IMDS network data when present.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + expected_cfg = { + 'ethernets': { + 'eth0': {'dhcp4': True, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, + 'version': 2} + + self.assertEqual(expected_cfg, dsrc.network_config) + mock_fallback.assert_not_called() + @mock.patch('cloudinit.net.get_interface_mac') @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent IMDS network data, generate network fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -605,6 +828,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -617,8 +842,9 @@ fdescfs /dev/fd fdescfs rw 0 0 @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config_blacklist(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config_blacklist(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent network metadata, blacklist mlx from fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': 
construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -649,6 +875,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -689,9 +917,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz.util, 'which', lambda x: True)) + mock.patch.object(dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz, '_get_random_seed')) + mock.patch.object(dsaz.util, 'which', lambda x: True)) + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) def _dmi_mocks(key): if key == 'system-uuid': @@ -719,9 +950,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) + self.find_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) def tearDown(self): self.patches.close() + super(TestAzureBounce, self).tearDown() def _get_ds(self, ovfcontent=None, agent_command=None): if ovfcontent is not None: @@ -927,7 +1161,7 @@ class TestLoadAzureDsDir(CiTestCase): str(context_manager.exception)) -class TestReadAzureOvf(TestCase): +class TestReadAzureOvf(CiTestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "<foo>" + construct_valid_ovf_env(data={}) @@ -1188,6 +1422,25 @@ class TestCanDevBeReformatted(CiTestCase): "(datasource.Azure.never_destroy_ntfs)", msg) +class TestClearCachedData(CiTestCase): + + def test_clear_cached_attrs_clears_imds(self): + """All class attributes are reset to defaults, including imds data.""" + tmp = self.tmp_dir() + paths = helpers.Paths( + {'cloud_dir': tmp, 'run_dir': tmp}) + dsrc = dsaz.DataSourceAzure({}, distro=None, paths=paths) + clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] + dsrc.metadata = 'md' + dsrc.userdata = 'ud' + dsrc._metadata_imds = 'imds' + dsrc._dirty_cache = True + dsrc.clear_cached_attrs() + self.assertEqual( + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], + clean_values) + + class TestAzureNetExists(CiTestCase): def test_azure_net_must_exist_for_legacy_objpkl(self): @@ -1398,4 +1651,94 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_net.call_count, 1) +class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() + self.tmp = self.tmp_dir() + + def test_remove_network_scripts_removes_both_files_and_directories(self): + """Any files or directories in paths are removed when present.""" + file1 = self.tmp_path('file1', dir=self.tmp) + subdir = self.tmp_path('sub1', dir=self.tmp) + subfile = self.tmp_path('leaf1', dir=subdir) + write_file(file1, 'file1content') + write_file(subfile, 'leafcontent') + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) + + for path in (file1, subdir, subfile): + self.assertFalse(os.path.exists(path), + 'Found unremoved: %s' % path) + + expected_logs = [ + 'INFO: Removing Ubuntu extended network scripts because cloud-init' + ' updates Azure network configuration on the following event:' + ' System boot.', + 'Recursively deleting %s' % subdir, + 'Attempting to 
remove %s' % file1] + for log in expected_logs: + self.assertIn(log, self.logs.getvalue()) + + def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): + """Any files or directories absent are skipped without error.""" + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ + self.tmp_path('nodirhere/', dir=self.tmp), + self.tmp_path('notfilehere', dir=self.tmp)]) + self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + def test_remove_network_scripts_default_removes_stock_scripts(self, + m_exists): + """Azure's stock ubuntu image scripts and artifacts are removed.""" + # Report path absent on all to avoid delete operation + m_exists.return_value = False + dsaz.maybe_remove_ubuntu_network_config_scripts() + calls = m_exists.call_args_list + for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: + self.assertIn(mock.call(path), calls) + + +class TestWBIsPlatformViable(CiTestCase): + """White box tests for _is_platform_viable.""" + with_logs = True + + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_non_azure_chassis(self, m_read_dmi_data): + """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): + """Return True if ovf-env.xml exists in known seed dirs.""" + # Non-matching Azure chassis-asset-tag + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + + m_exist.return_value = True + self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) + m_exist.called_once_with('/other/seed/dir') + + def test_false_on_no_matching_azure_criteria(self): + """Report non-azure on unmatched asset tag, ovf-env absent and no dev. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs + and no devices have a label starting with prefix 'rd_rdfe_'. 
+ """ + self.assertFalse(wrap_and_call( + 'cloudinit.sources.DataSourceAzure', + {'os.path.exists': False, + # Non-matching Azure chassis-asset-tag + 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', + 'util.which': None}, + dsaz._is_platform_viable, 'doesnotmatter')) + self.assertIn( + "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( + dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), + self.logs.getvalue()) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index f6a59b6b..380ad1b5 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -42,6 +42,9 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceCloudSigma.util.is_container", + "m_is_container", return_value=False) self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( "", "", paths=self.paths) diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 0d35dc29..6b01a4ea 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -20,6 +20,7 @@ from cloudinit.sources import ( DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, DataSourceOpenStack as OpenStack, + DataSourceOracle as Oracle, DataSourceOVF as OVF, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, @@ -37,10 +38,12 @@ DEFAULT_LOCAL = [ IBMCloud.DataSourceIBMCloud, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, + Oracle.DataSourceOracle, OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, + Scaleway.DataSourceScaleway, ] DEFAULT_NETWORK = [ @@ -55,7 +58,6 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, - Scaleway.DataSourceScaleway, ] diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 68400f22..231619c9 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -136,6 +136,7 @@ NETWORK_DATA_3 = { ] } +BOND_MAC = "fa:16:3e:b3:72:36" NETWORK_DATA_BOND = { "services": [ {"type": "dns", "address": "1.1.1.191"}, @@ -163,7 +164,7 @@ NETWORK_DATA_BOND = { {"bond_links": ["eth0", "eth1"], "bond_miimon": 100, "bond_mode": "4", "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "ethernet_mac_address": BOND_MAC, "id": "bond0", "type": "bond"}, {"ethernet_mac_address": "fa:16:3e:b3:72:30", "id": "vlan2", "type": "vlan", "vlan_id": 602, @@ -224,6 +225,9 @@ class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() def test_ec2_metadata(self): @@ -642,7 +646,7 @@ class TestConvertNetworkData(CiTestCase): routes) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ 
-664,7 +668,7 @@ class TestConvertNetworkData(CiTestCase): eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -688,6 +692,9 @@ class TestConvertNetworkData(CiTestCase): self.assertIn("auto oeth0", eni_rendering) self.assertIn("auto oeth1", eni_rendering) self.assertIn("auto bond0", eni_rendering) + # The bond should have the given mac address + pos = eni_rendering.find("auto bond0") + self.assertIn(BOND_MAC, eni_rendering[pos:]) def test_vlan(self): # light testing of vlan config conversion and eni rendering @@ -695,7 +702,7 @@ class TestConvertNetworkData(CiTestCase): known_macs=KNOWN_MACS) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index cdbd1e1a..21931eb7 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -25,6 +25,8 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) + self.mocks.enter_context( + mock.patch.object(util, 'read_dmi_data', return_value=None)) def test_nocloud_seed_dir(self): md = {'instance-id': 'IID', 'dsmode': 'local'} diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index ab42f344..61591017 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -43,6 +43,7 @@ DS_PATH = "cloudinit.sources.DataSourceOpenNebula" class TestOpenNebulaDataSource(CiTestCase): parsed_user = None + allowed_subp = ['bash'] def setUp(self): super(TestOpenNebulaDataSource, self).setUp() @@ -354,6 +355,412 @@ class TestOpenNebulaNetwork(unittest.TestCase): system_nics = ('eth0', 'ens3') + def test_context_devname(self): + """Verify context_devname correctly returns mac and name.""" + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH1_MAC': '02:00:0a:12:0f:0f', } + expected = { + '02:00:0a:12:01:01': 'ETH0', + '02:00:0a:12:0f:0f': 'ETH1', } + net = ds.OpenNebulaNetwork(context) + self.assertEqual(expected, net.context_devname) + + def test_get_nameservers(self): + """ + Verify get_nameservers('device') correctly returns DNS server addresses + and search domains. 
+ """ + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + expected = { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']} + net = ds.OpenNebulaNetwork(context) + val = net.get_nameservers('eth0') + self.assertEqual(expected, val) + + def test_get_mtu(self): + """Verify get_mtu('device') correctly returns MTU size.""" + context = {'ETH0_MTU': '1280'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mtu('eth0') + self.assertEqual('1280', val) + + def test_get_ip(self): + """Verify get_ip('device') correctly returns IPv4 address.""" + context = {'ETH0_IP': PUBLIC_IP} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(PUBLIC_IP, val) + + def test_get_ip_emptystring(self): + """ + Verify get_ip('device') correctly returns IPv4 address. + It returns IP address created by MAC address if ETH0_IP has empty + string. + """ + context = {'ETH0_IP': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(IP_BY_MACADDR, val) + + def test_get_ip6(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': '', } + expected = [IP6_GLOBAL] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_ula(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_dual(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_GLOBAL, IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_prefix(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual(IP6_PREFIX, val) + + def test_get_ip6_prefix_emptystring(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty + string. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual('64', val) + + def test_get_gateway(self): + """ + Verify get_gateway('device') correctly returns IPv4 default gateway + address. + """ + context = {'ETH0_GATEWAY': '1.2.3.5'} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway('eth0') + self.assertEqual('1.2.3.5', val) + + def test_get_gateway6(self): + """ + Verify get_gateway6('device') correctly returns IPv6 default gateway + address. + """ + context = {'ETH0_GATEWAY6': IP6_GW} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) + + def test_get_mask(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. 
+ """ + context = {'ETH0_MASK': '255.255.0.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.0.0', val) + + def test_get_mask_emptystring(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + It returns default value '255.255.255.0' if ETH0_MASK has empty string. + """ + context = {'ETH0_MASK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.255.0', val) + + def test_get_network(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + """ + context = {'ETH0_NETWORK': '1.2.3.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('1.2.3.0', val) + + def test_get_network_emptystring(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + It returns network address created by MAC address if ETH0_NETWORK has + empty string. + """ + context = {'ETH0_NETWORK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('10.18.1.0', val) + + def test_get_field(self): + """ + Verify get_field('device', 'name') returns *context* value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue(self): + """ + Verify get_field('device', 'name', 'default value') returns *context* + value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue_emptycontext(self): + """ + Verify get_field('device', 'name', 'default value') returns *default* + value if context value is empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DEFAULT_VALUE', val) + + def test_get_field_emptycontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + def test_get_field_nonecontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + None. 
+ """ + context = {'ETH9_DUMMY': None} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway(self, m_get_phys_by_mac): + """Test rendering with/without IPv4 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '1.2.3.5', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway4': '1.2.3.5', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway6(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': IP6_GW, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway6': IP6_GW, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_ipv6address(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 address""" + self.maxDiff = None + # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': '', + 'ETH0_IP6_PREFIX_LENGTH': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/' + IP4_PREFIX, + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_dns(self, m_get_phys_by_mac): + 
"""Test rendering with/without DNS server, search domain""" + self.maxDiff = None + # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '', + 'ETH0_DNS': '', + 'ETH0_SEARCH_DOMAIN': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_mtu(self, m_get_phys_by_mac): + """Test rendering with/without MTU""" + self.maxDiff = None + # empty ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '1280', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'mtu': '1280', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): for nic in self.system_nics: @@ -395,7 +802,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MACADDR}, 'addresses': [IP_BY_MACADDR + '/16'], 'gateway4': '1.2.3.5', - 'gateway6': None, 'nameservers': { 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} @@ -494,7 +900,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MAC_1}, 'addresses': ['10.3.1.3/16'], 'gateway4': '10.3.0.1', - 'gateway6': None, 'nameservers': { 'addresses': ['10.3.1.2', '1.2.3.8'], 'search': [ diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 585acc33..a731f1ed 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -12,11 +12,11 @@ import re from cloudinit.tests import helpers as test_helpers from six.moves.urllib.parse import urlparse -from six import StringIO +from six import StringIO, text_type from cloudinit import helpers from cloudinit import settings -from cloudinit.sources import convert_vendordata, UNSET +from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -186,7 +186,7 
@@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = json.dumps(os_meta) _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) @@ -217,7 +217,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('vendor_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) @@ -225,7 +225,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_datasource(self, m_dhcp): @@ -510,6 +510,27 @@ class TestDetectOpenStack(test_helpers.CiTestCase): ds.detect_openstack(), 'Expected detect_openstack == True on OpenTelekomCloud') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting Oracle cloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Standard PC (i440FX + PIIX, 1996)' # No match + if dmi_key == 'chassis-asset-tag': + return 'OracleCloud.com' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(accept_oracle=True), + 'Expected detect_openstack == True on OracleCloud.com') + self.assertFalse( + ds.detect_openstack(accept_oracle=False), + 'Expected detect_openstack == False.') + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, @@ -534,4 +555,94 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_proc_env.assert_called_with(1) +class TestMetadataReader(test_helpers.HttprettyTestCase): + """Test the MetadataReader.""" + burl = 'http://169.254.169.254/' + md_base = { + 'availability_zone': 'myaz1', + 'hostname': 'sm-foo-test.novalocal', + "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], + 'launch_index': 0, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + + def register(self, path, body=None, status=200): + content = (body if not isinstance(body, text_type) + else body.encode('utf-8')) + hp.register_uri( + hp.GET, self.burl + "openstack" + path, status=status, + body=content) + + def register_versions(self, versions): + self.register("", '\n'.join(versions)) + self.register("/", '\n'.join(versions)) + + def register_version(self, version, data): + content = '\n'.join(sorted(data.keys())) + self.register(version, content) + self.register(version + "/", content) + for path, content in data.items(): + self.register("/%s/%s" % (version, path), content) + self.register("/%s/%s" % (version, path), content) + if 'user_data' 
not in data: + self.register("/%s/user_data" % version, "nodata", status=404) + + def test__find_working_version(self): + """Test a working version ignores unsupported.""" + unsup = "2016-11-09" + self.register_versions( + [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, + openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LIBERTY, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test__find_working_version_uses_latest(self): + """'latest' should be used if no supported versions.""" + unsup1, unsup2 = ("2016-11-09", '2017-06-06') + self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LATEST, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test_read_v2_os_ocata(self): + """Validate return value of read_v2 for os_ocata data.""" + md = copy.deepcopy(self.md_base) + md['devices'] = [] + network_data = {'links': [], 'networks': [], 'services': []} + vendor_data = {} + vendor_data2 = {"static": {}} + + data = { + 'meta_data.json': json.dumps(md), + 'network_data.json': json.dumps(network_data), + 'vendor_data.json': json.dumps(vendor_data), + 'vendor_data2.json': json.dumps(vendor_data2), + } + + self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) + self.register_version(openstack.OS_OCATA, data) + + mock_read_ec2 = test_helpers.mock.MagicMock( + return_value={'instance-id': 'unused-ec2'}) + expected_md = copy.deepcopy(md) + expected_md.update( + {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + expected = { + 'userdata': '', # Annoying, no user-data results in empty string. + 'version': 2, + 'metadata': expected_md, + 'vendordata': vendor_data, + 'networkdata': network_data, + 'ec2-metadata': mock_read_ec2.return_value, + 'files': {}, + } + reader = openstack.MetadataReader(self.burl) + reader._read_ec2_metadata = mock_read_ec2 + self.assertEqual(expected, reader.read_v2()) + self.assertEqual(1, mock_read_ec2.call_count) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index fc4eb36e..9d52eb99 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -124,7 +124,9 @@ class TestDatasourceOVF(CiTestCase): ds = self.datasource(sys_cfg={}, distro={}, paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': None}, + {'util.read_dmi_data': None, + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( @@ -138,7 +140,9 @@ class TestDatasourceOVF(CiTestCase): paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware'}, + {'util.read_dmi_data': 'vmware', + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index e4e9bb20..c2bc7a00 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -176,11 +176,18 @@ class TestDataSourceScaleway(HttprettyTestCase): self.vendordata_url = \ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', 
return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_ok(self, sleep, m_get_cmdline): + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, user data and vendor data. """ @@ -211,11 +218,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_404(self, sleep, m_get_cmdline): + def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, but no user data nor vendor data. """ @@ -234,11 +242,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_rate_limit(self, sleep, m_get_cmdline): + def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): """ get_data() is rate limited two times by the metadata API when fetching user data. 
@@ -262,3 +271,67 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA) self.assertEqual(sleep.call_count, 2) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4 config if no ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4/v6 configs if ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = { + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}, + {'type': 'static', + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', } + ] + + }] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_existing(self, m_get_cmdline, fallback_nic): + """ + network_config() should return the same data if a network config + already exists + """ + m_get_cmdline.return_value = 'scaleway' + self.datasource._network_config = '0xdeadbeef' + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, '0xdeadbeef') diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index dca0b3d4..46d67b94 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -20,10 +20,8 @@ import multiprocessing import os import os.path import re -import shutil import signal import stat -import tempfile import unittest2 import uuid @@ -31,15 +29,27 @@ from cloudinit import serial from cloudinit.sources import DataSourceSmartOS from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ) + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) import six from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, subp) +from cloudinit.util import ( + b64e, subp, ProcessExecutionError, which, write_file) -from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase +from cloudinit.tests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' SDC_NICS = json.loads(""" [ { @@ -366,37 +376,33 @@ class PsuedoJoyentClient(object): class 
TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + def setUp(self): super(TestSmartOSDataSource, self).setUp() - dsmos = 'cloudinit.sources.DataSourceSmartOS' - patcher = mock.patch(dsmos + ".jmc_client_factory") - self.jmc_cfact = patcher.start() - self.addCleanup(patcher.stop) - patcher = mock.patch(dsmos + ".get_smartos_environ") - self.get_smartos_environ = patcher.start() - self.addCleanup(patcher.stop) - - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - - self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') os.mkdir(self.legacy_user_d) - - self.orig_lud = DataSourceSmartOS.LEGACY_USER_D - DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d - - def tearDown(self): - DataSourceSmartOS.LEGACY_USER_D = self.orig_lud - super(TestSmartOSDataSource, self).tearDown() + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, sys_cfg=None, ds_cfg=None): self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) self.get_smartos_environ.return_value = mode + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + if sys_cfg is None: sys_cfg = {} @@ -405,7 +411,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): sys_cfg['datasource']['SmartOS'] = ds_cfg return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=self.paths) + sys_cfg, distro=None, paths=paths) def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} @@ -493,6 +499,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc.metadata['user-script']) legacy_script_f = "%s/user-script" % self.legacy_user_d + print("legacy_script_f=%s" % legacy_script_f) self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] @@ -640,6 +647,28 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): mydscfg['disk_aliases']['FOO']) +class TestIdentifyFile(CiTestCase): + """Test the 'identify_file' utility.""" + @skipIf(not which("file"), "command 'file' not available.") + def test_file_happy_path(self): + """Test file is available and functional on plain text.""" + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + with self.allow_subp(["file"]): + self.assertEqual("text/plain", identify_file(fname)) + + @mock.patch(DSMOS + ".util.subp") + def test_returns_none_on_error(self, m_subp): + """On 'file' execution error, None should be returned.""" + m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99) + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + self.assertEqual(None, identify_file(fname)) + self.assertEqual( + [mock.call(["file", "--brief", "--mime-type", fname])], + m_subp.call_args_list) + + class ShortReader(object): """Implements a 'read' interface for bytes provided. 
much like io.BytesIO but the 'endbyte' acts as if EOF. @@ -893,7 +922,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): self.assertEqual(client.list(), []) -class TestNetworkConversion(TestCase): +class TestNetworkConversion(CiTestCase): def test_convert_simple(self): expected = { 'version': 1, @@ -1058,7 +1087,8 @@ class TestNetworkConversion(TestCase): "Only supported on KVM and bhyve guests under SmartOS") @unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), "Requires write access to " + SERIAL_DEVICE) -class TestSerialConcurrency(TestCase): +@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available") +class TestSerialConcurrency(CiTestCase): """ This class tests locking on an actual serial port, and as such can only be run in a kvm or bhyve guest running on a SmartOS host. A test run on @@ -1066,7 +1096,11 @@ class TestSerialConcurrency(TestCase): there is only one session over a connection. In contrast, in the absence of proper locking multiple processes opening the same serial port can corrupt each others' exchanges with the metadata server. + + This takes on the order of 2 to 3 minutes to run. """ + allowed_subp = ['mdata-get'] + def setUp(self): self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) self.mdata_proc.start() @@ -1097,7 +1131,7 @@ class TestSerialConcurrency(TestCase): keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) - client = ds.jmc_client_factory() + client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM) self.assertIsNotNone(client) # The behavior that we are testing for was observed mdata-get running diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 07176caa..c3f258d5 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import re + from cloudinit import distros -from cloudinit.tests.helpers import (TestCase, mock) +from cloudinit import ssh_util +from cloudinit.tests.helpers import (CiTestCase, mock) class MyBaseDistro(distros.Distro): @@ -44,8 +47,12 @@ class MyBaseDistro(distros.Distro): @mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False) @mock.patch("cloudinit.distros.util.subp") -class TestCreateUser(TestCase): +class TestCreateUser(CiTestCase): + + with_logs = True + def setUp(self): + super(TestCreateUser, self).setUp() self.dist = MyBaseDistro() def _useradd2call(self, args): @@ -153,4 +160,84 @@ class TestCreateUser(TestCase): [self._useradd2call([user, '-m']), mock.call(['passwd', '-l', user])]) + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_string( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows string and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys='mykey') + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['mykey']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_list( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows lists and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2']) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_integer( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys warns on non-iterable/string type.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=-1) + m_setup_user_keys.assert_called_once_with(set([]), user) + match = re.match( + r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for' + ' \'ssh_authorized_keys\'.*', + self.logs.getvalue(), + re.DOTALL) + self.assertIsNotNone( + match, 'Missing ssh_authorized_keys invalid type warning') + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_no_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Log a warning when trying to redirect a user no cloud ssh keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_redirect_user='someuser') + self.assertIn( + 'WARNING: Unable to disable ssh logins for foouser given ' + 'ssh_redirect_user: someuser. 
No cloud public-keys present.\n', + self.logs.getvalue()) + m_setup_user_keys.assert_not_called() + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_with_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + m_setup_user_keys.assert_called_once_with( + set(['key1']), 'foouser', options=disable_prefix) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', + cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + self.assertEqual( + m_setup_user_keys.call_args_list, + [mock.call(set(['auth1']), user), # not disabled + mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 7765e408..6e339355 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,24 +2,19 @@ import os from six import StringIO -import stat from textwrap import dedent try: from unittest import mock except ImportError: import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers -from cloudinit.net import eni from cloudinit import settings -from cloudinit.tests.helpers import FilesystemMockingTestCase +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, dir2dict, populate_dir) from cloudinit import util @@ -39,6 +34,19 @@ auto eth1 iface eth1 inet dhcp ''' +BASE_NET_CFG_FROM_V2 = ''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5/24 + gateway 192.168.1.254 + +auto eth1 +iface eth1 inet dhcp +''' + BASE_NET_CFG_IPV6 = ''' auto lo iface lo inet loopback @@ -82,7 +90,7 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'type': 'physical'}], 'version': 1} -V1_NET_CFG_OUTPUT = """ +V1_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -116,7 +124,7 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', 'version': 1} -V1_TO_V2_NET_CFG_OUTPUT = """ +V1_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -145,7 +153,7 @@ V2_NET_CFG = { } -V2_TO_V2_NET_CFG_OUTPUT = """ +V2_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. 
# To disable cloud-init's network configuration capabilities, write a file @@ -176,21 +184,10 @@ class WriteBuffer(object): return self.buffer.getvalue() -class TestNetCfgDistro(FilesystemMockingTestCase): - - frbsd_ifout = """\ -hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 - options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO> - ether 00:15:5d:4c:73:00 - inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 - inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 - nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL> - media: Ethernet autoselect (10Gbase-T <full-duplex>) - status: active -""" +class TestNetCfgDistroBase(FilesystemMockingTestCase): def setUp(self): - super(TestNetCfgDistro, self).setUp() + super(TestNetCfgDistroBase, self).setUp() self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') self.add_patch('cloudinit.util.system_info', 'm_sysinfo') self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} @@ -204,144 +201,6 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 paths = helpers.Paths({}) return cls(dname, cfg.get('system_info'), paths) - def test_simple_write_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - ub_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_eni_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - # eni availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(eni, 'available', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.eni.glob.glob", - return_value=[])) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 2) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v1_to_netplan_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - 
mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=devlist)) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V1_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v2_passthrough_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - # FreeBSD does not have '/sys/class/net' file, - # so we need mock here. - mocks.enter_context( - mock.patch.object(os, 'listdir', return_value=devlist)) - ub_distro.apply_network_config(V2_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V2_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) b2 = dict(SysConf(blob2.strip().splitlines())) @@ -353,6 +212,20 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 for (k, v) in b1.items(): self.assertEqual(v, b2[k]) + +class TestNetCfgDistroFreebsd(TestNetCfgDistroBase): + + frbsd_ifout = """\ +hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 + options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO> + ether 00:15:5d:4c:73:00 + inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 + inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 + nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL> + media: Ethernet autoselect (10Gbase-T <full-duplex>) + status: active +""" + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') def test_get_ip_nic_freebsd(self, ifname_out, iflist): @@ -376,349 +249,59 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 res = frbsd_distro.generate_fallback_config() self.assertIsNotNone(res) - def test_simple_write_rh(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - 
mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - rh_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="dhcp" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - # sysconfig availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 5) - - # eth0 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=192.168.1.254 -IPADDR=192.168.1.5 -NETMASK=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - # eth1 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 
0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_write_ipv6_rhel(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - rh_distro.apply_network(BASE_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::2" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.6" -ONBOOT=no -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::3" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_ipv6_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 5) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. 
-# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -IPV6ADDR=2607:f0d0:1002:0011::2/64 -IPV6INIT=yes -IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - def test_simple_write_freebsd(self): fbsd_distro = self._get_distro('freebsd') - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network(BASE_NET_CFG, False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" -ifconfig_vtnet1="DHCP" -defaultrouter="192.168.1.254" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network(BASE_NET_CFG, False) + results = dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals( + dedent('''\ + ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" + ifconfig_vtnet1="DHCP" + defaultrouter="192.168.1.254" + '''), results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + + def test_simple_write_freebsd_from_v2eni(self): + fbsd_distro = self._get_distro('freebsd') + + rc_conf = '/etc/rc.conf' + read_bufs = { + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', + } - def test_apply_network_config_fallback(self): + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False) + results = 
dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals( + dedent('''\ + ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" + ifconfig_vtnet1="DHCP" + defaultrouter="192.168.1.254" + '''), results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + + def test_apply_network_config_fallback_freebsd(self): fbsd_distro = self._get_distro('freebsd') # a weak attempt to verify that we don't have an implementation @@ -735,89 +318,293 @@ defaultrouter="192.168.1.254" "subnets": [{"type": "dhcp"}]}], 'version': 1} - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network_config(mynetcfg, bring_up=False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="DHCP" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network_config(mynetcfg, bring_up=False) + results = dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + + +class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroUbuntuEni, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['eni']) + + def eni_path(self): + return '/etc/network/interfaces.d/50-cloud-init.cfg' + + def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.eni.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_apply_network_config_eni_ub(self): + expected_cfgs = { + self.eni_path(): V1_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) - def test_simple_write_opensuse(self): - """Opensuse network rendering writes 
appropriate sysconfg files.""" - tmpdir = self.tmp_dir() - self.patchOS(tmpdir) - self.patchUtils(tmpdir) - distro = self._get_distro('opensuse') - distro.apply_network(BASE_NET_CFG, False) +class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): + def setUp(self): + super(TestNetCfgDistroUbuntuNetplan, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['netplan']) + self.devlist = ['eth0', 'lo'] + + def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=True): + with mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=self.devlist): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' - lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo') - eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0') - eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1') + def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { - lo_path: dedent(''' - STARTMODE="auto" - USERCONTROL="no" - FIREWALL="no" - '''), - eth0_path: dedent(''' - BOOTPROTO="static" - BROADCAST="192.168.1.0" - GATEWAY="192.168.1.254" - IPADDR="192.168.1.5" - NETMASK="255.255.255.0" - STARTMODE="auto" - USERCONTROL="no" - ETHTOOL_OPTIONS="" - '''), - eth1_path: dedent(''' - BOOTPROTO="dhcp" - STARTMODE="auto" - USERCONTROL="no" - ETHTOOL_OPTIONS="" - ''') + self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, } - for cfgpath in (lo_path, eth0_path, eth1_path): - self.assertCfgEquals( - expected_cfgs[cfgpath], - util.load_file(cfgpath)) - file_stat = os.stat(cfgpath) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_v2_passthrough_ub(self): + expected_cfgs = { + self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V2_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroRedhat(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroRedhat, self).setUp() + self.distro = self._get_distro('rhel', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname + + def control_path(self): + return '/etc/sysconfig/network' + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def 
test_apply_network_config_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # rh_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroOpensuse, self).setUp() + self.distro = self._get_distro('opensuse', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network/ifcfg-%s' % ifname + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_apply_network_config_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +def get_mode(path, target=None): + return os.stat(util.target_path(target, path)).st_mode & 0o777 
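The rewritten distro network tests above all follow the same tmp-root pattern: seed a temporary directory with populate_dir, point the distro code at it with reRooted, then read every written file back with dir2dict and check permissions with get_mode. The following is a minimal, self-contained sketch of that pattern using only the standard library; it is not cloud-init's actual helpers, and the rc.conf content is a placeholder.

import os
import tempfile


def populate_dir(root, files):
    """Create each relative path in 'files' under 'root' with the given content."""
    for relpath, content in files.items():
        path = os.path.join(root, relpath.lstrip('/'))
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as stream:
            stream.write(content)


def dir2dict(root):
    """Return {'/relative/path': content} for every file found under 'root'."""
    found = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        for fname in filenames:
            path = os.path.join(dirpath, fname)
            with open(path) as stream:
                found['/' + os.path.relpath(path, root)] = stream.read()
    return found


def get_mode(path, target=None):
    """Permission bits of 'path', resolved relative to the fake root 'target'."""
    return os.stat(os.path.join(target or '/', path.lstrip('/'))).st_mode & 0o777


tmpd = tempfile.mkdtemp()
populate_dir(tmpd, {'/etc/rc.conf': 'ifconfig_vtnet0="DHCP"\n'})
os.chmod(os.path.join(tmpd, 'etc/rc.conf'), 0o644)
# ...code under test would rewrite files below tmpd here (reRooted in the real tests)...
results = dir2dict(tmpd)
assert results['/etc/rc.conf'].startswith('ifconfig_vtnet0')
assert get_mode('/etc/rc.conf', tmpd) == 0o644

Because dir2dict snapshots everything under the fake root, the assertions stay independent of how many files a renderer writes, which is why the FreeBSD, ENI, netplan and sysconfig test classes above can all share the same _apply_and_verify shape.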
# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 64d9f9f8..46778e95 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceSmartOS as ds_smartos +from cloudinit.sources import DataSourceOracle as ds_oracle UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -88,6 +89,7 @@ CallReturn = namedtuple('CallReturn', class DsIdentifyBase(CiTestCase): dsid_path = os.path.realpath('tools/ds-identify') + allowed_subp = ['sh'] def call(self, rootd=None, mocks=None, func="main", args=None, files=None, policy_dmi=DI_DEFAULT_POLICY, @@ -598,6 +600,18 @@ class TestIsIBMProvisioning(DsIdentifyBase): self.assertIn("from current boot", ret.stderr) +class TestOracle(DsIdentifyBase): + def test_found_by_chassis(self): + """Simple positive test of Oracle by chassis id.""" + self._test_ds_found('Oracle') + + def test_not_found(self): + """Simple negative test of Oracle.""" + mycfg = copy.deepcopy(VALID_CFG['Oracle']) + mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle" + self._check_via_dict(mycfg, rc=RC_NOT_FOUND) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -838,6 +852,12 @@ VALID_CFG = { }, ], }, + 'Oracle': { + 'ds': 'Oracle', + 'files': { + P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n', + } + }, 'SmartOS-bhyve': { 'ds': 'SmartOS', 'mocks': [ diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7a64c230..90fe6eed 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -48,6 +48,10 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None +MOCK_LSB_RELEASE_DATA = { + 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS', + 'release': '18.04', 'codename': 'bionic'} + class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig @@ -64,6 +68,9 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list") self.join = os.path.join self.matcher = re.compile(ADD_APT_REPO_MATCH).search + self.add_patch( + 'cloudinit.config.cc_apt_configure.util.lsb_release', + 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy()) @staticmethod def _add_apt_sources(*args, **kwargs): @@ -76,7 +83,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = util.lsb_release()['codename'] + params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release'] arch = 'amd64' params['MIRROR'] = cc_apt_configure.\ get_default_mirrors(arch)["PRIMARY"] @@ -464,7 +471,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'uri': 'http://testsec.ubuntu.com/%s/' % component}]} post = ("%s_dists_%s-updates_InRelease" % - (component, util.lsb_release()['codename'])) + (component, MOCK_LSB_RELEASE_DATA['codename'])) fromfn = ("%s/%s_%s" % (pre, archive, post)) tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) @@ -942,7 +949,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") self.assertEqual( orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)) - def test_apt_v3_mirror_search_dns(self): + 
@mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain') + def test_apt_v3_mirror_search_dns(self, m_get_hostname): """test_apt_v3_mirror_search_dns - Test searching dns patterns""" pmir = "phit" smir = "shit" diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index b1375269..a76760fa 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -118,7 +118,8 @@ class TestBootcmd(CiTestCase): 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - handle('cc_bootcmd', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + handle('cc_bootcmd', valid_config, cc, LOG, []) self.assertEqual(my_id + ' iid-datasource-none\n', util.load_file(out_file)) @@ -128,12 +129,13 @@ class TestBootcmd(CiTestCase): valid_config = {'bootcmd': ['exit 1']} # Script with error with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.assertRaises(util.ProcessExecutionError) as ctxt_manager: - handle('does-not-matter', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + with self.assertRaises(util.ProcessExecutionError) as ctxt: + handle('does-not-matter', valid_config, cc, LOG, []) self.assertIn( 'Unexpected error while running command.\n' "Command: ['/bin/sh',", - str(ctxt_manager.exception)) + str(ctxt.exception)) self.assertIn( 'Failed to run bootcmd module does-not-matter', self.logs.getvalue()) diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index f4bbd66d..b16532ea 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -36,13 +36,21 @@ class TestInstallChefOmnibus(HttprettyTestCase): @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) def test_install_chef_from_omnibus_runs_chef_url_content(self): - """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script.""" - chef_outfile = self.tmp_path('chef.out', self.new_root) - response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) + """install_chef_from_omnibus calls subp_blob_in_tempfile.""" + response = b'#!/bin/bash\necho "Hi Mom"' httpretty.register_uri( httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) - cc_chef.install_chef_from_omnibus() - self.assertEqual('Hi Mom\n', util.load_file(chef_outfile)) + ret = (None, None) # stdout, stderr but capture=False + + with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile", + return_value=ret) as m_subp_blob: + cc_chef.install_chef_from_omnibus() + # admittedly whitebox, but assuming subp_blob_in_tempfile works + # this should be fine. 
+ self.assertEqual( + [mock.call(blob=response, args=[], basename='chef-omnibus-install', + capture=False)], + m_subp_blob.call_args_list) @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile') diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py index ced05a8d..d854afcb 100644 --- a/tests/unittests/test_handler/test_handler_etc_hosts.py +++ b/tests/unittests/test_handler/test_handler_etc_hosts.py @@ -49,6 +49,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase): if '192.168.1.1\tblah.blah.us\tblah' not in contents: self.assertIsNone('Default etc/hosts content modified') + @t_help.skipUnlessJinja() def test_write_etc_hosts_suse_template(self): cfg = { 'manage_etc_hosts': 'template', diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 4dd7e09f..2478ebc4 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -43,12 +43,12 @@ class TestLxd(t_help.CiTestCase): self.assertTrue(mock_util.which.called) # no bridge config, so maybe_cleanup should not be called. self.assertFalse(m_maybe_clean.called) - init_call = mock_util.subp.call_args_list[0][0][0] - self.assertEqual(init_call, - ['lxd', 'init', '--auto', - '--network-address=0.0.0.0', - '--storage-backend=zfs', - '--storage-pool=poolname']) + self.assertEqual( + [mock.call(['lxd', 'waitready', '--timeout=300']), + mock.call( + ['lxd', 'init', '--auto', '--network-address=0.0.0.0', + '--storage-backend=zfs', '--storage-pool=poolname'])], + mock_util.subp.call_args_list) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 6fe3659d..0f22e579 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -3,6 +3,7 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) + from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index f92175fd..feca56c2 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -150,10 +150,12 @@ class TestResizefs(CiTestCase): self.assertEqual(('growfs', '-y', devpth), _resize_ufs(mount_point, devpth)) + @mock.patch('cloudinit.util.is_container', return_value=False) @mock.patch('cloudinit.util.get_mount_info') @mock.patch('cloudinit.util.get_device_info_from_zpool') @mock.patch('cloudinit.util.parse_mount') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount): + def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, + is_container): devpth = 'vmzroot/ROOT/freebsd' disk = 'gpt/system' fs_type = 'zfs' @@ -354,8 +356,10 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): ('btrfs', 'filesystem', 'resize', 'max', '/'), _resize_btrfs("/", "/dev/sda1")) + @mock.patch('cloudinit.util.is_container', return_value=True) @mock.patch('cloudinit.util.is_FreeBSD') - def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd): + def 
test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd, + m_is_container): freebsd.return_value = True info = 'dev=gpt/system mnt_point=/ path=/' devpth = maybe_get_writable_device_path('gpt/system', info, LOG) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index fb266faf..1bad07f6 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -4,7 +4,7 @@ from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, get_schema_doc, get_schema, validate_cloudconfig_file, validate_cloudconfig_schema, main) -from cloudinit.util import subp, write_file +from cloudinit.util import write_file from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema @@ -406,8 +406,14 @@ class CloudTestsIntegrationTest(CiTestCase): integration_testdir = os.path.sep.join( [testsdir, 'cloud_tests', 'testcases']) errors = [] - out, _ = subp(['find', integration_testdir, '-name', '*yaml']) - for filename in out.splitlines(): + + yaml_files = [] + for root, _dirnames, filenames in os.walk(integration_testdir): + yaml_files.extend([os.path.join(root, f) + for f in filenames if f.endswith(".yaml")]) + self.assertTrue(len(yaml_files) > 0) + + for filename in yaml_files: test_cfg = safe_load(open(filename)) cloud_config = test_cfg.get('cloud_config') if cloud_config: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5ab61cf2..5d9c7d92 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net +from cloudinit import distros from cloudinit.net import cmdline from cloudinit.net import ( eni, interface_has_own_mac, natural_sort_key, netplan, network_state, @@ -129,7 +130,40 @@ OS_SAMPLES = [ 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -162,6 +196,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -195,7 +230,42 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPADDR1=10.0.0.10 +NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -230,6 +300,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -283,7 +354,44 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPV6ADDR=2001:DB8::10/64 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +IPV6INIT=yes +IPV6_DEFAULTGW=2001:DB8::1 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. 
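With each OS_SAMPLES entry above now carrying per-distro expectations ('out_sysconfig_opensuse' under etc/sysconfig/network/, 'out_sysconfig_rhel' under etc/sysconfig/network-scripts/), a sample-driven check only has to pick the key matching the renderer under test. The helper below is purely illustrative: the key names and the (path, content) tuple shape come from the hunks above, while verify_rendered itself is an invented name and not part of this patch.

import os


def verify_rendered(os_sample, flavor, render_dir):
    """Compare files under render_dir with one flavor's expected output.

    flavor is 'rhel' or 'opensuse'; a sample that does not define that
    flavor's key is simply skipped.
    """
    for relpath, expected in os_sample.get('out_sysconfig_%s' % flavor, []):
        with open(os.path.join(render_dir, relpath)) as stream:
            assert stream.read() == expected, relpath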
@@ -643,6 +751,7 @@ iface br0 inet static bridge_stp off bridge_waitport 1 eth3 bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa # control-alias br0 iface br0 inet6 static @@ -708,6 +817,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - eth1 - eth2 + macaddress: aa:bb:cc:dd:ee:ff parameters: mii-monitor-interval: 100 mode: active-backup @@ -720,6 +830,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - eth3 - eth4 + macaddress: bb:bb:bb:bb:bb:aa nameservers: addresses: - 8.8.8.8 @@ -803,6 +914,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6ADDR=2001:1::1/64 IPV6INIT=yes IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes @@ -973,6 +1085,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true use_tempaddr: 1 forwarding: 1 # basically anything in /proc/sys/net/ipv6/conf/.../ + mac_address: bb:bb:bb:bb:bb:aa params: bridge_ageing: 250 bridge_bridgeprio: 22 @@ -1075,6 +1188,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - bond0s0 - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff mtu: 9000 parameters: mii-monitor-interval: 100 @@ -1148,7 +1262,59 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true version: 2 """), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no + """), + 'ifcfg-bond0s0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + 'ifroute-bond0': textwrap.dedent("""\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """), + 'ifcfg-bond0s1': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + }, + + 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" @@ -1487,6 +1653,12 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, class TestGenerateFallbackConfig(CiTestCase): + def setUp(self): + super(TestGenerateFallbackConfig, self).setUp() + self.add_patch( + "cloudinit.util.get_cmdline", "m_get_cmdline", + return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -1521,7 +1693,7 @@ class TestGenerateFallbackConfig(CiTestCase): # don't set rulepath so eni writes them renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1585,7 +1757,7 @@ iface eth0 inet dhcp # don't set rulepath so eni writes them renderer = 
eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1676,7 +1848,7 @@ iface eth1 inet dhcp self.assertEqual(0, mock_settle.call_count) -class TestSysConfigRendering(CiTestCase): +class TestRhelSysConfigRendering(CiTestCase): with_logs = True @@ -1684,6 +1856,13 @@ class TestSysConfigRendering(CiTestCase): header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('rhel') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + def _render_and_read(self, network_config=None, state=None, dir=None): if dir is None: dir = self.tmp_dir() @@ -1695,8 +1874,8 @@ class TestSysConfigRendering(CiTestCase): else: raise ValueError("Expected data or state, got neither") - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) return dir2dict(dir) def _compare_files_to_expected(self, expected, found): @@ -1722,12 +1901,13 @@ class TestSysConfigRendering(CiTestCase): if missing: raise AssertionError("Missing headers in: %s" % missing) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -1739,8 +1919,8 @@ class TestSysConfigRendering(CiTestCase): render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000' with open(os.path.join(render_dir, render_file)) as fh: @@ -1791,9 +1971,9 @@ USERCTL=no network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_multiple_ipv6_default_gateways(self): @@ -1829,9 +2009,9 @@ USERCTL=no network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_openstack_rendering_samples(self): @@ -1843,12 +2023,13 @@ USERCTL=no ex_input, known_macs=ex_mac_addrs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() # render a multiple times to simulate reboots - renderer.render_network_state(ns, render_dir) - renderer.render_network_state(ns, render_dir) - 
renderer.render_network_state(ns, render_dir) - for fn, expected_content in os_sample.get('out_sysconfig', []): + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_rhel', + []): with open(os.path.join(render_dir, fn)) as fh: self.assertEqual(expected_content, fh.read()) @@ -1856,8 +2037,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1882,8 +2063,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1900,33 +2081,332 @@ USERCTL=no self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) def test_bond_config(self): + expected_name = 'expected_sysconfig_rhel' + entry = NETWORK_CONFIGS['bond'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[expected_name], found) + self._assert_headers(found) + + def test_vlan_config(self): + entry = NETWORK_CONFIGS['vlan'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_bridge_config(self): + entry = NETWORK_CONFIGS['bridge'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_manual_config(self): + entry = NETWORK_CONFIGS['manual'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_all_config(self): + entry = NETWORK_CONFIGS['all'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + self.assertNotIn( + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) + + def test_small_config(self): + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_v4_and_v6_static_config(self): + entry = NETWORK_CONFIGS['v4_and_v6_static'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + expected_msg = ( + 'WARNING: Network config: ignoring iface0 device-level mtu:8999' + ' because ipv4 subnet-level mtu:9000 provided.') + self.assertIn(expected_msg, self.logs.getvalue()) + + def test_dhcpv6_only_config(self): + entry = NETWORK_CONFIGS['dhcpv6_only'] 
+ found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + +class TestOpenSuseSysConfigRendering(CiTestCase): + + with_logs = True + + scripts_dir = '/etc/sysconfig/network' + header = ('# Created by cloud-init on instance boot automatically, ' + 'do not edit.\n#\n') + + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('opensuse') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + + def _render_and_read(self, network_config=None, state=None, dir=None): + if dir is None: + dir = self.tmp_dir() + + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) + return dir2dict(dir) + + def _compare_files_to_expected(self, expected, found): + orig_maxdiff = self.maxDiff + expected_d = dict( + (os.path.join(self.scripts_dir, k), util.load_shell_content(v)) + for k, v in expected.items()) + + # only compare the files in scripts_dir + scripts_found = dict( + (k, util.load_shell_content(v)) for k, v in found.items() + if k.startswith(self.scripts_dir)) + try: + self.maxDiff = None + self.assertEqual(expected_d, scripts_found) + finally: + self.maxDiff = orig_maxdiff + + def _assert_headers(self, found): + missing = [f for f in found + if (f.startswith(self.scripts_dir) and + not found[f].startswith(self.header))] + if missing: + raise AssertionError("Missing headers in: %s" % missing) + + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, m_get_cmdline): + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + + render_file = 'etc/sysconfig/network/ifcfg-eth1000' + with open(os.path.join(render_dir, render_file)) as fh: + content = fh.read() + expected_content = """ +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=dhcp +DEVICE=eth1000 +HWADDR=07-1C-C6-75-A4-BE +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip() + self.assertEqual(expected_content, content) + + def test_multiple_ipv4_default_gateways(self): + """ValueError is raised when duplicate ipv4 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_multiple_ipv6_default_gateways(self): + """ValueError is raised when duplicate ipv6 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv6", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [{ + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + }, { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": "::" + }], + "ip_address": "2001:DB8::10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_openstack_rendering_samples(self): + for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() + ex_input = os_sample['in_data'] + ex_mac_addrs = os_sample['in_macs'] + network_cfg = openstack.convert_net_json( + ex_input, known_macs=ex_mac_addrs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + # render a multiple times to simulate reboots + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_opensuse', + []): + with open(os.path.join(render_dir, fn)) as fh: + self.assertEqual(expected_content, fh.read()) + + def test_network_config_v1_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + 
self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=interface0 +GATEWAY=10.0.2.2 +HWADDR=52:54:00:12:34:00 +IPADDR=10.0.2.15 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + + def test_config_with_explicit_loopback(self): + ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + + def test_bond_config(self): + expected_name = 'expected_sysconfig_opensuse' entry = NETWORK_CONFIGS['bond'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + for fname, contents in entry[expected_name].items(): + print(fname) + print(contents) + print() + print('-- expected ^ | v rendered --') + for fname, contents in found.items(): + print(fname) + print(contents) + print() + self._compare_files_to_expected(entry[expected_name], found) self._assert_headers(found) def test_vlan_config(self): entry = NETWORK_CONFIGS['vlan'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_bridge_config(self): entry = NETWORK_CONFIGS['bridge'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_manual_config(self): entry = NETWORK_CONFIGS['manual'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_all_config(self): entry = NETWORK_CONFIGS['all'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) self.assertNotIn( 'WARNING: Network config: ignoring eth0.101 device-level mtu', @@ -1935,13 +2415,13 @@ USERCTL=no def test_small_config(self): entry = NETWORK_CONFIGS['small'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_v4_and_v6_static_config(self): entry = NETWORK_CONFIGS['v4_and_v6_static'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) expected_msg = 
( 'WARNING: Network config: ignoring iface0 device-level mtu:8999' @@ -1951,18 +2431,19 @@ USERCTL=no def test_dhcpv6_only_config(self): entry = NETWORK_CONFIGS['dhcpv6_only'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) class TestEniNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -1976,7 +2457,7 @@ class TestEniNetRendering(CiTestCase): renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': None}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1996,7 +2477,7 @@ iface eth1000 inet dhcp tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) renderer = eni.Renderer() - renderer.render_network_state(ns, tmp_dir) + renderer.render_network_state(ns, target=tmp_dir) expected = """\ auto lo iface lo inet loopback @@ -2010,6 +2491,7 @@ iface eth0 inet dhcp class TestNetplanNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.netplan._clean_default") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -2017,7 +2499,7 @@ class TestNetplanNetRendering(CiTestCase): def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, - mock_clean_default): + mock_clean_default, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2032,7 +2514,7 @@ class TestNetplanNetRendering(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': False}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, render_target))) @@ -2137,7 +2619,7 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) @@ -2162,7 +2644,7 @@ class TestNetplanPostcommands(CiTestCase): '/sys/class/net/lo'], capture=True), ] with mock.patch.object(os.path, 'islink', return_value=True): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) mock_subp.assert_has_calls(expected) @@ -2357,7 +2839,7 @@ class TestNetplanRoundTrip(CiTestCase): renderer = netplan.Renderer( config={'netplan_path': netplan_path}) - renderer.render_network_state(ns, target) + renderer.render_network_state(ns, target=target) return dir2dict(target) def testsimple_render_bond_netplan(self): @@ -2447,7 +2929,7 @@ class TestEniRoundTrip(CiTestCase): renderer = eni.Renderer( 
config={'eni_path': eni_path, 'netrules_path': netrules_path}) - renderer.render_network_state(ns, dir) + renderer.render_network_state(ns, target=dir) return dir2dict(dir) def testsimple_convert_and_render(self): @@ -2778,11 +3260,15 @@ class TestGetInterfacesByMac(CiTestCase): def _se_interface_has_own_mac(self, name): return name in self.data['own_macs'] + def _se_get_ib_interface_hwaddr(self, name, ethernet_format): + ib_hwaddr = self.data.get('ib_hwaddr', {}) + return ib_hwaddr.get(name, {}).get(ethernet_format) + def _mock_setup(self): self.data = copy.deepcopy(self._data) self.data['devices'] = set(list(self.data['macs'].keys())) mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac', 'is_vlan') + 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr') self.mocks = {} for n in mocks: m = mock.patch('cloudinit.net.' + n, @@ -2856,6 +3342,20 @@ class TestGetInterfacesByMac(CiTestCase): ret = net.get_interfaces_by_mac() self.assertEqual('lo', ret[empty_mac]) + def test_ib(self): + ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' + ib_addr_eth_format = '00:11:22:33:44:56' + self._mock_setup() + self.data['devices'] = ['enp0s1', 'ib0'] + self.data['own_macs'].append('ib0') + self.data['macs']['ib0'] = ib_addr + self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format, + False: ib_addr}} + result = net.get_interfaces_by_mac() + expected = {'aa:aa:aa:aa:aa:01': 'enp0s1', + ib_addr_eth_format: 'ib0', ib_addr: 'ib0'} + self.assertEqual(expected, result) + class TestInterfacesSorting(CiTestCase): @@ -2870,6 +3370,67 @@ class TestInterfacesSorting(CiTestCase): ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) +class TestGetIBHwaddrsByInterface(CiTestCase): + + _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' + _ib_addr_eth_format = '00:11:22:33:44:56' + _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', + 'bridge1-nic', 'tun0', 'ib0'], + 'bonds': ['bond1'], + 'bridges': ['bridge1'], + 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'], + 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', + 'enp0s2': 'aa:aa:aa:aa:aa:02', + 'bond1': 'aa:aa:aa:aa:aa:01', + 'bridge1': 'aa:aa:aa:aa:aa:03', + 'bridge1-nic': 'aa:aa:aa:aa:aa:03', + 'tun0': None, + 'ib0': _ib_addr}, + 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format, + False: _ib_addr}}} + data = {} + + def _mock_setup(self): + self.data = copy.deepcopy(self._data) + mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', + 'interface_has_own_mac', 'get_ib_interface_hwaddr') + self.mocks = {} + for n in mocks: + m = mock.patch('cloudinit.net.' 
+ n, + side_effect=getattr(self, '_se_' + n)) + self.addCleanup(m.stop) + self.mocks[n] = m.start() + + def _se_get_devicelist(self): + return self.data['devices'] + + def _se_get_interface_mac(self, name): + return self.data['macs'][name] + + def _se_is_bridge(self, name): + return name in self.data['bridges'] + + def _se_interface_has_own_mac(self, name): + return name in self.data['own_macs'] + + def _se_get_ib_interface_hwaddr(self, name, ethernet_format): + ib_hwaddr = self.data.get('ib_hwaddr', {}) + return ib_hwaddr.get(name, {}).get(ethernet_format) + + def test_ethernet(self): + self._mock_setup() + self.data['devices'].remove('ib0') + result = net.get_ib_hwaddrs_by_interface() + expected = {} + self.assertEqual(expected, result) + + def test_ib(self): + self._mock_setup() + result = net.get_ib_hwaddrs_by_interface() + expected = {'ib0': self._ib_addr} + self.assertEqual(expected, result) + + def _gzip_data(data): with io.BytesIO() as iobuf: gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py new file mode 100644 index 00000000..2e64c6c7 --- /dev/null +++ b/tests/unittests/test_reporting_hyperv.py @@ -0,0 +1,134 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.reporting import events +from cloudinit.reporting import handlers + +import json +import os + +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase + + +class TestKvpEncoding(CiTestCase): + def test_encode_decode(self): + kvp = {'key': 'key1', 'value': 'value1'} + kvp_reporting = handlers.HyperVKvpReportingHandler() + data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) + self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) + decoded_kvp = kvp_reporting._decode_kvp_item(data) + self.assertEqual(kvp, decoded_kvp) + + +class TextKvpReporter(CiTestCase): + def setUp(self): + super(TextKvpReporter, self).setUp() + self.tmp_file_path = self.tmp_path('kvp_pool_file') + util.ensure_file(self.tmp_file_path) + + def test_event_type_can_be_filtered(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path, + event_types=['foo', 'bar']) + + reporter.publish_event( + events.ReportingEvent('foo', 'name', 'description')) + reporter.publish_event( + events.ReportingEvent('some_other', 'name', 'description3')) + reporter.q.join() + + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + reporter.publish_event( + events.ReportingEvent('bar', 'name', 'description2')) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(2, len(kvps)) + + self.assertIn('foo', kvps[0]['key']) + self.assertIn('bar', kvps[1]['key']) + self.assertNotIn('some_other', kvps[0]['key']) + self.assertNotIn('some_other', kvps[1]['key']) + + def test_events_are_over_written(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + + self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + + reporter.publish_event( + events.ReportingEvent('foo', 'name1', 'description')) + reporter.publish_event( + events.ReportingEvent('foo', 'name2', 'description')) + reporter.q.join() + self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + + reporter2 = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter2.incarnation_no = reporter.incarnation_no + 1 + reporter2.publish_event( + events.ReportingEvent('foo', 'name3', 'description')) + reporter2.q.join() + 
+ self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) + + def test_events_with_higher_incarnation_not_over_written(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + + self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + + reporter.publish_event( + events.ReportingEvent('foo', 'name1', 'description')) + reporter.publish_event( + events.ReportingEvent('foo', 'name2', 'description')) + reporter.q.join() + self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + + reporter3 = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter3.incarnation_no = reporter.incarnation_no - 1 + reporter3.publish_event( + events.ReportingEvent('foo', 'name3', 'description')) + reporter3.q.join() + self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) + + def test_finish_event_result_is_logged(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter.publish_event( + events.FinishReportingEvent('name2', 'description1', + result=events.status.FAIL)) + reporter.q.join() + self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value']) + + def test_file_operation_issue(self): + os.remove(self.tmp_file_path) + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter.publish_event( + events.FinishReportingEvent('name2', 'description1', + result=events.status.FAIL)) + reporter.q.join() + + def test_event_very_long(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE + long_event = events.FinishReportingEvent( + 'event_name', + description, + result=events.status.FAIL) + reporter.publish_event(long_event) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(3, len(kvps)) + + # restore from the kvp to see the content are all there + full_description = '' + for i in range(len(kvps)): + msg_slice = json.loads(kvps[i]['value']) + self.assertEqual(msg_slice['msg_i'], i) + full_description += msg_slice['msg'] + self.assertEqual(description, full_description) diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py index 22718108..4cd27eed 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/test_rh_subscription.py @@ -8,10 +8,16 @@ import logging from cloudinit.config import cc_rh_subscription from cloudinit import util -from cloudinit.tests.helpers import TestCase, mock +from cloudinit.tests.helpers import CiTestCase, mock +SUBMGR = cc_rh_subscription.SubscriptionManager +SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' + + +@mock.patch(SUB_MAN_CLI) +class GoodTests(CiTestCase): + with_logs = True -class GoodTests(TestCase): def setUp(self): super(GoodTests, self).setUp() self.name = "cc_rh_subscription" @@ -19,7 +25,6 @@ class GoodTests(TestCase): self.log = logging.getLogger("good_tests") self.args = [] self.handle = cc_rh_subscription.handle - self.SM = cc_rh_subscription.SubscriptionManager self.config = {'rh_subscription': {'username': 'scooby@do.com', @@ -35,55 +40,47 @@ class GoodTests(TestCase): 'disable-repo': ['repo4', 'repo5'] }} - def test_already_registered(self): + def test_already_registered(self, m_sman_cli): ''' Emulates a system that is already registered. 
Ensure it gets a non-ProcessExecution error from is_registered() ''' - with mock.patch.object(cc_rh_subscription.SubscriptionManager, - '_sub_man_cli') as mockobj: - self.SM.log_success = mock.MagicMock() - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(mockobj.call_count, 1) - - def test_simple_registration(self): + self.handle(self.name, self.config, self.cloud_init, + self.log, self.args) + self.assertEqual(m_sman_cli.call_count, 1) + self.assertIn('System is already registered', self.logs.getvalue()) + + def test_simple_registration(self, m_sman_cli): ''' Simple registration with username and password ''' - self.SM.log_success = mock.MagicMock() reg = "The system has been registered with ID:" \ " 12345678-abde-abcde-1234-1234567890abc" - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')] self.handle(self.name, self.config, self.cloud_init, self.log, self.args) - self.assertIn(mock.call(['identity']), - self.SM._sub_man_cli.call_args_list) + self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list) self.assertIn(mock.call(['register', '--username=scooby@do.com', '--password=scooby-snacks'], logstring_val=True), - self.SM._sub_man_cli.call_args_list) - - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(self.SM._sub_man_cli.call_count, 2) + m_sman_cli.call_args_list) + self.assertIn('rh_subscription plugin completed successfully', + self.logs.getvalue()) + self.assertEqual(m_sman_cli.call_count, 2) @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") - @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_sub_man_cli") - def test_update_repos_disable_with_none(self, m_sub_man_cli, m_get_repos): + def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): cfg = copy.deepcopy(self.config) m_get_repos.return_value = ([], ['repo1']) - m_sub_man_cli.return_value = (b'', b'') cfg['rh_subscription'].update( {'enable-repo': ['repo1'], 'disable-repo': None}) mysm = cc_rh_subscription.SubscriptionManager(cfg) self.assertEqual(True, mysm.update_repos()) m_get_repos.assert_called_with() - self.assertEqual(m_sub_man_cli.call_args_list, + self.assertEqual(m_sman_cli.call_args_list, [mock.call(['repos', '--enable=repo1'])]) - def test_full_registration(self): + def test_full_registration(self, m_sman_cli): ''' Registration with auto-attach, service-level, adding pools, and enabling and disabling yum repos @@ -93,26 +90,28 @@ class GoodTests(TestCase): call_lists.append(['repos', '--disable=repo5', '--enable=repo2', '--enable=repo3']) call_lists.append(['attach', '--auto', '--servicelevel=self-support']) - self.SM.log_success = mock.MagicMock() reg = "The system has been registered with ID:" \ " 12345678-abde-abcde-1234-1234567890abc" - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (reg, 'bar'), - ('Service level set to: self-support', ''), - ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), - ('Repo ID: repo1\nRepo ID: repo5\n', ''), - ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: ' - 'repo4', ''), - ('', '')]) + m_sman_cli.side_effect = [ + util.ProcessExecutionError, + (reg, 'bar'), + ('Service level set to: self-support', ''), + ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), + ('Repo ID: repo1\nRepo ID: repo5\n', ''), + ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''), + ('', 
'')] self.handle(self.name, self.config_full, self.cloud_init, self.log, self.args) + self.assertEqual(m_sman_cli.call_count, 9) for call in call_lists: - self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list) - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(self.SM._sub_man_cli.call_count, 9) + self.assertIn(mock.call(call), m_sman_cli.call_args_list) + self.assertIn("rh_subscription plugin completed successfully", + self.logs.getvalue()) -class TestBadInput(TestCase): +@mock.patch(SUB_MAN_CLI) +class TestBadInput(CiTestCase): + with_logs = True name = "cc_rh_subscription" cloud_init = None log = logging.getLogger("bad_tests") @@ -155,81 +154,81 @@ class TestBadInput(TestCase): super(TestBadInput, self).setUp() self.handle = cc_rh_subscription.handle - def test_no_password(self): - ''' - Attempt to register without the password key/value - ''' - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + def assert_logged_warnings(self, warnings): + logs = self.logs.getvalue() + missing = [w for w in warnings if "WARNING: " + w not in logs] + self.assertEqual([], missing, "Missing expected warnings.") + + def test_no_password(self, m_sman_cli): + '''Attempt to register without the password key/value.''' + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_no_password, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM._sub_man_cli.call_count, 0) + self.assertEqual(m_sman_cli.call_count, 0) - def test_no_org(self): - ''' - Attempt to register without the org key/value - ''' - self.input_is_missing_data(self.config_no_key) - - def test_service_level_without_auto(self): - ''' - Attempt to register using service-level without the auto-attach key - ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + def test_no_org(self, m_sman_cli): + '''Attempt to register without the org key/value.''' + m_sman_cli.side_effect = [util.ProcessExecutionError] + self.handle(self.name, self.config_no_key, self.cloud_init, + self.log, self.args) + m_sman_cli.assert_called_with(['identity']) + self.assertEqual(m_sman_cli.call_count, 1) + self.assert_logged_warnings(( + 'Unable to register system due to incomplete information.', + 'Use either activationkey and org *or* userid and password', + 'Registration failed or did not run completely', + 'rh_subscription plugin did not complete successfully')) + + def test_service_level_without_auto(self, m_sman_cli): + '''Attempt to register using service-level without auto-attach key.''' + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_service, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM._sub_man_cli.call_count, 1) - self.assertEqual(self.SM.log_warn.call_count, 2) + self.assertEqual(m_sman_cli.call_count, 1) + self.assert_logged_warnings(( + 'The service-level key must be used in conjunction with ', + 'rh_subscription plugin did not complete successfully')) - def test_pool_not_a_list(self): + def test_pool_not_a_list(self, m_sman_cli): ''' Register with pools that are not in the format of a list ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, 
self.config_badpool, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM._sub_man_cli.call_count, 2) - self.assertEqual(self.SM.log_warn.call_count, 2) + self.assertEqual(m_sman_cli.call_count, 2) + self.assert_logged_warnings(( + 'Pools must in the format of a list', + 'rh_subscription plugin did not complete successfully')) - def test_repo_not_a_list(self): + def test_repo_not_a_list(self, m_sman_cli): ''' Register with repos that are not in the format of a list ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_badrepo, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM.log_warn.call_count, 3) - self.assertEqual(self.SM._sub_man_cli.call_count, 2) + self.assertEqual(m_sman_cli.call_count, 2) + self.assert_logged_warnings(( + 'Repo IDs must in the format of a list.', + 'Unable to add or remove repos', + 'rh_subscription plugin did not complete successfully')) - def test_bad_key_value(self): + def test_bad_key_value(self, m_sman_cli): ''' Attempt to register with a key that we don't know ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_badkey, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM.log_warn.call_count, 2) - self.assertEqual(self.SM._sub_man_cli.call_count, 1) - - def input_is_missing_data(self, config): - ''' - Helper def for tests that having missing information - ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError]) - self.handle(self.name, config, self.cloud_init, - self.log, self.args) - self.SM._sub_man_cli.assert_called_with(['identity']) - self.assertEqual(self.SM.log_warn.call_count, 4) - self.assertEqual(self.SM._sub_man_cli.call_count, 1) + self.assertEqual(m_sman_cli.call_count, 1) + self.assert_logged_warnings(( + 'fookey is not a valid key for rh_subscription. 
Valid keys are:', + 'rh_subscription plugin did not complete successfully')) # vi: ts=4 expandtab diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 20c87efa..c36e6eb0 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -21,6 +21,9 @@ except ImportError: class TestTemplates(test_helpers.CiTestCase): + + with_logs = True + jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n' jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8') @@ -124,6 +127,13 @@ $a,$b''' self.add_header("jinja", self.jinja_utf8), {"name": "bob"}), self.jinja_utf8_rbob) + def test_jinja_nonascii_render_undefined_variables_to_default_py3(self): + """Test py3 jinja render_to_string with undefined variable default.""" + self.assertEqual( + templater.render_string( + self.add_header("jinja", self.jinja_utf8), {}), + self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name')) + def test_jinja_nonascii_render_to_file(self): """Test jinja render_to_file of a filename with non-ascii content.""" tmpl_fn = self.tmp_path("j-render-to-file.template") @@ -144,5 +154,18 @@ $a,$b''' result = templater.render_from_file(tmpl_fn, {"name": "bob"}) self.assertEqual(result, self.jinja_utf8_rbob) + @test_helpers.skipIfJinja() + def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self): + """Test jinja render_from_file will fallback to basic renderer.""" + tmpl_fn = self.tmp_path("j-render-from-file.template") + write_file(tmpl_fn, omode="wb", + content=self.add_header( + "jinja", self.jinja_utf8).encode('utf-8')) + result = templater.render_from_file(tmpl_fn, {"name": "bob"}) + self.assertEqual(result, self.jinja_utf8.decode()) + self.assertIn( + 'WARNING: Jinja not available as the selected renderer for desired' + ' template, reverting to the basic renderer.', + self.logs.getvalue()) # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7a203ce2..5a14479a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -24,6 +24,7 @@ except ImportError: BASH = util.which('bash') +BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' class FakeSelinux(object): @@ -742,6 +743,8 @@ class TestReadSeeded(helpers.TestCase): class TestSubp(helpers.CiTestCase): with_logs = True + allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, sys.executable] stdin2err = [BASH, '-c', 'cat >&2'] stdin2out = ['cat'] @@ -749,7 +752,6 @@ class TestSubp(helpers.CiTestCase): utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] - bogus_command = 'this-is-not-expected-to-be-a-program-name' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf @@ -848,9 +850,10 @@ class TestSubp(helpers.CiTestCase): util.write_file(noshebang, 'true\n') os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - self.assertRaisesRegex(util.ProcessExecutionError, - r'Missing #! in script\?', - util.subp, (noshebang,)) + with self.allow_subp([noshebang]): + self.assertRaisesRegex(util.ProcessExecutionError, + r'Missing #! 
in script\?', + util.subp, (noshebang,)) def test_subp_combined_stderr_stdout(self): """Providing combine_capture as True redirects stderr to stdout.""" @@ -868,14 +871,14 @@ class TestSubp(helpers.CiTestCase): def test_exception_has_out_err_are_bytes_if_decode_false(self): """Raised exc should have stderr, stdout as bytes if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=False) + util.subp([BOGUS_COMMAND], decode=False) self.assertTrue(isinstance(cm.exception.stdout, bytes)) self.assertTrue(isinstance(cm.exception.stderr, bytes)) def test_exception_has_out_err_are_bytes_if_decode_true(self): """Raised exc should have stderr, stdout as string if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=True) + util.subp([BOGUS_COMMAND], decode=True) self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) @@ -925,10 +928,10 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp([self.bogus_command], status_cb=status_cb) + util.subp([BOGUS_COMMAND], status_cb=status_cb) expected = [ - 'Begin run command: {cmd}\n'.format(cmd=self.bogus_command), + 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), 'ERROR: End run command: invalid command provided\n'] self.assertEqual(expected, logs) @@ -940,13 +943,13 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp(['ls', '/I/dont/exist'], status_cb=status_cb) - util.subp(['ls'], status_cb=status_cb) + util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) + util.subp([BASH, '-c', 'exit 0'], status_cb=status_cb) expected = [ - 'Begin run command: ls /I/dont/exist\n', + 'Begin run command: %s -c exit 2\n' % BASH, 'ERROR: End run command: exit(2)\n', - 'Begin run command: ls\n', + 'Begin run command: %s -c exit 0\n' % BASH, 'End run command: exit(0)\n'] self.assertEqual(expected, logs) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 036f6879..602dedb0 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -2,11 +2,15 @@ # Copyright (C) 2016 VMware INC. # # Author: Sankar Tanguturi <stanguturi@vmware.com> +# Pengpeng Sun <pengpengs@vmware.com> # # This file is part of cloud-init. See LICENSE file for license information. 
import logging +import os import sys +import tempfile +import textwrap from cloudinit.sources.DataSourceOVF import get_network_config_from_conf from cloudinit.sources.DataSourceOVF import read_vmware_imc @@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) self.assertEqual("test-script", conf.custom_script_name) + +class TestVmwareNetConfig(CiTestCase): + """Test conversion of vmware config to cloud-init config.""" + + def _get_NicConfigurator(self, text): + fp = None + try: + with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), + delete=False) as fp: + fp.write(text) + fp.close() + cfg = Config(ConfigFile(fp.name)) + return NicConfigurator(cfg.nics, use_system_devices=False) + finally: + if fp: + os.unlink(fp.name) + + def test_non_primary_nic_without_gateway(self): + """A non primary nic set is not required to have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], + nc.generate()) + + def test_non_primary_nic_with_gateway(self): + """A non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}, + {'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}], + nc.generate()) + + def test_a_primary_nic_with_gateway(self): + """A primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + PRIMARY = true + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'gateway': '10.20.87.253'}]}], + nc.generate()) + + # vi: ts=4 expandtab |