From f55bb17ddb2fd64e039057bf7ee50951a0dc93e8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 17:22:45 +0000 Subject: Vmware: Add support for the com.vmware.guestInfo OVF transport. This adds support for reading OVF information over the 'com.vmware.guestInfo' transport. The current implementation requires that vmware-rpctool be installed on the system. LP: #1807466 --- tests/unittests/test_datasource/test_ovf.py | 72 ++++++++++++++++++++++++++--- tests/unittests/test_ds_identify.py | 15 ++++++ 2 files changed, 81 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index a226c032..e4af0fa3 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,8 @@ from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +MPATH = 'cloudinit.sources.DataSourceOVF.' + OVF_ENV_CONTENT = """ \n Date: Thu, 20 Dec 2018 20:52:05 +0000 Subject: OVF: simplify expected return values of transport functions. Transport functions (transport_iso9660 and transport_vmware_guestinfo) would return a tuple of 3 values, but only the first was ever used outside of tests. The other values (device and filename) were just ignored. This simplifies the transport functions to now return content (in string format) or None, indicating that the transport was not found. --- cloudinit/sources/DataSourceOVF.py | 20 ++++----- tests/unittests/test_datasource/test_ovf.py | 70 ++++++++++------------------- 2 files changed, 33 insertions(+), 57 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 891d6547..3a3fcdf6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -236,7 +236,7 @@ class DataSourceOVF(sources.DataSource): ('iso', transport_iso9660)] name = None for name, transfunc in np: - (contents, _dev, _fname) = transfunc() + contents = transfunc() if contents: break if contents: @@ -464,8 +464,8 @@ def maybe_cdrom_device(devname): return cdmatch.match(devname) is not None -# Transport functions take no input and return -# a 3 tuple of content, path, filename +# Transport functions are called with no arguments and return +# either None (indicating not present) or string content of an ovf-env.xml def transport_iso9660(require_iso=True): # Go through mounts to see if it was already mounted @@ -477,9 +477,9 @@ def transport_iso9660(require_iso=True): if not maybe_cdrom_device(dev): continue mp = info['mountpoint'] - (fname, contents) = get_ovf_env(mp) + (_fname, contents) = get_ovf_env(mp) if contents is not False: - return (contents, dev, fname) + return contents if require_iso: mtype = "iso9660" @@ -492,27 +492,27 @@ def transport_iso9660(require_iso=True): if maybe_cdrom_device(dev)] for dev in devs: try: - (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) + (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660", dev) continue if contents is not False: - return (contents, dev, fname) + return contents - return (False, None, None) + return None def transport_vmware_guestinfo(): rpctool = "vmware-rpctool" - not_found = (False, None, None) + not_found = None if not util.which(rpctool): return not_found cmd = [rpctool, "info-get guestinfo.ovfEnv"] try: out, _err = 
util.subp(cmd) if out: - return (out, rpctool, "guestinfo.ovfEnv") + return out LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out) except util.ProcessExecutionError as e: if e.exit_code != 1: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index e4af0fa3..349d54cc 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -19,6 +19,8 @@ from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( MPATH = 'cloudinit.sources.DataSourceOVF.' +NOT_FOUND = None + OVF_ENV_CONTENT = """ Date: Thu, 20 Dec 2018 21:49:09 +0000 Subject: Scaleway: Support ssh keys provided inside an instance tag. The change here will utilize ssh keys found inside an instance's tag. The tag value must start with 'AUTHORIZED_KEY'. --- cloudinit/sources/DataSourceScaleway.py | 11 +++- tests/unittests/test_datasource/test_scaleway.py | 76 ++++++++++++++++++++++-- 2 files changed, 82 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 9dc4ab23..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -253,7 +253,16 @@ class DataSourceScaleway(sources.DataSource): return self.metadata['id'] def get_public_ssh_keys(self): - return [key['key'] for key in self.metadata['ssh_public_keys']] + ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']] + + akeypre = "AUTHORIZED_KEY=" + plen = len(akeypre) + for tag in self.metadata.get('tags', []): + if not tag.startswith(akeypre): + continue + ssh_keys.append(tag[:plen].replace("_", " ")) + + return ssh_keys def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata['hostname'] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index c2bc7a00..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -49,6 +49,9 @@ class MetadataResponses(object): FAKE_METADATA = { 'id': '00000000-0000-0000-0000-000000000000', 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], 'ssh_public_keys': [{ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', 'fingerprint': '2048 06:ae:... 
login (RSA)' @@ -204,10 +207,11 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_instance_id(), MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys(), [ - elem['key'] for elem in - MetadataResponses.FAKE_METADATA['ssh_public_keys'] - ]) + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) self.assertEqual(self.datasource.get_hostname(), MetadataResponses.FAKE_METADATA['hostname']) self.assertEqual(self.datasource.get_userdata_raw(), @@ -218,6 +222,70 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return empty list if no ssh key are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ].sort()) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return list of keys available in + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) -- cgit v1.2.3 From fdadcb5fae51f4e6799314ab98e3aec56c79b17c Mon Sep 17 00:00:00 2001 From: Jason Zions Date: Tue, 15 Jan 2019 21:37:17 +0000 Subject: net: Wait for dhclient to daemonize before reading lease file cloud-init uses dhclient to fetch the DHCP lease so it can extract DHCP options. 
dhclient creates the leasefile, then writes to it; simply waiting for the leasefile to appear creates a race between dhclient and cloud-init. Instead, wait for dhclient to be parented by init. At that point, we know it has written to the leasefile, so it's safe to copy the file and kill the process. cloud-init creates a temporary directory in which to execute dhclient, and deletes that directory after it has killed the process. If cloud-init abandons waiting for dhclient to daemonize, it will still attempt to delete the temporary directory, but will not report an exception should that attempt fail. LP: #1794399 --- cloudinit/net/dhcp.py | 44 +++++++++++++++++++++++++++----------- cloudinit/net/tests/test_dhcp.py | 15 ++++++++++--- cloudinit/temp_utils.py | 4 ++-- cloudinit/tests/test_temp_utils.py | 18 +++++++++++++++- cloudinit/util.py | 16 ++++++++++++++ tests/unittests/test_util.py | 6 ++++++ 6 files changed, 84 insertions(+), 19 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 0db991db..c98a97cd 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -9,6 +9,7 @@ import logging import os import re import signal +import time from cloudinit.net import ( EphemeralIPv4Network, find_fallback_nic, get_devicelist, @@ -127,7 +128,9 @@ def maybe_perform_dhcp_discovery(nic=None): if not dhclient_path: LOG.debug('Skip dhclient configuration: No dhclient command found.') return [] - with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: + with temp_utils.tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-', + needs_exe=True) as tdir: # Use /var/tmp because /run/cloud-init/tmp is mounted noexec return dhcp_discovery(dhclient_path, nic, tdir) @@ -195,24 +198,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): '-pf', pid_file, interface, '-sf', '/bin/true'] util.subp(cmd, capture=True) - # dhclient doesn't write a pid file until after it forks when it gets a - # proper lease response. Since cleandir is a temp directory that gets - # removed, we need to wait for that pidfile creation before the - # cleandir is removed, otherwise we get FileNotFound errors. + # Wait for pid file and lease file to appear, and for the process + # named by the pid file to daemonize (have pid 1 as its parent). If we + # try to read the lease file before daemonization happens, we might try + # to read it before the dhclient has actually written it. We also have + # to wait until the dhclient has become a daemon so we can be sure to + # kill the correct process, thus freeing cleandir to be deleted back + # up the callstack. 
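+    # Taken together, the waits below bound discovery time:
+    # wait_for_files gives the pid and lease files up to 5 seconds
+    # (maxwait=5, naplen=0.01) to appear, and the polling loop that
+    # follows budgets roughly 10 more seconds (1000 reads with a
+    # 0.01 second sleep) for dhclient to be reparented to init.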
missing = util.wait_for_files( [pid_file, lease_file], maxwait=5, naplen=0.01) if missing: LOG.warning("dhclient did not produce expected files: %s", ', '.join(os.path.basename(f) for f in missing)) return [] - pid_content = util.load_file(pid_file).strip() - try: - pid = int(pid_content) - except ValueError: - LOG.debug( - "pid file contains non-integer content '%s'", pid_content) - else: - os.kill(pid, signal.SIGKILL) + + ppid = 'unknown' + for _ in range(0, 1000): + pid_content = util.load_file(pid_file).strip() + try: + pid = int(pid_content) + except ValueError: + pass + else: + ppid = util.get_proc_ppid(pid) + if ppid == 1: + LOG.debug('killing dhclient with pid=%s', pid) + os.kill(pid, signal.SIGKILL) + return parse_dhcp_lease_file(lease_file) + time.sleep(0.01) + + LOG.error( + 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', + pid_content, ppid, 0.01 * 1000 + ) return parse_dhcp_lease_file(lease_file) diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index cd3e7328..79e8842f 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -145,16 +145,20 @@ class TestDHCPDiscoveryClean(CiTestCase): 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertIn( - "pid file contains non-integer content ''", self.logs.getvalue()) + "dhclient(pid=, parentpid=unknown) failed " + "to daemonize after 10.0 seconds", + self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.wait_for_files') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp, m_wait, - m_kill): + m_kill, + m_getppid): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') @@ -164,6 +168,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pidfile = self.tmp_path('dhclient.pid', tmpdir) leasefile = self.tmp_path('dhcp.leases', tmpdir) m_wait.return_value = [pidfile] # Return the missing pidfile wait for + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertEqual( mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), @@ -173,9 +178,10 @@ class TestDHCPDiscoveryClean(CiTestCase): self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') - def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): + def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. 
@@ -197,6 +203,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pid_file = os.path.join(tmpdir, 'dhclient.pid') my_pid = 1 write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertItemsEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', @@ -355,3 +362,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): self.assertEqual(fake_lease, lease) # Ensure that dhcp discovery occurs m_dhcp.called_once_with() + +# vi: ts=4 expandtab diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index c98a1b53..346276ec 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs): @contextlib.contextmanager -def tempdir(**kwargs): +def tempdir(rmtree_ignore_errors=False, **kwargs): # This seems like it was only added in python 3.2 # Make it since its useful... # See: http://bugs.python.org/file12970/tempdir.patch @@ -89,7 +89,7 @@ def tempdir(**kwargs): try: yield tdir finally: - shutil.rmtree(tdir) + shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) def mkdtemp(**kwargs): diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py index ffbb92cd..4a52ef89 100644 --- a/cloudinit/tests/test_temp_utils.py +++ b/cloudinit/tests/test_temp_utils.py @@ -2,8 +2,9 @@ """Tests for cloudinit.temp_utils""" -from cloudinit.temp_utils import mkdtemp, mkstemp +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir from cloudinit.tests.helpers import CiTestCase, wrap_and_call +import os class TestTempUtils(CiTestCase): @@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase): self.assertEqual('/fake/return/path', retval) self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 7800f7bc..a8a232b6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2876,4 +2876,20 @@ def udevadm_settle(exists=None, timeout=None): return subp(settle_cmd) +def get_proc_ppid(pid): + """ + Return the parent pid of a process. + """ + ppid = 0 + try: + contents = load_file("/proc/%s/stat" % pid, quiet=True) + except IOError as e: + LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) + if contents: + parts = contents.split(" ", 4) + # man proc says + # ppid %d (4) The PID of the parent. 
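+    # /proc/<pid>/stat is a single line of space-separated fields:
+    # pid (comm) state ppid ... - so after split(" ", 4) the parent
+    # pid is parts[3]. This assumes comm itself contains no spaces;
+    # if it did, the int() conversion below would fail.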
+ ppid = int(parts[3]) + return ppid + # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5a14479a..0e71db82 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1171,4 +1171,10 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual({}, util.get_proc_env(1)) self.assertEqual(1, m_load_file.call_count) + def test_get_proc_ppid(self): + """get_proc_ppid returns correct parent pid value.""" + my_pid = os.getpid() + my_ppid = os.getppid() + self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) + # vi: ts=4 expandtab -- cgit v1.2.3 From 3861102fcaf47a882516d8b6daab518308eb3086 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Fri, 18 Jan 2019 15:36:19 +0000 Subject: net: Make sysconfig renderer compatible with Network Manager. The 'sysconfig' renderer is activated if, and only if, the ifup and ifdown commands are present in its search dictionary or the network-scripts configuration files are found. This patch adds a check for the NetworkManager configuration file as well. This solution is based on the use of the plugin 'ifcfg-rh' present in NetworkManager and is designed to support Fedora 29 or other distributions that have also replaced network-scripts with NetworkManager. --- cloudinit/net/sysconfig.py | 36 +++++++++++++++++++++++ tests/unittests/test_net.py | 71 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 17293e1d..ae41f7b3 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -10,11 +10,14 @@ from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util +from configobj import ConfigObj + from . import renderer from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) LOG = logging.getLogger(__name__) +NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" def _make_header(sep='#'): @@ -46,6 +49,24 @@ def _quote_value(value): return value +def enable_ifcfg_rh(path): + """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" + config = ConfigObj(path) + if 'main' in config: + if 'plugins' in config['main']: + if 'ifcfg-rh' in config['main']['plugins']: + return + else: + config['main']['plugins'] = [] + + if isinstance(config['main']['plugins'], list): + config['main']['plugins'].append('ifcfg-rh') + else: + config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh'] + config.write() + LOG.debug('Enabled ifcfg-rh NetworkManager plugins') + + class ConfigMap(object): """Sysconfig like dictionary object.""" @@ -657,6 +678,8 @@ class Renderer(renderer.Renderer): netrules_content = self._render_persistent_net(network_state) netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) + if available_nm(target=target): + enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) sysconfig_path = util.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos @@ -671,6 +694,13 @@ class Renderer(renderer.Renderer): def available(target=None): + sysconfig = available_sysconfig(target=target) + nm = available_nm(target=target) + + return any([nm, sysconfig]) + + +def available_sysconfig(target=None): expected = ['ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: @@ -686,4 +716,10 @@ def available(target=None): return True +def available_nm(target=None): + if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): + return False + return True + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 195f261c..d679e92c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -22,6 +22,7 @@ import os import textwrap import yaml + DHCP_CONTENT_1 = """ DEVICE='eth0' PROTO='dhcp' @@ -1880,6 +1881,7 @@ class TestRhelSysConfigRendering(CiTestCase): with_logs = True + nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" scripts_dir = '/etc/sysconfig/network-scripts' header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') @@ -2174,6 +2176,75 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_check_ifcfg_rh(self): + """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" + render_dir = self.tmp_dir() + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a list here + with open(nm_cfg, 'w') as fh: + fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_string(self): + """ifcfg-rh plugin is append when plugins is a string.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a value here + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check raw content has plugin + nm_file_content = util.load_file(nm_cfg) + self.assertIn('ifcfg-rh', nm_file_content) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_no_plugins(self): + """enable_ifcfg_plugin creates plugins value if missing.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is missing + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), 
+ dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + class TestOpenSuseSysConfigRendering(CiTestCase): -- cgit v1.2.3 From b74ebca563a21332b29482c8029e7908f60225a4 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Wed, 23 Jan 2019 22:35:32 +0000 Subject: net/sysconfig: do not write a resolv.conf file with only the header. Writing the file with no dns information may prevent distro tools from writing a resolv.conf file with dns information obtained from a dhcp server. --- cloudinit/net/sysconfig.py | 5 ++++- tests/unittests/test_net.py | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index ae41f7b3..fd8e5010 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -557,6 +557,8 @@ class Renderer(renderer.Renderer): content.add_nameserver(nameserver) for searchdomain in network_state.dns_searchdomains: content.add_search_domain(searchdomain) + if not str(content): + return None header = _make_header(';') content_str = str(content) if not content_str.startswith(header): @@ -666,7 +668,8 @@ class Renderer(renderer.Renderer): dns_path = util.target_path(target, self.dns_path) resolv_content = self._render_dns(network_state, existing_dns_path=dns_path) - util.write_file(dns_path, resolv_content, file_mode) + if resolv_content: + util.write_file(dns_path, resolv_content, file_mode) if self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index d679e92c..5313d2df 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2098,6 +2098,10 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) @@ -2456,6 +2460,10 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) -- cgit v1.2.3 From c283321bb118d5408390e12b173440f57bd2c160 Mon Sep 17 00:00:00 2001 From: Johnson Shi Date: Fri, 25 Jan 2019 17:46:33 +0000 Subject: lxd: install zfs-linux instead of zfs meta package When using the LXD module cloud-init will attempt to install ZFS if it does not exist on the target system. However instead of installing the `zfsutils-linux` package it attempts to install `zfs` resulting in an error. Ubuntu Xenial (16.04) has zfs meta package, but Bionic (18.04) does not. Use the specific base package instead of zfs meta. 
Co-authored-by: Michael Skalka LP: #1799779 --- cloudinit/config/cc_lxd.py | 2 +- tests/unittests/test_handler/test_handler_lxd.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 24a8ebea..71d13ed8 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -89,7 +89,7 @@ def handle(name, cfg, cloud, log, args): packages.append('lxd') if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): - packages.append('zfs') + packages.append('zfsutils-linux') if len(packages): try: diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 2478ebc4..b63db616 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -62,7 +62,7 @@ class TestLxd(t_help.CiTestCase): cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) + self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") -- cgit v1.2.3 From 3f12012eba2aabb6ca7b3ef70bc33a4aa1edada4 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 28 Jan 2019 17:06:58 +0000 Subject: sysconfig: On SUSE, use STARTMODE instead of ONBOOT ONBOOT is not recognized on openSUSE and SUSE Linux Enterprise, add the STARTMODE setting LP: #1799540 --- cloudinit/net/sysconfig.py | 2 ++ tests/unittests/test_distros/test_netconfig.py | 8 ++++++ tests/unittests/test_net.py | 40 ++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index fd8e5010..19b3e60c 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -273,6 +273,7 @@ class Renderer(renderer.Renderer): ('USERCTL', False), ('NM_CONTROLLED', False), ('BOOTPROTO', 'none'), + ('STARTMODE', 'auto'), ]) # If these keys exist, then their values will be used to form @@ -367,6 +368,7 @@ class Renderer(renderer.Renderer): iface_cfg.name)) if subnet.get('control') == 'manual': iface_cfg['ONBOOT'] = False + iface_cfg['STARTMODE'] = 'manual' # set IPv4 and IPv6 static addresses ipv4_index = -1 diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 6e339355..e986b593 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -468,6 +468,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -476,6 +477,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -499,6 +501,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -507,6 +510,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -559,6 +563,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -567,6 +572,7 
@@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -587,6 +593,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -595,6 +602,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5313d2df..e041e978 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -145,6 +145,7 @@ IPADDR=172.19.1.34 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -178,6 +179,7 @@ IPADDR=172.19.1.34 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -247,6 +249,7 @@ NETMASK=255.255.252.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -282,6 +285,7 @@ NETMASK=255.255.252.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -373,6 +377,7 @@ IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -410,6 +415,7 @@ IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -526,6 +532,7 @@ NETWORK_CONFIGS = { HWADDR=cf:d6:af:48:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth99': textwrap.dedent("""\ @@ -542,6 +549,7 @@ NETWORK_CONFIGS = { METRIC=10000 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), }, @@ -655,6 +663,7 @@ NETWORK_CONFIGS = { NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no MTU=9000 @@ -694,6 +703,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -897,6 +907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MACADDR=aa:bb:cc:dd:ee:ff NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no"""), 'ifcfg-bond0.200': textwrap.dedent("""\ @@ -905,6 +916,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -922,6 +934,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PRIO=22 + STARTMODE=auto STP=no TYPE=Bridge USERCTL=no"""), @@ -931,6 +944,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=c0:d6:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth0.101': textwrap.dedent("""\ @@ -949,6 +963,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -959,6 +974,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MASTER=bond0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto SLAVE=yes TYPE=Ethernet USERCTL=no"""), @@ -969,6 +985,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MASTER=bond0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto 
SLAVE=yes TYPE=Ethernet USERCTL=no"""), @@ -979,6 +996,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=66:bb:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth4': textwrap.dedent("""\ @@ -988,6 +1006,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=98:bb:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth5': textwrap.dedent("""\ @@ -996,6 +1015,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no""") }, @@ -1307,6 +1327,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no """), @@ -1318,6 +1339,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1334,6 +1356,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1359,6 +1382,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no """), @@ -1370,6 +1394,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1392,6 +1417,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1429,6 +1455,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=aa:bb:cc:dd:e8:00 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-en0.99': textwrap.dedent("""\ @@ -1447,6 +1474,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=en0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -1488,6 +1516,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PRIO=22 + STARTMODE=auto STP=no TYPE=Bridge USERCTL=no @@ -1501,6 +1530,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6INIT=yes NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1513,6 +1543,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6INIT=yes NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1587,6 +1618,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no """), @@ -1597,6 +1629,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MTU=1480 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1606,6 +1639,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=52:54:00:12:34:ff NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no """), @@ -1973,6 +2007,7 @@ DEVICE=eth1000 HWADDR=07-1C-C6-75-A4-BE NM_CONTROLLED=no ONBOOT=yes 
+STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip() @@ -2094,6 +2129,7 @@ IPADDR=10.0.2.15 NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2119,6 +2155,7 @@ BOOTPROTO=dhcp DEVICE=eth0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2335,6 +2372,7 @@ DEVICE=eth1000 HWADDR=07-1C-C6-75-A4-BE NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip() @@ -2456,6 +2494,7 @@ IPADDR=10.0.2.15 NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2481,6 +2520,7 @@ BOOTPROTO=dhcp DEVICE=eth0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ -- cgit v1.2.3 From 94a64529dccebd8fe8c7969370b8696e46023fbd Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Wed, 30 Jan 2019 15:38:56 +0000 Subject: Resolve flake8 comparison and pycodestyle over-indent issues Fixes: - flake8: use ==/!= to compare str, bytes, and int literals - pycodestyle: E117 over-indented --- cloudinit/config/schema.py | 2 +- cloudinit/handlers/upstart_job.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- cloudinit/stages.py | 4 ++-- cloudinit/url_helper.py | 2 +- tests/unittests/test_distros/test_netconfig.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 080a6d06..807c3eee 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -367,7 +367,7 @@ def handle_schema_args(name, args): if not args.annotate: error(str(e)) except RuntimeError as e: - error(str(e)) + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 83fb0724..003cad60 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -89,7 +89,7 @@ def _has_suitable_upstart(): util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) return True except util.ProcessExecutionError as e: - if e.exit_code is 1: + if e.exit_code == 1: pass else: util.logexc(LOG, "dpkg --compare-versions failed [%s]", diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 9ccf2cdc..eb6f27b2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -442,7 +442,7 @@ def identify_aws(data): if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return CloudNames.AWS + return CloudNames.AWS return None diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8a064124..da7d349a 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -548,11 +548,11 @@ class Init(object): with events.ReportEventStack("consume-user-data", "reading and applying user-data", parent=self.reporter): - self._consume_userdata(frequency) + self._consume_userdata(frequency) with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): - self._consume_vendordata(frequency) + self._consume_vendordata(frequency) # Perform post-consumption adjustments so that # modules that run during the init stage reflect diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 396d69ae..0af0d9e3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -521,7 +521,7 @@ class OauthUrlHelper(object): if extra_exception_cb: ret = extra_exception_cb(msg, exception) finally: - 
self.exception_cb(msg, exception) + self.exception_cb(msg, exception) return ret def _headers_cb(self, extra_headers_cb, url): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e986b593..e4530408 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -407,7 +407,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return '/etc/netplan/50-cloud-init.yaml' def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { -- cgit v1.2.3 From cf30836645473c62599e838ab48b2d31677fa584 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 7 Feb 2019 22:38:41 +0000 Subject: netplan: Don't render yaml aliases when dumping netplan Cloud-init rendered netplan with duplicate aliases if a network config included "global" nameserver/search values. Netplan itself can read yaml files which use aliases, but cloud-init did not render a single yaml dictionary; instead it combined yaml sections into a single document, which sometimes resulted in duplicate aliases being present. This branch introduces a yaml SafeDumper class which can set the 'ignore_aliases' attribute. This is not enabled by default, but callers to util.yaml_dumps can pass a boolean to toggle this. The netplan renderer uses noalias=True and the resulting yaml output does not contain any aliases. LP: #1815051 --- cloudinit/net/netplan.py | 3 +- cloudinit/safeyaml.py | 7 + cloudinit/util.py | 17 ++- tests/unittests/test_net.py | 336 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 355 insertions(+), 8 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 21517fda..e54a34e5 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -361,7 +361,8 @@ class Renderer(renderer.Renderer): if section: dump = util.yaml_dumps({name: section}, explicit_start=False, - explicit_end=False) + explicit_end=False, + noalias=True) txt = util.indent(dump, ' ' * 4) return [txt] return [] diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 7bcf9dd3..3bd5e03d 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -17,6 +17,13 @@ _CustomSafeLoader.add_constructor( _CustomSafeLoader.construct_python_unicode) +class NoAliasSafeDumper(yaml.dumper.SafeDumper): + """A class which avoids constructing anchors/aliases on yaml dump""" + + def ignore_aliases(self, data): + return True + + def load(blob): return(yaml.load(blob, Loader=_CustomSafeLoader)) diff --git a/cloudinit/util.py b/cloudinit/util.py index a8a232b6..2be528a0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1596,14 +1596,17 @@ def json_dumps(data): separators=(',', ': '), default=json_serialize_default) -def yaml_dumps(obj, explicit_start=True, explicit_end=True): +def yaml_dumps(obj, explicit_start=True, explicit_end=True, noalias=False): """Return data in nicely formatted yaml.""" - return yaml.safe_dump(obj, - line_break="\n", - indent=4, - explicit_start=explicit_start, - explicit_end=explicit_end, - default_flow_style=False) + + return yaml.dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + Dumper=(safeyaml.NoAliasSafeDumper + if noalias else yaml.dumper.Dumper)) def ensure_dir(path, mode=None): diff --git a/tests/unittests/test_net.py 
b/tests/unittests/test_net.py index e041e978..f001ae5a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -19,6 +19,7 @@ import gzip import io import json import os +import re import textwrap import yaml @@ -103,6 +104,309 @@ STATIC_EXPECTED_1 = { 'address': '10.0.0.2'}], } +V1_NAMESERVER_ALIAS = """ +config: +- id: eno1 + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: eno1 + subnets: + - type: manual + type: physical +- id: eno2 + mac_address: 08:94:ef:51:ae:e1 + mtu: 1500 + name: eno2 + subnets: + - type: manual + type: physical +- id: eno3 + mac_address: 08:94:ef:51:ae:de + mtu: 1500 + name: eno3 + subnets: + - type: manual + type: physical +- bond_interfaces: + - eno1 + - eno3 + id: bondM + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: bondM + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.10.47/23 + gateway: 10.101.11.254 + type: static + type: bond +- id: eno4 + mac_address: 08:94:ef:51:ae:df + mtu: 1500 + name: eno4 + subnets: + - type: manual + type: physical +- id: enp0s20f0u1u6 + mac_address: 0a:94:ef:51:a4:b9 + mtu: 1500 + name: enp0s20f0u1u6 + subnets: + - type: manual + type: physical +- id: enp216s0f0 + mac_address: 68:05:ca:81:7c:e8 + mtu: 9000 + name: enp216s0f0 + subnets: + - type: manual + type: physical +- id: enp216s0f1 + mac_address: 68:05:ca:81:7c:e9 + mtu: 9000 + name: enp216s0f1 + subnets: + - type: manual + type: physical +- id: enp47s0f0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: enp47s0f0 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f0 + - enp47s0f0 + id: bond0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: bond0 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - type: manual + type: bond +- id: bond0.3502 + mtu: 9000 + name: bond0.3502 + subnets: + - address: 172.20.80.4/25 + type: static + type: vlan + vlan_id: 3502 + vlan_link: bond0 +- id: bond0.3503 + mtu: 9000 + name: bond0.3503 + subnets: + - address: 172.20.80.129/25 + type: static + type: vlan + vlan_id: 3503 + vlan_link: bond0 +- id: enp47s0f1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: enp47s0f1 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f1 + - enp47s0f1 + id: bond1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: bond1 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.8.65/26 + routes: + - destination: 213.119.192.0/24 + gateway: 10.101.8.126 + metric: 0 + type: static + type: bond +- address: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + type: nameserver +version: 1 +""" + +NETPLAN_NO_ALIAS = """ +network: + version: 2 + ethernets: + eno1: + match: + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + set-name: eno1 + eno2: + match: + macaddress: 08:94:ef:51:ae:e1 + mtu: 1500 + set-name: eno2 + eno3: + match: + macaddress: 08:94:ef:51:ae:de + mtu: 1500 + set-name: eno3 + eno4: + match: + macaddress: 08:94:ef:51:ae:df + mtu: 1500 + set-name: eno4 + enp0s20f0u1u6: + match: + macaddress: 0a:94:ef:51:a4:b9 + mtu: 1500 + set-name: enp0s20f0u1u6 + enp216s0f0: + match: + macaddress: 68:05:ca:81:7c:e8 + mtu: 9000 + set-name: enp216s0f0 + enp216s0f1: + match: + macaddress: 
68:05:ca:81:7c:e9 + mtu: 9000 + set-name: enp216s0f1 + enp47s0f0: + match: + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + set-name: enp47s0f0 + enp47s0f1: + match: + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + set-name: enp47s0f1 + bonds: + bond0: + interfaces: + - enp216s0f0 + - enp47s0f0 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + bond1: + addresses: + - 10.101.8.65/26 + interfaces: + - enp216s0f1 + - enp47s0f1 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + routes: + - metric: 0 + to: 213.119.192.0/24 + via: 10.101.8.126 + bondM: + addresses: + - 10.101.10.47/23 + gateway4: 10.101.11.254 + interfaces: + - eno1 + - eno3 + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + vlans: + bond0.3502: + addresses: + - 172.20.80.4/25 + id: 3502 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + bond0.3503: + addresses: + - 172.20.80.129/25 + id: 3503 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas +""" + + # Examples (and expected outputs for various renderers). 
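# A minimal sketch (editorial, not part of the patch above) of the
# aliasing behavior the netplan commit addresses: the nameservers dict
# below is shared between two sections, so a default yaml.dump emits an
# anchor/alias pair, while util.yaml_dumps(..., noalias=True) repeats
# the data instead.
def _noalias_demo():
    from cloudinit import util
    ns = {'addresses': ['10.101.10.1'], 'search': ['maas']}
    config = {'bond0': {'nameservers': ns}, 'bond1': {'nameservers': ns}}
    # the default Dumper renders the shared dict as '&id001' / '*id001'
    assert '&id001' in util.yaml_dumps(config)
    # NoAliasSafeDumper repeats the mapping under both bonds
    assert '&id001' not in util.yaml_dumps(config, noalias=True)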
OS_SAMPLES = [ { @@ -3065,6 +3369,38 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_has_yaml_no_aliases(self): + entry = { + 'yaml': V1_NAMESERVER_ALIAS, + 'expected_netplan': NETPLAN_NO_ALIAS, + } + network_config = yaml.load(entry['yaml']) + ns = network_state.parse_net_config_data(network_config) + files = self._render_and_read(state=ns) + # check for alias + content = files['/etc/netplan/50-cloud-init.yaml'] + + # test load the yaml to ensure we don't render something not loadable + # this allows single aliases, but not duplicate ones + parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml']) + self.assertNotEqual(None, parsed) + + # now look for any alias, avoid rendering them entirely + # generate the first anchor string using the template + # as of this writing, looks like "&id001" + anchor = r'&' + yaml.serializer.Serializer.ANCHOR_TEMPLATE % 1 + found_alias = re.search(anchor, content, re.MULTILINE) + if found_alias: + msg = "Error at: %s\nContent:\n%s" % (found_alias, content) + raise ValueError('Found yaml alias in rendered netplan: ' + msg) + + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): -- cgit v1.2.3 From fff37e7dc6849fd16db504b0d338fae20a7beb39 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 8 Feb 2019 22:08:47 +0000 Subject: netinfo: Adjust ifconfig output parsing for FreeBSD ipv6 entries The parser for FreeBSD ifconfig output doesn't find ipv6 scopeid values when they are present in the output, and the pformat rendering assumes that every ipv6 address will have a 'scope6' entry in the netdev info dictionary. This patch finds the scopeid value, which is not always inside <>; and since some v6 addrs have no scopeid value in the output at all, the table rendering now allows a missing scope6 value to be replaced with the empty value. 
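A rough, self-contained illustration of the two scopeid forms the parser now handles (the first sample line mirrors the test data added below; the bracketed form and the helper name are illustrative assumptions, not the module's actual structure):

    import re

    def parse_scope6(line):
        """Extract the token following 'scopeid', bracketed or bare."""
        toks = line.split()
        i = toks.index('scopeid')
        res = re.match(r'.*<(\S+)>', toks[i + 1])
        return res.group(1) if res else toks[i + 1]

    print(parse_scope6("inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4"))  # 0x4
    print(parse_scope6("inet6 fe80::1 prefixlen 64 scopeid <link>"))   # link
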
LP: #1779672 --- cloudinit/netinfo.py | 7 +++++-- cloudinit/tests/test_netinfo.py | 14 ++++++++++++++ tests/data/netinfo/freebsd-ifconfig-output | 17 +++++++++++++++++ tests/data/netinfo/freebsd-netdev-formatted-output | 11 +++++++++++ 4 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 tests/data/netinfo/freebsd-ifconfig-output create mode 100644 tests/data/netinfo/freebsd-netdev-formatted-output (limited to 'tests') diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9ff929c2..e91cd263 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -141,6 +141,9 @@ def _netdev_info_ifconfig(ifconfig_data): res = re.match(r'.*<(\S+)>', toks[i + 1]) if res: devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + return devs @@ -389,8 +392,8 @@ def netdev_pformat(): addr.get('scope', empty), data["hwaddr"])) for addr in data.get('ipv6'): tbl.add_row( - (dev, data["up"], addr["ip"], empty, addr["scope6"], - data["hwaddr"])) + (dev, data["up"], addr["ip"], empty, + addr.get("scope6", empty), data["hwaddr"])) if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: tbl.add_row((dev, data["up"], empty, empty, empty, data["hwaddr"])) diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index d76e768e..1c8a791e 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -11,6 +11,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") @@ -18,6 +19,7 @@ SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") class TestNetInfo(CiTestCase): @@ -43,6 +45,18 @@ class TestNetInfo(CiTestCase): content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') def test_netdev_iproute_pformat(self, m_subp, m_which): diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output new file mode 100644 index 00000000..3de15a5a --- /dev/null +++ b/tests/data/netinfo/freebsd-ifconfig-output @@ -0,0 +1,17 @@ +vtnet0: flags=8843 metric 0 mtu 1500 + options=6c07bb + ether fa:16:3e:14:1f:99 + hwaddr fa:16:3e:14:1f:99 + inet 10.1.80.61 netmask 0xfffff000 broadcast 10.1.95.255 + nd6 options=29 + media: Ethernet 10Gbase-T + 
status: active +pflog0: flags=0<> metric 0 mtu 33160 +pfsync0: flags=0<> metric 0 mtu 1500 + syncpeer: 0.0.0.0 maxupd: 128 defer: off +lo0: flags=8049 metric 0 mtu 16384 + options=600003 + inet6 ::1 prefixlen 128 + inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4 + inet 127.0.0.1 netmask 0xff000000 + nd6 options=21 diff --git a/tests/data/netinfo/freebsd-netdev-formatted-output b/tests/data/netinfo/freebsd-netdev-formatted-output new file mode 100644 index 00000000..a9d2ac14 --- /dev/null +++ b/tests/data/netinfo/freebsd-netdev-formatted-output @@ -0,0 +1,11 @@ ++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++ ++---------+-------+----------------+------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++---------+-------+----------------+------------+-------+-------------------+ +| lo0 | True | 127.0.0.1 | 0xff000000 | . | . | +| lo0 | True | ::1/128 | . | . | . | +| lo0 | True | fe80::1%lo0/64 | . | 0x4 | . | +| pflog0 | False | . | . | . | . | +| pfsync0 | False | . | . | . | . | +| vtnet0 | True | 10.1.80.61 | 0xfffff000 | . | fa:16:3e:14:1f:99 | ++---------+-------+----------------+------------+-------+-------------------+ -- cgit v1.2.3 From 0bb4c74e7f2d008b015d5453a1be88ae807b1f9b Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Thu, 14 Feb 2019 20:37:32 +0000 Subject: EC2: Rewrite network config on AWS Classic instances every boot AWS EC2 instances' networking comes in two basic flavors: Classic and VPC (Virtual Private Cloud). The former has an interesting behavior of having its MAC address changed whenever the instance is stopped/restarted. This behavior is not observed in VPC instances. In Ubuntu 18.04 (Bionic) the network "management" changed from ENI-style (/etc/network/interfaces) to netplan, and when using netplan we observe the following block present in /etc/netplan/50-cloud-init.yaml: match: macaddress: aa:bb:cc:dd:ee:ff Jani Ollikainen noticed in Launchpad bug #1802073 that EC2 Classic instances were booting without network access in Bionic after a stop/restart procedure, due to their MAC address change behavior. It was narrowed down to the netplan MAC match block, which kept the old MAC address after stopping and restarting an instance, since the network configuration writing happens by default only once in EC2 instances, on first boot. This patch changes the network configuration to be written on every boot in EC2 Classic instances, by checking against the "vpc-id" metadata information provided only in the VPC instances - if we don't have this metadata value, cloud-init will rewrite the network configuration file on every boot. This was tested in an EC2 Classic instance and proved to fix the issue; unit tests were also added for the new method is_classic_instance(). LP: #1802073 Reported-by: Jani Ollikainen Suggested-by: Ryan Harper Co-developed-by: Chad Smith Signed-off-by: Guilherme G. 
Piccoli --- cloudinit/sources/DataSourceEc2.py | 21 +++++++++++++++++++++ doc/rtd/topics/datasources/ec2.rst | 11 +++++++++++ tests/unittests/test_datasource/test_ec2.py | 24 ++++++++++++++++++++++++ 3 files changed, 56 insertions(+) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index eb6f27b2..4f2f6ccb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -19,6 +19,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -107,6 +108,19 @@ class DataSourceEc2(sources.DataSource): 'dynamic', {}).get('instance-identity', {}).get('document', {}) return True + def is_classic_instance(self): + """Report if this instance type is Ec2 Classic (non-vpc).""" + if not self.metadata: + # Can return False on inconclusive as we are also called in + # network_config where metadata will be present. + # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -320,6 +334,13 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + # RELEASE_BLOCKER: Xenial debian/postinst needs to add + # EventType.BOOT on upgrade path for classic. + + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change. + if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 64c325d8..76beca92 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -90,4 +90,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are 2 types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. On VPC instances this file is + generated only in the first boot of the instance. + The check for the instance type is performed by is_classic_instance() + method. + .. 
vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d9..20d59bfd 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -401,6 +401,30 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" -- cgit v1.2.3 From e7a8f81e6eee390ce6920df053bf7467b5e4dbd7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 22 Feb 2019 09:41:28 +0000 Subject: tests: integration test failure summary to use traceback if empty error When integration test verification fails, the returned object has 'error' and 'traceback' keys, either of which may hold an empty string. If the simplified 'error' message is empty, fall back to the more verbose full 'traceback' text in the failure summary. --- tests/cloud_tests/verify.py | 9 +++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 9911ecf2..7018f4d5 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -61,12 +61,17 @@ def format_test_failures(test_result): if not test_result['failures']: return '' failure_hdr = ' test failures:' - failure_fmt = ' * {module}.{class}.{function}\n {error}' + failure_fmt = ' * {module}.{class}.{function}\n ' output = [] for failure in test_result['failures']: if not output: output = [failure_hdr] - output.append(failure_fmt.format(**failure)) + msg = failure_fmt.format(**failure) + if failure.get('error'): + msg += failure['error'] + else: + msg += failure.get('traceback', '') + output.append(msg) return '\n'.join(output) -- cgit v1.2.3 From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 22 Feb 2019 13:26:31 +0000 Subject: azure: Filter list of ssh keys pulled from fabric The Azure data source is expected to expose a list of ssh keys for the user-to-be-provisioned in the crawled metadata. When configured to use the __builtin__ agent this list is built by the WALinuxAgentShim. The shim retrieves the full set of certificates and public keys exposed to the VM from the wireserver, extracts any ssh keys it can, and returns that list. This fix reduces that list of ssh keys to just the ones whose fingerprints appear in the "administrative user" section of the ovf-env.xml file; a minimal sketch of that filtering appears below.
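The sketch mirrors the _filter_pubkeys() helper added in this patch; the fingerprint values and key strings are made-up sample data, and the warning logs the real helper emits are omitted:

    def filter_pubkeys(keys_by_fingerprint, pubkey_info):
        """Keep only keys named in ovf-env.xml, by value or by fingerprint."""
        keys = []
        for pubkey in pubkey_info:
            if pubkey.get('value'):
                keys.append(pubkey['value'])  # key material given directly
            elif pubkey.get('fingerprint') in keys_by_fingerprint:
                keys.append(keys_by_fingerprint[pubkey['fingerprint']])
        return keys

    available = {  # everything the wireserver exposed (sample data)
        'FP-USER': 'ssh-rsa AAAA... provisioned-user-key',
        'FP-OTHER': 'ssh-rsa BBBB... unrelated-control-plane-key',
    }
    wanted = [{'fingerprint': 'FP-USER', 'path': 'some/path'}]
    assert filter_pubkeys(available, wanted) == [
        'ssh-rsa AAAA... provisioned-user-key']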
The Azure control plane exposes other ssh keys to the VM for other reasons, but those should not be added to the authorized_keys file for the provisioned user. --- cloudinit/sources/DataSourceAzure.py | 13 +- cloudinit/sources/helpers/azure.py | 109 ++++++++++----- tests/data/azure/parse_certificates_fingerprints | 4 + tests/data/azure/parse_certificates_pem | 152 +++++++++++++++++++++ tests/data/azure/pubkey_extract_cert | 13 ++ tests/data/azure/pubkey_extract_ssh_key | 1 + .../unittests/test_datasource/test_azure_helper.py | 71 +++++++++- 7 files changed, 322 insertions(+), 41 deletions(-) create mode 100644 tests/data/azure/parse_certificates_fingerprints create mode 100644 tests/data/azure/parse_certificates_pem create mode 100644 tests/data/azure/pubkey_extract_cert create mode 100644 tests/data/azure/pubkey_extract_ssh_key (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a4f998b3..eccbee5a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data @@ -909,13 +912,15 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': 'where/to/go'}] + # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': '/where/to/go'}] # # - # ABC/ABC + # ABC/x/y/z # ... # + # Under some circumstances, there may be a element along with the + # Fingerprint and Path. Pass those along if they appear. 
results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. + """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return 
endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. + """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 
+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US 
+issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P 
+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..02556165 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): -- cgit v1.2.3 From 8cfcc28db1acc7594dbbf76b846f4964f40f9e63 Mon Sep 17 00:00:00 2001 From: Eric Williams Date: Mon, 25 Feb 2019 19:09:39 +0000 Subject: Enable encrypted_data_bag_secret support for Chef Encrypted data bags require a secrets file to be present in order to decrypt them, and the location of the file must be configured in the Chef client configuration file, client.rb. This update enables cloud-init's chef module to update that setting in client.rb. LP: #1817082 --- cloudinit/config/cc_chef.py | 3 +++ doc/examples/cloud-config-chef.txt | 3 +++ templates/chef_client.rb.tmpl | 5 ++++- tests/unittests/test_handler/test_handler_chef.py | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 46abedd1..a6240306 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -51,6 +51,7 @@ file). chef: client_key: + encrypted_data_bag_secret: environment: file_backup_path: file_cache_path: @@ -114,6 +115,7 @@ CHEF_RB_TPL_DEFAULTS = { 'file_backup_path': "/var/backups/chef", 'pid_file': "/var/run/chef/client.pid", 'show_time': True, + 'encrypted_data_bag_secret': None, } CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) CHEF_RB_TPL_PATH_KEYS = frozenset([ @@ -124,6 +126,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([ 'json_attribs', 'file_cache_path', 'pid_file', + 'encrypted_data_bag_secret', ]) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index defc5a54..2320e01a 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -98,6 +98,9 @@ chef: # to the install script omnibus_version: "12.3.0" + # If encrypted data bags are used, the client needs to have a secrets file + # configured to decrypt them + encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret" # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl index cbb6b15f..99978d3b 100644 --- a/templates/chef_client.rb.tmpl +++ b/templates/chef_client.rb.tmpl @@ -1,6 +1,6 @@ ## template:jinja {# -This file is only utilized if the module 'cc_chef' is enabled in +This file is only utilized if the module 'cc_chef' is enabled in cloud-config.
Specifically, in order to enable it you need to add the following to config: chef: @@ -56,3 +56,6 @@ pid_file "{{pid_file}}" {% if show_time %} Chef::Log::Formatter.show_time = true {% endif %} +{% if encrypted_data_bag_secret %} +encrypted_data_bag_secret "{{encrypted_data_bag_secret}}" +{% endif %} diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index b16532ea..f4311268 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -145,6 +145,7 @@ class TestChef(FilesystemMockingTestCase): file_backup_path "/var/backups/chef" pid_file "/var/run/chef/client.pid" Chef::Log::Formatter.show_time = true + encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ tpl_file = util.load_file('templates/chef_client.rb.tmpl') self.patchUtils(self.tmp) @@ -157,6 +158,8 @@ class TestChef(FilesystemMockingTestCase): 'validation_name': 'bob', 'validation_key': "/etc/chef/vkey.pem", 'validation_cert': "this is my cert", + 'encrypted_data_bag_secret': + '/etc/chef/encrypted_data_bag_secret' }, } cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) -- cgit v1.2.3 From 1182ad5f9362e1570c622345a3ac996c07eb2eeb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:37:36 +0000 Subject: tests: fix some slow tests and some leaking state In test_ds_identify, don't mutate otherwise-static test data. When running tests in a random order, this was causing failures due to breaking preconditions for other tests. In tests/helpers, reset logging level in tearDown. Some of the CLI tests set the level of the root logger in a way that isn't correctly reset. For test_poll_imds_re_dhcp_on_timeout and test_dhcp_discovery_run_in_sandbox_warns_invalid_pid, mock out time.sleep; this saves ~11 seconds (or ~40% of previous test time!). 
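The time.sleep() patch is the generally reusable trick here; a small self-contained example of the pattern, where retry_thrice is a hypothetical stand-in for the slow code paths being tested:

    import time
    import unittest
    from unittest import mock

    def retry_thrice(func):
        # Hypothetical helper: three attempts with a 5-second pause between.
        for _attempt in range(3):
            if func():
                return True
            time.sleep(5)
        return False

    class TestRetry(unittest.TestCase):
        @mock.patch('time.sleep', mock.MagicMock())
        def test_retry_does_not_really_sleep(self):
            # With time.sleep patched out, all three attempts run instantly.
            self.assertFalse(retry_thrice(lambda: False))

    if __name__ == '__main__':
        unittest.main()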
--- cloudinit/net/tests/test_dhcp.py | 1 + cloudinit/tests/helpers.py | 1 + tests/unittests/test_datasource/test_azure.py | 1 + tests/unittests/test_ds_identify.py | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 79e8842f..51390249 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -117,6 +117,7 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual('eth9', call[0][1]) self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 2eb7b0cd..46a49416 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -207,6 +207,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + logging.getLogger().level = None util.subp = _real_subp super(CiTestCase, self).tearDown() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 417d86a9..5edf36e8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1692,6 +1692,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_media_switch, m_dhcp, diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 756b4fb4..d00c1b4b 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -441,7 +441,7 @@ class TestDsIdentify(DsIdentifyBase): nova does not identify itself on platforms other than intel. https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" - data = VALID_CFG['OpenStack'].copy() + data = copy.deepcopy(VALID_CFG['OpenStack']) del data['files'][P_PRODUCT_NAME] data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE, 'policy_no_dmi': POLICY_FOUND_OR_MAYBE}) -- cgit v1.2.3 From bd35300ba36bd63686715fa9661516a518781f6d Mon Sep 17 00:00:00 2001 From: Kurt Stieger Date: Mon, 4 Mar 2019 15:54:25 +0000 Subject: net: append type:dhcp[46] only if dhcp[46] is True in v2 netconfig When providing netplan configuration to cloud-init, the internal network state would enable DHCP if the 'dhcp' key was present in the source config. In netplan, dhcp[46] is a boolean and the value of the boolean should control whether DHCP is enabled rather than the presence of the key. This issue led to inconsistent sysconfig/network-scripts on Fedora: 'BOOTPROTO' was always 'dhcp', even if the address config was static. After this change, a dhcp subnet is added only if the 'dhcp' setting in the source config dict is True, as the sketch below shows.
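The core of the fix is the switch from a key-presence test to a truthiness test; a reduced sketch of the subnet extraction, simplified from the NetworkStateInterpreter change below:

    def subnets_for(cfg):
        subnets = []
        # Old code tested "'dhcp4' in cfg", so "dhcp4: false" still enabled
        # DHCP; cfg.get() makes the boolean value itself decide.
        if cfg.get('dhcp4'):
            subnets.append({'type': 'dhcp4'})
        if cfg.get('dhcp6'):
            subnets.append({'type': 'dhcp6'})
        return subnets

    assert subnets_for({'dhcp4': False, 'dhcp6': False}) == []
    assert subnets_for({'dhcp4': True}) == [{'type': 'dhcp4'}]
    assert subnets_for({}) == []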
LP: #1818032 --- cloudinit/net/network_state.py | 4 +-- tests/unittests/test_net.py | 61 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index f76e508a..539b76d8 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -706,9 +706,9 @@ class NetworkStateInterpreter(object): """Common ipconfig extraction from v2 to v1 subnets array.""" subnets = [] - if 'dhcp4' in cfg: + if cfg.get('dhcp4'): subnets.append({'type': 'dhcp4'}) - if 'dhcp6' in cfg: + if cfg.get('dhcp6'): self.use_ipv6 = True subnets.append({'type': 'dhcp6'}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f001ae5a..e3b9e02b 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -406,6 +406,23 @@ network: - maas """ +NETPLAN_DHCP_FALSE = """ +version: 2 +ethernets: + ens3: + match: + macaddress: 52:54:00:ab:cd:ef + dhcp4: false + dhcp6: false + addresses: + - 192.168.42.100/24 + - 2001:db8::100/32 + gateway4: 192.168.42.1 + gateway6: 2001:db8::1 + nameservers: + search: [example.com] + addresses: [192.168.42.53, 1.1.1.1] +""" # Examples (and expected outputs for various renderers). OS_SAMPLES = [ @@ -2590,6 +2607,50 @@ USERCTL=no config = sysconfig.ConfigObj(nm_cfg) self.assertIn('ifcfg-rh', config['main']['plugins']) + def test_netplan_dhcp_false_disable_dhcp_in_state(self): + """netplan config with dhcp[46]: False should not add dhcp in state""" + net_config = yaml.load(NETPLAN_DHCP_FALSE) + ns = network_state.parse_net_config_data(net_config, + skip_broken=False) + + dhcp_found = [snet for iface in ns.iter_interfaces() + for snet in iface['subnets'] if 'dhcp' in snet['type']] + + self.assertEqual([], dhcp_found) + + def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): + """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" + + entry = { + 'yaml': NETPLAN_DHCP_FALSE, + 'expected_sysconfig': { + 'ifcfg-ens3': textwrap.dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=ens3 + DNS1=192.168.42.53 + DNS2=1.1.1.1 + DOMAIN=example.com + GATEWAY=192.168.42.1 + HWADDR=52:54:00:ab:cd:ef + IPADDR=192.168.42.100 + IPV6ADDR=2001:db8::100/32 + IPV6INIT=yes + IPV6_DEFAULTGW=2001:db8::1 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + } + + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._assert_headers(found) + class TestOpenSuseSysConfigRendering(CiTestCase): -- cgit v1.2.3 From eee0e09ead3d11c32e8888d13d164810ee5f19d6 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 4 Mar 2019 16:50:31 +0000 Subject: tip-pylint: Fix assignment-from-return-none errors pylint now complains about assignment of None from a return of a function call. This does not account for subclassing so we resolve this issue by removing the assignment in the unittest. 
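A toy reproducer of the pylint complaint, with hypothetical classes standing in for the DataSource hierarchy; pylint resolves the call to the base implementation, sees a bare "return None", and flags the assignment even though a subclass may return a real value:

    class Base(object):
        def device_name_to_device(self, name):
            # Base implementation intentionally returns None.
            return None

    class ConfigDriveLike(Base):
        def device_name_to_device(self, name):
            return '/dev/' + name

    cfg_ds = ConfigDriveLike()
    # pylint's assignment-from-none fires on the two-step form:
    #     device = cfg_ds.device_name_to_device('root')
    #     assert device == '/dev/root'
    # Folding the call into the assertion, as the patch does, avoids it:
    assert cfg_ds.device_name_to_device('root') == '/dev/root'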
--- tests/unittests/test_datasource/test_configdrive.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index dcdabea5..7a6802f6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -268,8 +268,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', side_effect=exists_side_effect())) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) self.assertEqual(exists_mock.call_count, 2) @@ -296,8 +295,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', return_value=True)) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) exists_mock.assert_called_once_with(mock.ANY) @@ -331,8 +329,7 @@ class TestConfigDriveDataSource(CiTestCase): yield True with mock.patch.object(os.path, 'exists', side_effect=exists_side_effect()): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) # We don't assert the call count for os.path.exists() because # not all of the entries in name_tests results in two calls to # that function. Specifically, 'root2k' doesn't seem to call @@ -359,8 +356,7 @@ class TestConfigDriveDataSource(CiTestCase): } for name, dev_name in name_tests.items(): with mock.patch.object(os.path, 'exists', return_value=True): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): """Verify a dir is read as such.""" -- cgit v1.2.3 From edf052c3196139169ecbfe98049c278f4babc8ca Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:21:59 +0000 Subject: drop Python 2.6 support and our NIH version detection - Remove the last few places that use `if PY26` - Replace our Python version detection logic with six's (which we were already using in most places) --- cloudinit/tests/helpers.py | 22 +--------------------- cloudinit/util.py | 4 ---- tests/unittests/test_datasource/test_azure.py | 4 +--- 3 files changed, 2 insertions(+), 28 deletions(-) (limited to 'tests') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 46a49416..f41180fd 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -41,26 +41,6 @@ _real_subp = util.subp SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf -# Used for detecting different python versions -PY2 = False -PY26 = False -PY27 = False -PY3 = False - -_PY_VER = sys.version_info -_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] -if (_PY_MAJOR, _PY_MINOR) <= (2, 6): - if (_PY_MAJOR, _PY_MINOR) == (2, 6): - PY26 = True - if (_PY_MAJOR, _PY_MINOR) >= (2, 0): - PY2 = True -else: - if (_PY_MAJOR, _PY_MINOR) == (2, 7): - PY27 = True - PY2 = True - if (_PY_MAJOR, _PY_MINOR) >= (3, 0): - PY3 = True - # Makes the old path start # with new base instead of whatever @@ -357,7 +337,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = 
retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if PY3 else '__builtin__.open' + name = 'builtins.open' if six.PY3 else '__builtin__.open' self.patched_funcs.enter_context(mock.patch(name, trap_func)) def patchStdoutAndStderr(self, stdout=None, stderr=None): diff --git a/cloudinit/util.py b/cloudinit/util.py index e5403f7d..a192091f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -72,7 +72,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], PROC_CMDLINE = None _LSB_RELEASE = {} -PY26 = sys.version_info[0:2] == (2, 6) def get_architecture(target=None): @@ -2815,9 +2814,6 @@ def load_shell_content(content, add_empty=False, empty_val=None): variables. Set their value to empty_val.""" def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") return shlex.split(blob, comments=True) data = {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5edf36e8..6b05b8f1 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,7 +11,7 @@ from cloudinit.util import (b64e, decode_binary, load_file, write_file, from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, PY26, SkipTest) + ExitStack) import crypt import httpretty @@ -221,8 +221,6 @@ class TestAzureDataSource(CiTestCase): def setUp(self): super(TestAzureDataSource, self).setUp() - if PY26: - raise SkipTest("Does not work on python 2.6") self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty -- cgit v1.2.3 From 5352dd99eb2937b4eaaaf596b40ad7ca69d87f64 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:41:05 +0000 Subject: helpers/openstack: Treat unknown link types as physical Some deployments of OpenStack expose link types to the guest which cloud-init doesn't recognise. These will almost always be physical, so we can operate more robustly if we assume that they are (whilst warning the user that we're seeing something unexpected). LP: #1639263 --- cloudinit/sources/helpers/openstack.py | 12 +++++------ .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..8f069115 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge', @@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7a6802f6..520c50fe 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -600,6 +600,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -726,6 +729,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") -- cgit v1.2.3 From 7c07af289b77ce9ae2e20c6f2638a54e63f016ef Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 6 Mar 2019 20:23:35 +0000 Subject: Support locking user with usermod if passwd is not available. In some cases, the 'passwd' command might not be available, but 'usermod' might be. On Debian systems both are provided by the 'passwd' package. On Red Hat/CentOS, 'passwd' comes from the 'passwd' package while 'usermod' comes from `shadow-utils`. This change supports either one, with no real cost other than the check.
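A standalone sketch of the tool-selection logic in this change; which() here is a stub standing in for cloudinit.util.which so the snippet runs anywhere:

    def which(program, _installed=('usermod',)):
        # Stub for cloudinit.util.which; pretend only 'usermod' is installed.
        return program if program in _installed else None

    def pick_lock_cmd(name):
        # passwd must use the short '-l'; SLES 11 lacks the long '--lock'.
        lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
        try:
            return next(tool for tool in lock_tools if which(tool[0]))
        except StopIteration:
            raise RuntimeError(
                "Unable to lock user account '%s'. No tools available." % name)

    assert pick_lock_cmd('bob') == ['usermod', '--lock', 'bob']

In the real patch, util.subp() then runs whichever command was selected.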
--- cloudinit/distros/__init__.py | 13 +++++++---- tests/unittests/test_distros/test_create_users.py | 28 +++++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ef618c28..20c994dc 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -577,11 +577,16 @@ class Distro(object): """ Lock the password of a user, i.e., disable password logins """ + # passwd must use short '-l' due to SLES11 lacking long form '--lock' + lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: - # Need to use the short option name '-l' instead of '--lock' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. - util.subp(['passwd', '-l', name]) + cmd = next(l for l in lock_tools if util.which(l[0])) + except StopIteration: + raise RuntimeError(( + "Unable to lock user account '%s'. No tools available. " + " Tried: %s.") % (name, [c[0] for c in lock_tools])) + try: + util.subp(cmd) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index c3f258d5..40624952 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -240,4 +240,32 @@ class TestCreateUser(CiTestCase): [mock.call(set(['auth1']), user), # not disabled mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, + m_is_snappy): + """Lock uses usermod --lock if no 'passwd' cmd available.""" + m_which.side_effect = lambda m: m in ('usermod',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['usermod', '--lock', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_passwd_if_available(self, m_which, m_subp, + m_is_snappy): + """Lock with only passwd will use passwd.""" + m_which.side_effect = lambda m: m in ('passwd',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['passwd', '-l', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, + m_is_snappy): + """Lock with no commands available raises RuntimeError.""" + m_which.return_value = None + with self.assertRaises(RuntimeError): + self.dist.lock_passwd("bob") + # vi: ts=4 expandtab -- cgit v1.2.3 From 3acaacc92be1b7d7bad099c323d6e923664a8afa Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Tue, 12 Mar 2019 21:08:22 +0000 Subject: net/sysconfig: Handle default route setup for dhcp configured NICs When the network configuration has a default route configured and another network device that is configured with dhcp, SUSE sysconfig output should not accept the default route provided by the dhcp server. 
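Reduced to its essence, the rule is: a dhcp-configured NIC may only install the default route offered by its DHCP server when no other part of the configuration already provides one. A condensed sketch with a hypothetical helper; the real logic is spread across _render_subnets and _render_subnet_routes in the diff below:

    # Condensed decision: may a dhcp NIC accept its server's default route?
    def accept_dhcp_default_route(nic_is_dhcp, cfg_has_default_route):
        if not nic_is_dhcp:
            return True  # not applicable; static routes are written explicitly
        # Another interface (or subnet route) already owns the default route,
        # so this NIC is rendered with DHCLIENT_SET_DEFAULT_ROUTE=no.
        return not cfg_has_default_route

    assert accept_dhcp_default_route(True, cfg_has_default_route=True) is False
    assert accept_dhcp_default_route(True, cfg_has_default_route=False) is True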
LP: #1812117 --- cloudinit/net/network_state.py | 41 +++++++++++++++++++++------ cloudinit/net/sysconfig.py | 31 +++++++++++++++------ tests/unittests/test_net.py | 63 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+), 17 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 539b76d8..4d19f562 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -148,6 +148,7 @@ class NetworkState(object): self._network_state = copy.deepcopy(network_state) self._version = version self.use_ipv6 = network_state.get('use_ipv6', False) + self._has_default_route = None @property def config(self): @@ -157,14 +158,6 @@ class NetworkState(object): def version(self): return self._version - def iter_routes(self, filter_func=None): - for route in self._network_state.get('routes', []): - if filter_func is not None: - if filter_func(route): - yield route - else: - yield route - @property def dns_nameservers(self): try: @@ -179,6 +172,12 @@ class NetworkState(object): except KeyError: return [] + @property + def has_default_route(self): + if self._has_default_route is None: + self._has_default_route = self._maybe_has_default_route() + return self._has_default_route + def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) for iface in six.itervalues(ifaces): @@ -188,6 +187,32 @@ class NetworkState(object): if filter_func(iface): yield iface + def iter_routes(self, filter_func=None): + for route in self._network_state.get('routes', []): + if filter_func is not None: + if filter_func(route): + yield route + else: + yield route + + def _maybe_has_default_route(self): + for route in self.iter_routes(): + if self._is_default_route(route): + return True + for iface in self.iter_interfaces(): + for subnet in iface.get('subnets', []): + for route in subnet.get('routes', []): + if self._is_default_route(route): + return True + return False + + def _is_default_route(self, route): + default_nets = ('::', '0.0.0.0') + return ( + route.get('prefix') == 0 + and route.get('network') in default_nets + ) + @six.add_metaclass(CommandHandlerMeta) class NetworkStateInterpreter(object): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 19b3e60c..e59753d5 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -322,7 +322,7 @@ class Renderer(renderer.Renderer): iface_cfg[new_key] = old_value @classmethod - def _render_subnets(cls, iface_cfg, subnets): + def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values iface_cfg['BOOTPROTO'] = 'none' @@ -331,6 +331,7 @@ class Renderer(renderer.Renderer): mtu_key = 'MTU' subnet_type = subnet.get('type') if subnet_type == 'dhcp6': + # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True iface_cfg['DHCPV6C'] = True elif subnet_type in ['dhcp4', 'dhcp']: @@ -375,9 +376,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - continue - elif subnet_type in ['dhcp4', 'dhcp']: + if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: + if has_default_route and iface_cfg['BOOTPROTO'] != 'none': + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): @@ -443,6 +444,8 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the 
default route? iface_cfg['DEFROUTE'] = True + if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: if is_ipv6 or is_ipv6_addr(route['gateway']): iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] @@ -493,7 +496,9 @@ class Renderer(renderer.Renderer): iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -518,7 +523,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) # iter_interfaces on network-state is not sorted to produce @@ -547,7 +554,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @staticmethod @@ -608,7 +617,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -620,7 +631,9 @@ class Renderer(renderer.Renderer): iface_cfg.kind = 'infiniband' iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e3b9e02b..468d544a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -860,6 +860,7 @@ NETWORK_CONFIGS = { BOOTPROTO=dhcp DEFROUTE=yes DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes DNS1=8.8.8.8 DNS2=8.8.4.4 DOMAIN="barley.maas sach.maas" @@ -1234,6 +1235,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-bond0.200': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 @@ -1333,6 +1335,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-eth5': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no @@ -1988,6 +1991,23 @@ CONFIG_V1_SIMPLE_SUBNET = { 'type': 'static'}], 'type': 'physical'}]} +CONFIG_V1_MULTI_IFACE = { + 'version': 1, + 'config': [{'type': 'physical', + 'mtu': 1500, + 'subnets': [{'type': 'static', + 'netmask': '255.255.240.0', + 'routes': [{'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '51.68.80.1'}], + 'address': '51.68.89.122', + 'ipv4': True}], + 'mac_address': 'fa:16:3e:25:b4:59', + 'name': 'eth0'}, + {'type': 'physical', + 'mtu': 9000, + 'subnets': [{'type': 'dhcp4'}], + 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} DEFAULT_DEV_ATTRS = { 'eth1000': { @@ -2460,6 +2480,49 @@ USERCTL=no respath = '/etc/resolv.conf' self.assertNotIn(respath, 
found.keys()) + def test_network_config_v1_multi_iface_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network-scripts/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected_i1 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=51.68.80.1 +HWADDR=fa:16:3e:25:b4:59 +IPADDR=51.68.89.122 +MTU=1500 +NETMASK=255.255.240.0 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0']) + expected_i2 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth1 +DHCLIENT_SET_DEFAULT_ROUTE=no +HWADDR=fa:16:3e:b1:ca:29 +MTU=9000 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1']) + def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") -- cgit v1.2.3 From f2fd6eac4407e60d0e98826ab03847dda4cde138 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 14 Mar 2019 23:06:47 +0000 Subject: DataSource: move update_events from a class to an instance attribute Currently, DataSourceAzure updates self.update_events in __init__. As update_events is a class attribute on DataSource, this updates it for all instances of classes derived from DataSource including those for other clouds. This means that if DataSourceAzure is even instantiated, its behaviour is applied to whichever data source ends up being used for boot. To address this, update_events is moved from a class attribute to an instance attribute (that is therefore populated at instantiation time). This retains the defaults for all DataSource sub-class instances, but avoids them being able to mutate the state in instances of other DataSource sub-classes. update_events is only ever referenced on an instance of DataSource (or a sub-class); no code relies on it being a class attribute. (In fact, it's only used within methods on DataSource or its sub-classes, so it doesn't even _need_ to remain public, though I think it's appropriate for it to be public.) DataSourceScaleway is also updated to move update_events from a class attribute to an instance attribute, as the class attribute would now be masked by the DataSource instance attribute. 
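To make the pitfall concrete, here is a minimal, hypothetical sketch (not cloud-init code) of how mutating a class attribute through self leaks state into sibling subclasses:

    class Base(object):
        events = {'network': set(['first-boot'])}   # one dict shared by all

    class Azure(Base):
        def __init__(self):
            # Looks instance-local, but mutates the dict owned by Base:
            self.events['network'].add('every-boot')

    class Ec2(Base):
        pass

    Azure()              # merely instantiating Azure...
    print(Ec2().events)  # ...shows 'every-boot' leaked into Ec2 as well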
LP: #1819913 --- cloudinit/sources/DataSourceScaleway.py | 3 ++- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 +++++++++++++++ tests/unittests/test_datasource/test_scaleway.py | 7 +++++++ 4 files changed, 27 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b382..54bfc1fe 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + self.update_events = { + 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..1604932d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,9 +164,6 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). - # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} - # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -191,6 +188,9 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None + # Default: generate network config on new instance id (first boot). 
+ self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} + self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..cb1912be 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) + def test_data_sources_cant_mutate_update_events_for_others(self): + """update_events shouldn't be changed for other DSes (LP: #1819913)""" + + class ModifyingDS(DataSource): + + def __init__(self, sys_cfg, distro, paths): + # This mirrors what DataSourceAzure does which causes LP: + # #1819913 + DataSource.__init__(self, sys_cfg, distro, paths) + self.update_events['network'].add(EventType.BOOT) + + before_update_events = copy.deepcopy(self.datasource.update_events) + ModifyingDS(self.sys_cfg, self.distro, self.paths) + self.assertEqual(before_update_events, self.datasource.update_events) + class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a2..3bfd7527 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + def test_update_events_is_correct(self): + """ensure update_events contains correct data""" + self.assertEqual( + {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + self.datasource.update_events) -- cgit v1.2.3 From 5e5894d68d21bf33649aca36973a0ef2fe72f01d Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 19 Mar 2019 14:24:37 +0000 Subject: Add ubuntu_drivers config module The ubuntu_drivers config module enables usage of the 'ubuntu-drivers' command. At this point it only serves as a way of installing NVIDIA drivers for general purpose graphics processing unit (GPGPU) functionality. Also, a small usability improvement to get_cfg_by_path to allow it to take a string for the key path "toplevel/second/mykey" in addition to the original: ("toplevel", "second", "mykey") --- cloudinit/config/cc_ubuntu_drivers.py | 112 +++++++++++++++++ cloudinit/config/tests/test_ubuntu_drivers.py | 174 ++++++++++++++++++++++++++ cloudinit/util.py | 15 +++ config/cloud.cfg.tmpl | 3 + doc/rtd/topics/modules.rst | 1 + tests/unittests/test_handler/test_schema.py | 1 + 6 files changed, 306 insertions(+) create mode 100644 cloudinit/config/cc_ubuntu_drivers.py create mode 100644 cloudinit/config/tests/test_ubuntu_drivers.py (limited to 'tests') diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py new file mode 100644 index 00000000..91feb603 --- /dev/null +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -0,0 +1,112 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Ubuntu Drivers: Interact with third party drivers in Ubuntu.""" + +from textwrap import dedent + +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging +from cloudinit.settings import PER_INSTANCE +from cloudinit import type_utils +from cloudinit import util + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE +distros = ['ubuntu'] +schema = { + 'id': 'cc_ubuntu_drivers', + 'name': 'Ubuntu Drivers', + 'title': 'Interact with third party drivers in Ubuntu.', + 'description': dedent("""\ + This module interacts with the 'ubuntu-drivers' command to install + third party driver packages."""), + 'distros': distros, + 'examples': [dedent("""\ + drivers: + nvidia: + license-accepted: true + """)], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'drivers': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'nvidia': { + 'type': 'object', + 'additionalProperties': False, + 'required': ['license-accepted'], + 'properties': { + 'license-accepted': { + 'type': 'boolean', + 'description': ("Do you accept the NVIDIA driver" + " license?"), + }, + 'version': { + 'type': 'string', + 'description': ( + 'The version of the driver to install (e.g.' + ' "390", "410"). Defaults to the latest' + ' version.'), + }, + }, + }, + }, + }, + }, +} +OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( + "ubuntu-drivers: error: argument : invalid choice: 'install'") + +__doc__ = get_schema_doc(schema) # Supplement python help() + + +def install_drivers(cfg, pkg_install_func): + if not isinstance(cfg, dict): + raise TypeError( + "'drivers' config expected dict, found '%s': %s" % + (type_utils.obj_name(cfg), cfg)) + + cfgpath = 'nvidia/license-accepted' + # Call translate_bool to ensure that we treat string values like "yes" as + # acceptance and _don't_ treat string values like "nah" as acceptance + # because they're True-ish + nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath)) + if not nv_acc: + LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) + return + + if not util.which('ubuntu-drivers'): + LOG.debug("'ubuntu-drivers' command not available. " + "Installing ubuntu-drivers-common") + pkg_install_func(['ubuntu-drivers-common']) + + driver_arg = 'nvidia' + version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version') + if version_cfg: + driver_arg += ':{}'.format(version_cfg) + + LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)", + cfgpath, nv_acc, version_cfg if version_cfg else 'latest') + + try: + util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) + except util.ProcessExecutionError as exc: + if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: + LOG.warning('the available version of ubuntu-drivers is' + ' too old to perform requested driver installation') + elif 'No drivers found for installation.' in exc.stdout: + LOG.warning('ubuntu-drivers found no drivers for installation') + raise + + +def handle(name, cfg, cloud, log, _args): + if "drivers" not in cfg: + log.debug("Skipping module named %s, no 'drivers' key in config", name) + return + + validate_cloudconfig_schema(cfg, schema) + install_drivers(cfg['drivers'], cloud.distro.install_packages) diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py new file mode 100644 index 00000000..efba4ce7 --- /dev/null +++ b/cloudinit/config/tests/test_ubuntu_drivers.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +import copy + +from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) +from cloudinit.config import cc_ubuntu_drivers as drivers +from cloudinit.util import ProcessExecutionError + +MPATH = "cloudinit.config.cc_ubuntu_drivers." +OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( + "ubuntu-drivers: error: argument : invalid choice: 'install' " + "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") + + +class TestUbuntuDrivers(CiTestCase): + cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] + + with_logs = True + + @skipUnlessJsonSchema() + def test_schema_requires_boolean_for_license_accepted(self): + with self.assertRaisesRegex( + SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): + validate_cloudconfig_schema( + {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, + schema=drivers.schema, strict=True) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_happy_path_taken(self, config, m_which, m_subp): + """Positive path test through handle. Package should be installed.""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_handle_does_package_install(self): + self._assert_happy_path_taken(self.cfg_accepted) + + def test_trueish_strings_are_considered_approval(self): + for true_value in ['yes', 'true', 'on', '1']: + new_config = copy.deepcopy(self.cfg_accepted) + new_config['drivers']['nvidia']['license-accepted'] = true_value + self._assert_happy_path_taken(new_config) + + @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( + stdout='No drivers found for installation.\n', exit_code=1)) + @mock.patch(MPATH + "util.which", return_value=False) + def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp): + """If ubuntu-drivers doesn't install any drivers, raise an error.""" + myCloud = mock.MagicMock() + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('ubuntu-drivers found no drivers for installation', + self.logs.getvalue()) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_inert_with_config(self, config, m_which, m_subp): + """Helper to reduce repetition when testing negative cases""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual(0, myCloud.distro.install_packages.call_count) + self.assertEqual(0, m_subp.call_count) + + def test_handle_inert_if_license_not_accepted(self): + """Ensure we don't do anything if the license is rejected.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': False}}}) + + def test_handle_inert_if_garbage_in_license_field(self): + """Ensure we don't do anything if unknown text is in license field.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': 
'garbage'}}}) + + def test_handle_inert_if_no_license_key(self): + """Ensure we don't do anything if no license key.""" + self._assert_inert_with_config({'drivers': {'nvidia': {}}}) + + def test_handle_inert_if_no_nvidia_key(self): + """Ensure we don't do anything if other license accepted.""" + self._assert_inert_with_config( + {'drivers': {'acme': {'license-accepted': True}}}) + + def test_handle_inert_if_string_given(self): + """Ensure we don't do anything if string refusal given.""" + for false_value in ['no', 'false', 'off', '0']: + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': false_value}}}) + + @mock.patch(MPATH + "install_drivers") + def test_handle_no_drivers_does_nothing(self, m_install_drivers): + """If no 'drivers' key in the config, nothing should be done.""" + myCloud = mock.MagicMock() + myLog = mock.MagicMock() + drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) + self.assertIn('Skipping module named', + myLog.debug.call_args_list[0][0][0]) + self.assertEqual(0, m_install_drivers.call_count) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=True) + def test_install_drivers_no_install_if_present(self, m_which, m_subp): + """If 'ubuntu-drivers' is present, no package install should occur.""" + pkg_install = mock.MagicMock() + drivers.install_drivers(self.cfg_accepted['drivers'], + pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + self.assertEqual([mock.call('ubuntu-drivers')], + m_which.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_install_drivers_rejects_invalid_config(self): + """install_drivers should raise TypeError if not given a config dict""" + pkg_install = mock.MagicMock() + with self.assertRaisesRegex(TypeError, ".*expected dict.*"): + drivers.install_drivers("mystring", pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + + @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( + stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)) + @mock.patch(MPATH + "util.which", return_value=False) + def test_install_drivers_handles_old_ubuntu_drivers_gracefully( + self, m_which, m_subp): + """Older ubuntu-drivers versions should emit message and raise error""" + myCloud = mock.MagicMock() + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('WARNING: the available version of ubuntu-drivers is' + ' too old to perform requested driver installation', + self.logs.getvalue()) + + +# Sub-class TestUbuntuDrivers to run the same test cases, but with a version +class TestUbuntuDriversWithVersion(TestUbuntuDrivers): + cfg_accepted = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def test_version_none_uses_latest(self, m_which, m_subp): + myCloud = mock.MagicMock() + version_none_cfg = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} + drivers.handle( + 'ubuntu_drivers', version_none_cfg, myCloud, None, None) + self.assertEqual( + 
[mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], + m_subp.call_args_list) + + def test_specifying_a_version_doesnt_override_license_acceptance(self): + self._assert_inert_with_config({ + 'drivers': {'nvidia': {'license-accepted': False, + 'version': '123'}} + }) + +# vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index a192091f..385f231c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None): # get a cfg entry by its path array # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) def get_cfg_by_path(yobj, keyp, default=None): + """Return the value of the item at path C{keyp} in C{yobj}. + + example: + get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4 + get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None + + @param yobj: A dictionary. + @param keyp: A path inside yobj. It can be a '/' delimited string, + or an iterable. + @param default: The default to return if the path does not exist. + @return: The value of the item at keyp, or default if the path + is not found.""" + + if isinstance(keyp, six.string_types): + keyp = keyp.split("/") cur = yobj for tok in keyp: if tok not in cur: diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 7513176b..25db43e0 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -112,6 +112,9 @@ cloud_final_modules: - landscape - lxd {% endif %} +{% if variant in ["ubuntu", "unknown"] %} + - ubuntu-drivers +{% endif %} {% if variant not in ["freebsd"] %} - puppet - chef diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index d9720f6a..3dcdd3bc 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -54,6 +54,7 @@ Modules .. automodule:: cloudinit.config.cc_ssh_import_id .. automodule:: cloudinit.config.cc_timezone .. automodule:: cloudinit.config.cc_ubuntu_advantage +.. automodule:: cloudinit.config.cc_ubuntu_drivers .. automodule:: cloudinit.config.cc_update_etc_hosts .. automodule:: cloudinit.config.cc_update_hostname .. automodule:: cloudinit.config.cc_users_groups diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 1bad07f6..e69a47a9 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase): 'cc_runcmd', 'cc_snap', 'cc_ubuntu_advantage', + 'cc_ubuntu_drivers', 'cc_zypper_add_repo' ], [subschema['id'] for subschema in schema['allOf']]) -- cgit v1.2.3 From 22e332933e78bc1c819c4f876d48620605ae813b Mon Sep 17 00:00:00 2001 From: Raphael Glon Date: Thu, 21 Mar 2019 13:38:53 +0000 Subject: net: Fix ipv6 static routes when using eni renderer When rendering ipv6 static routes in eni format the post-up/pre-down commands were not correct for ipv6.
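The distinction the fix introduces can be sketched in a few lines (illustrative only, not the renderer code itself; route(8) needs '-A inet6' rather than '-net' for IPv6 targets):

    def route_target(route):
        # Sketch: choose the address-family argument the way the fix does.
        family = '-A inet6' if ':' in route['network'] else '-net'
        return '%s %s/%s' % (family, route['network'], route['prefix'])

    route_target({'network': '10.0.0.0', 'prefix': 8})
    # -> '-net 10.0.0.0/8'         (post-up route add -net 10.0.0.0/8 gw ...)
    route_target({'network': '2001:67c:1', 'prefix': 32})
    # -> '-A inet6 2001:67c:1/32'  (post-up route add -A inet6 ... gw ...)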
LP: #1818669 --- cloudinit/net/eni.py | 16 +++-- tests/unittests/test_net.py | 147 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 143 insertions(+), 20 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 64236320..b129bb62 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -366,8 +366,6 @@ class Renderer(renderer.Renderer): down = indent + "pre-down route del" or_true = " || true" mapping = { - 'network': '-net', - 'netmask': 'netmask', 'gateway': 'gw', 'metric': 'metric', } @@ -379,13 +377,21 @@ class Renderer(renderer.Renderer): default_gw = ' -A inet6 default' route_line = '' - for k in ['network', 'netmask', 'gateway', 'metric']: - if default_gw and k in ['network', 'netmask']: + for k in ['network', 'gateway', 'metric']: + if default_gw and k == 'network': continue if k == 'gateway': route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) elif k in route: - route_line += ' %s %s' % (mapping[k], route[k]) + if k == 'network': + if ':' in route[k]: + route_line += ' -A inet6' + else: + route_line += ' -net' + if 'prefix' in route: + route_line += ' %s/%s' % (route[k], route['prefix']) + else: + route_line += ' %s %s' % (mapping[k], route[k]) content.append(up + route_line + or_true) content.append(down + route_line + or_true) return content diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 468d544a..1b415b00 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1114,8 +1114,8 @@ iface eth0.101 inet static iface eth0.101 inet static address 192.168.2.10/24 -post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true """), 'expected_netplan': textwrap.dedent(""" network: @@ -1508,17 +1508,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - gateway: 192.168.0.3 netmask: 255.255.255.0 network: 10.1.3.0 - - gateway: 2001:67c:1562:1 - network: 2001:67c:1 - netmask: ffff:ffff:0 - - gateway: 3001:67c:1562:1 - network: 3001:67c:1 - netmask: ffff:ffff:0 - metric: 10000 - type: static address: 192.168.1.2/24 - type: static address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 + netmask: ffff:ffff:0 + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 + netmask: ffff:ffff:0 + metric: 10000 """), 'expected_netplan': textwrap.dedent(""" network: @@ -1557,6 +1558,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true to: 3001:67c:1/32 via: 3001:67c:1562:1 """), + 'expected_eni': textwrap.dedent("""\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 
inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + """), 'yaml-v2': textwrap.dedent(""" version: 2 ethernets: @@ -3633,17 +3679,17 @@ class TestEniRoundTrip(CiTestCase): 'iface eth0 inet static', ' address 172.23.31.42/26', ' gateway 172.23.31.2', - ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('post-up route add -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('pre-down route del -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), - ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() @@ -3651,6 +3697,77 @@ class TestEniRoundTrip(CiTestCase): self.assertEqual( expected, [line for line in found if line]) + def test_ipv6_static_routes(self): + # as reported in bug 1818669 + conf = [ + {'name': 'eno3', 'type': 'physical', + 'subnets': [{ + 'address': 'fd00::12/64', + 'dns_nameservers': ['fd00:2::15'], + 'gateway': 'fd00::1', + 'ipv6': True, + 'type': 'static', + 'routes': [{'netmask': '32', + 'network': 'fd00:12::', + 'gateway': 'fd00::2'}, + {'network': 'fd00:14::', + 'gateway': 'fd00::3'}, + {'destination': 'fe00:14::/48', + 'gateway': 'fe00::4', + 'metric': 500}, + {'gateway': '192.168.23.1', + 'metric': 999, + 'netmask': 24, + 'network': '192.168.23.0'}, + {'destination': '10.23.23.0/24', + 'gateway': '10.23.23.2', + 'metric': 300}]}]}, + ] + + files = self._render_and_read( + network_config={'config': conf, 'version': 1}) + expected = [ + 'auto lo', + 'iface lo inet loopback', + 'auto eno3', + 'iface eno3 inet6 static', + ' address fd00::12/64', + ' dns-nameservers fd00:2::15', + ' gateway fd00::1', + (' post-up route add -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' pre-down route del -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' post-up route add -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' pre-down route del -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' post-up route add -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' pre-down route del -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' post-up route add -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' pre-down route del -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' post-up route add -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + (' pre-down route del -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + + ] + found = files['/etc/network/interfaces'].splitlines() + + self.assertEqual( + expected, [line for line in found if line]) + + def testsimple_render_bond(self): + entry = NETWORK_CONFIGS['bond'] + files = 
self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + class TestNetRenderers(CiTestCase): @mock.patch("cloudinit.net.renderers.sysconfig.available") -- cgit v1.2.3 From bb0b6f1d4e587d74a6e8fe17fa1c4dc3cf5287f7 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 25 Mar 2019 15:53:12 +0000 Subject: net/sysconfig: write out SUSE-compatible IPv6 config For writing IPv6 addresses to ifcfg-* the name "IPV6ADDR" is used. For secondary IPs the value for "IPV6ADDR_SECONDARIES" is set. On SUSE based distributions the names "IPADDR6" and "IPADDR6_$SOMELABEL" need to be used. --- cloudinit/net/sysconfig.py | 3 +++ tests/unittests/test_distros/test_netconfig.py | 2 ++ tests/unittests/test_net.py | 14 ++++++++++++++ 3 files changed, 19 insertions(+) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index e59753d5..09983929 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -386,10 +386,13 @@ class Renderer(renderer.Renderer): ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) if ipv6_index == 0: iface_cfg['IPV6ADDR'] = ipv6_cidr + iface_cfg['IPADDR6'] = ipv6_cidr elif ipv6_index == 1: iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + iface_cfg['IPADDR6_0'] = ipv6_cidr else: iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr + iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e4530408..c3c0c8c5 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 @@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1b415b00..fd03deb6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -691,6 +691,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -729,6 +732,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -980,6 +986,7 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=iface0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes NETMASK=255.255.255.0 @@ -1249,6 +1256,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEFROUTE=yes DEVICE=br0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes IPV6_DEFAULTGW=2001:4800:78ff:1b::1 @@ -1690,6 +1698,7 @@ iface bond0 inet6 static MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 
IPV6INIT=yes MTU=9000 @@ -1745,6 +1754,7 @@ iface bond0 inet6 static MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes MTU=9000 @@ -1835,6 +1845,7 @@ iface bond0 inet6 static GATEWAY=192.168.1.1 IPADDR=192.168.2.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes IPV6_DEFAULTGW=2001:1::1 @@ -1896,6 +1907,7 @@ iface bond0 inet6 static BRIDGE=br0 DEVICE=eth0 HWADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 IPV6ADDR=2001:1::100/96 IPV6INIT=yes NM_CONTROLLED=no @@ -1909,6 +1921,7 @@ iface bond0 inet6 static BRIDGE=br0 DEVICE=eth1 HWADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 IPV6ADDR=2001:1::101/96 IPV6INIT=yes NM_CONTROLLED=no @@ -2743,6 +2756,7 @@ USERCTL=no GATEWAY=192.168.42.1 HWADDR=52:54:00:ab:cd:ef IPADDR=192.168.42.100 + IPADDR6=2001:db8::100/32 IPV6ADDR=2001:db8::100/32 IPV6INIT=yes IPV6_DEFAULTGW=2001:db8::1 -- cgit v1.2.3 From 0dc3a77f41f4544e4cb5a41637af7693410d4cdf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Tue, 26 Mar 2019 18:53:50 +0000 Subject: Azure: Ensure platform random_seed is always serializable as JSON. The Azure platform surfaces random bytes into /sys via Hyper-V. Python 2.7 json.dump() raises an exception if asked to convert a str with non-character content, and python 3.0 json.dump() won't serialize a "bytes" value. As a result, c-i instance data is often not written by Azure, making reboots slower (c-i has to repeat work). The random data is base64-encoded and then decoded into a string (str or unicode depending on the version of Python in use). The base64 string has just as many bits of entropy, so we're not throwing away useful "information", but we can be certain json.dump() will correctly serialize the bits. --- cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- tests/data/azure/non_unicode_random_string | 1 + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 tests/data/azure/non_unicode_random_string (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eccbee5a..b4e3f061 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by # stock ubuntu suported images. @@ -195,6 +196,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. 
find the seed on FreeBSD platform - # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. + seed = base64.b64encode(seed).decode() + + return seed def list_possible_azure_ds_devs(): diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string new file mode 100644 index 00000000..b9ecefb9 --- /dev/null +++ b/tests/data/azure/non_unicode_random_string @@ -0,0 +1 @@ +OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$ \ No newline at end of file diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 6b05b8f1..53c56cd0 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,11 +7,11 @@ from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, - MountFailedError) + MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack) + ExitStack, resourceLocation) import crypt import httpretty @@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase): self.logs.getvalue()) +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab -- cgit v1.2.3 From 47c53002ea7a661c674c3e409357db7e8a00297a Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 1 Apr 2019 14:24:26 +0000 Subject: cloud_tests: fix apt_pipelining test-cases The apt_pipelining test-cases were broken, but until cloud-init changed its default behavior to not disable pipelining, they silently passed, as both only ever checked whether pipelining was disabled.
First, the tests used the 'apt' namespace, which is not for configuring pipelining; that requires the 'apt_pipelining' namespace. Second, the 'os' variant needs to check that cloud-init does not write a configuration file; it was a copy-and-paste error from the disable test-case. This branch fixes the config and collection to validate both scenarios. --- tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | 3 +-- tests/cloud_tests/testcases/modules/apt_pipelining_os.py | 6 +++--- tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | 9 ++++----- 3 files changed, 8 insertions(+), 10 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml index bd9b5d08..22a31dc4 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml @@ -5,8 +5,7 @@ required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: false + apt_pipelining: false collect_scripts: 90cloud-init-pipelining: | #!/bin/bash diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py index 740dc7c0..2b940a66 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py @@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase): """Test apt-pipelining module.""" def test_os_pipelining(self): - """Test pipelining set to os.""" - out = self.get_data_file('90cloud-init-pipelining') - self.assertIn('Acquire::http::Pipeline-Depth "0";', out) + """test 'os' setting does not write apt config file.""" + out = self.get_data_file('90cloud-init-pipelining_not_written') + self.assertEqual(0, int(out)) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml index cbed3ba3..86d5220b 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml @@ -1,15 +1,14 @@ # -# Set apt pipelining value to OS +# Set apt pipelining value to OS, no conf written # required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: os + apt_pipelining: os collect_scripts: - 90cloud-init-pipelining: | + 90cloud-init-pipelining_not_written: | #!/bin/bash - cat /etc/apt/apt.conf.d/90cloud-init-pipelining + ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l # vi: ts=4 expandtab -- cgit v1.2.3 From b76714c355a87416f9f07156b0f025aceaca7296 Mon Sep 17 00:00:00 2001 From: Risto Oikarinen Date: Tue, 9 Apr 2019 18:05:24 +0000 Subject: Change DataSourceNoCloud to ignore file system label's case. NoCloud data source now accepts both 'cidata' and 'CIDATA' as filesystem labels. This is similar to DataSourceConfigDrive's support for 'config-2' and 'CONFIG-2'.
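Reduced to its essence, the change below searches both spellings of the label (a sketch; find_devs_with stands in for cloudinit.util.find_devs_with):

    def devs_with_label(find_devs_with, label='cidata'):
        # Collect devices matching either case of the volume label.
        devs = find_devs_with("LABEL=%s" % label.upper())        # CIDATA
        devs.extend(find_devs_with("LABEL=%s" % label.lower()))  # cidata
        return sorted(set(devs), reverse=True)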
--- cloudinit/sources/DataSourceNoCloud.py | 4 ++- doc/rtd/topics/datasources/nocloud.rst | 2 +- tests/unittests/test_datasource/test_nocloud.py | 42 +++++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 ++++++++++ tools/ds-identify | 7 +++-- 5 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..fcf5d589 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..1c5cf961 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..b785362f 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, 
m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4b..8c18aa1a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..6518901e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 6322c2ddf4b68a8e7cc467a07fb20a1d151a2ef3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Apr 2019 20:21:37 +0000 Subject: Revert "DataSource: move update_events from a class to an instance..." Moving update_events from a class attribute to an instance attribute means that it doesn't exist on DataSource objects that are unpickled, causing tracebacks on cloud-init upgrade. As this change is only required for cloud-init installations which don't utilise ds-identify, we're backing it out to be reintroduced once the upgrade path bug has been addressed. This reverts commit f2fd6eac4407e60d0e98826ab03847dda4cde138. 
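The upgrade failure is reproducible outside cloud-init in a few lines; unpickling restores an object's __dict__ without re-running __init__, so anything assigned only in __init__ is missing from objects cached by an older version (hypothetical sketch):

    import pickle

    class DataSource(object):                # old layout: class attribute
        update_events = {'network': set(['boot-new-instance'])}

    cached = pickle.dumps(DataSource())      # what a prior boot would cache

    class DataSource(object):                # new layout: set in __init__
        def __init__(self):
            self.update_events = {'network': set(['boot-new-instance'])}

    restored = pickle.loads(cached)            # __init__ is not called here
    print(hasattr(restored, 'update_events'))  # False -> AttributeError later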
--- cloudinit/sources/DataSourceScaleway.py | 3 +-- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 --------------- tests/unittests/test_datasource/test_scaleway.py | 7 ------- 4 files changed, 4 insertions(+), 27 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 54bfc1fe..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,11 +171,10 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) - self.update_events = { - 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1604932d..e6966b31 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,6 +164,9 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). + # Default: generate network config on new instance id (first boot). + update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -188,9 +191,6 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # Default: generate network config on new instance id (first boot). 
- self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} - self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index cb1912be..6378e98b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,21 +575,6 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) - def test_data_sources_cant_mutate_update_events_for_others(self): - """update_events shouldn't be changed for other DSes (LP: #1819913)""" - - class ModifyingDS(DataSource): - - def __init__(self, sys_cfg, distro, paths): - # This mirrors what DataSourceAzure does which causes LP: - # #1819913 - DataSource.__init__(self, sys_cfg, distro, paths) - self.update_events['network'].add(EventType.BOOT) - - before_update_events = copy.deepcopy(self.datasource.update_events) - ModifyingDS(self.sys_cfg, self.distro, self.paths) - self.assertEqual(before_update_events, self.datasource.update_events) - class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 3bfd7527..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,7 +7,6 @@ import requests from cloudinit import helpers from cloudinit import settings -from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -404,9 +403,3 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') - - def test_update_events_is_correct(self): - """ensure update_events contains correct data""" - self.assertEqual( - {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, - self.datasource.update_events) -- cgit v1.2.3 From c8c32515778983d244126d4e359be9e91b3ce9e5 Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Thu, 18 Apr 2019 21:23:36 +0000 Subject: test_azure: mock util.SeLinuxGuard where needed Mock util.SeLinuxGuard to do nothing within tests that mock functions used by the guard, when those mocks confuse the guard. This has no impact when executing unit tests on systems which do not enable selinux (e.g. Ubuntu). 
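In isolation, the pattern looks like this (a sketch using unittest.mock directly; the tests below wire the same methods onto the patched guard):

    from unittest import mock

    m_guard = mock.MagicMock()
    m_guard.__enter__ = mock.Mock(return_value=False)  # selinux not enabled
    m_guard.__exit__ = mock.Mock(return_value=False)   # don't swallow errors

    with m_guard:   # 'with util.SeLinuxGuard(...):' degrades to a no-op
        pass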
LP: #1825253 --- tests/unittests/test_datasource/test_azure.py | 3 +++ tests/unittests/test_net.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 53c56cd0..ab77c034 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1375,12 +1375,15 @@ class TestCanDevBeReformatted(CiTestCase): self._domock(p + "util.mount_cb", 'm_mount_cb') self._domock(p + "os.path.realpath", 'm_realpath') self._domock(p + "os.path.exists", 'm_exists') + self._domock(p + "util.SeLinuxGuard", 'm_selguard') self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs self.m_mount_cb.side_effect = mount_cb self.m_partitions_on_device.side_effect = partitions_on_device + self.m_selguard.__enter__ = mock.Mock(return_value=False) + self.m_selguard.__exit__ = mock.Mock() def test_three_partitions_is_false(self): """A disk with 3 partitions can not be formatted.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fd03deb6..ca6ef97d 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3269,9 +3269,12 @@ class TestNetplanPostcommands(CiTestCase): mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch('cloudinit.util.SeLinuxGuard') @mock.patch.object(netplan, "get_devicelist") @mock.patch('cloudinit.util.subp') - def test_netplan_postcmds(self, mock_subp, mock_devlist): + def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + mock_sel.__enter__ = mock.Mock(return_value=False) + mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [['lo']] tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, -- cgit v1.2.3 From 5de83fc54c17b504842a924e7db08e8c2c1cebf9 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 22 Apr 2019 22:46:40 +0000 Subject: net/sysconfig: only indicate available on known sysconfig distros Restrict the sysconfig renderer availability to known distros. Ubuntu/Debian systems may include network-manager but they do not have support for reading sysconfig network output; that is enabled via a Network-Manager plugin, ifcfg-rh, which is not available in Ubuntu/Debian.
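Condensed, the new gate reads roughly as follows (distro list copied from the diff below; the function name is illustrative):

    KNOWN_DISTROS = ['opensuse', 'sles', 'suse', 'redhat', 'fedora', 'centos']

    def sysconfig_available(distro, has_sysconfig_tools, has_nm):
        # network-manager alone no longer qualifies a distro that cannot
        # read sysconfig output (no ifcfg-rh plugin on Ubuntu/Debian).
        return distro in KNOWN_DISTROS and (has_sysconfig_tools or has_nm)

    sysconfig_available('ubuntu', False, True)   # False: netplan/eni instead
    sysconfig_available('centos', False, True)   # True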
LP: #1819994 --- cloudinit/net/sysconfig.py | 6 ++++-- tests/unittests/test_net.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 09983929..a47da0a8 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,6 +18,8 @@ from .network_state import ( LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" +KNOWN_DISTROS = [ + 'opensuse', 'sles', 'suse', 'redhat', 'fedora', 'centos'] def _make_header(sep='#'): @@ -717,8 +719,8 @@ class Renderer(renderer.Renderer): def available(target=None): sysconfig = available_sysconfig(target=target) nm = available_nm(target=target) - - return any([nm, sysconfig]) + return (util.get_linux_distro()[0] in KNOWN_DISTROS + and any([nm, sysconfig])) def available_sysconfig(target=None): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index ca6ef97d..9db01567 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3829,6 +3829,41 @@ class TestNetRenderers(CiTestCase): self.assertRaises(net.RendererNotFoundError, renderers.select, priority=['sysconfig', 'eni']) + @mock.patch("cloudinit.net.renderers.netplan.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") + @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") + @mock.patch("cloudinit.net.renderers.eni.available") + @mock.patch("cloudinit.net.renderers.sysconfig.util.get_linux_distro") + def test_sysconfig_selected_on_sysconfig_enabled_distros(self, m_distro, + m_eni, m_sys_nm, + m_sys_scfg, + m_netplan): + """sysconfig only selected on specific distros (rhel/sles).""" + + # Ubuntu with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = True # netplan is installed + m_distro.return_value = ('ubuntu', None, None) + self.assertEqual('netplan', renderers.select(priority=None)[0]) + + # Centos with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_distro.return_value = ('centos', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + + # OpenSuse with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_distro.return_value = ('opensuse', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], -- cgit v1.2.3 From 784d3300f213c78d197a7ac8ad42cb098fd82356 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 27 Apr 2019 02:40:47 +0000 Subject: git tests: no longer show warning about safe yaml. Currently on 18.04, running tox -e py27 will spew errors like: .tests/unittests/test_net.py:2649: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. The change here just uses cloud-init's yaml, which does safeloading by default. 
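For reference, the PyYAML behavior in question (a generic illustration, not cloud-init code):

    import yaml

    yaml.safe_load("a: 1")                      # warning-free, SafeLoader
    yaml.load("a: 1", Loader=yaml.SafeLoader)   # equivalent, explicit loader
    # yaml.load("a: 1") with no Loader is what PyYAML >= 5.1 warns about.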
--- cloudinit/net/tests/test_init.py | 2 +- tests/unittests/test_net.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index f55c31e8..6d2affe7 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -7,11 +7,11 @@ import mock import os import requests import textwrap -import yaml import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase +from cloudinit import safeyaml as yaml class TestSysDevPath(CiTestCase): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 9db01567..e85e9640 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -9,6 +9,7 @@ from cloudinit.net import ( from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import util +from cloudinit import safeyaml as yaml from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) @@ -21,7 +22,7 @@ import json import os import re import textwrap -import yaml +from yaml.serializer import Serializer DHCP_CONTENT_1 = """ @@ -3575,7 +3576,7 @@ class TestNetplanRoundTrip(CiTestCase): # now look for any alias, avoid rendering them entirely # generate the first anchor string using the template # as of this writing, looks like "&id001" - anchor = r'&' + yaml.serializer.Serializer.ANCHOR_TEMPLATE % 1 + anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1 found_alias = re.search(anchor, content, re.MULTILINE) if found_alias: msg = "Error at: %s\nContent:\n%s" % (found_alias, content) -- cgit v1.2.3 From 86674f013dfcea3c075ab41373ffb475881066f6 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Mon, 29 Apr 2019 20:22:16 +0000 Subject: Azure: Changes to the Hyper-V KVP Reporter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit  + Truncate KVP Pool file to prevent stale entries from being processed by the Hyper-V KVP reporter.  + Drop filtering of KVPs as it is no longer needed.  + Batch appending of existing KVP entries. 
--- cloudinit/reporting/handlers.py | 117 +++++++++++++++---------------- tests/unittests/test_reporting_hyperv.py | 104 +++++++++++++-------------- 2 files changed, 106 insertions(+), 115 deletions(-) mode change 100644 => 100755 cloudinit/reporting/handlers.py mode change 100644 => 100755 tests/unittests/test_reporting_hyperv.py (limited to 'tests') diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py old mode 100644 new mode 100755 index 6d23558e..10165aec --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -5,7 +5,6 @@ import fcntl import json import six import os -import re import struct import threading import time @@ -14,6 +13,7 @@ from cloudinit import log as logging from cloudinit.registry import DictRegistry from cloudinit import (url_helper, util) from datetime import datetime +from six.moves.queue import Empty as QueueEmptyError if six.PY2: from multiprocessing.queues import JoinableQueue as JQueue @@ -129,24 +129,50 @@ class HyperVKvpReportingHandler(ReportingHandler): DESC_IDX_KEY = 'msg_i' JSON_SEPARATORS = (',', ':') KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' + _already_truncated_pool_file = False def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None): super(HyperVKvpReportingHandler, self).__init__() self._kvp_file_path = kvp_file_path + HyperVKvpReportingHandler._truncate_guest_pool_file( + self._kvp_file_path) + self._event_types = event_types self.q = JQueue() - self.kvp_file = None self.incarnation_no = self._get_incarnation_no() self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) - self._current_offset = 0 self.publish_thread = threading.Thread( target=self._publish_event_routine) self.publish_thread.daemon = True self.publish_thread.start() + @classmethod + def _truncate_guest_pool_file(cls, kvp_file): + """ + Truncate the pool file if it has not been truncated since boot. + This should be done exactly once for the file indicated by + KVP_POOL_FILE_GUEST constant above. This method takes a filename + so that we can use an arbitrary file during unit testing. + Since KVP is a best-effort telemetry channel we only attempt to + truncate the file once and only if the file has not been modified + since boot. Additional truncation can lead to loss of existing + KVPs. + """ + if cls._already_truncated_pool_file: + return + boot_time = time.time() - float(util.uptime()) + try: + if os.path.getmtime(kvp_file) < boot_time: + with open(kvp_file, "w"): + pass + except (OSError, IOError) as e: + LOG.warning("failed to truncate kvp pool file, %s", e) + finally: + cls._already_truncated_pool_file = True + def _get_incarnation_no(self): """ use the time passed as the incarnation number. 
@@ -162,20 +188,15 @@ class HyperVKvpReportingHandler(ReportingHandler): def _iterate_kvps(self, offset): """iterate the kvp file from the current offset.""" - try: - with open(self._kvp_file_path, 'rb+') as f: - self.kvp_file = f - fcntl.flock(f, fcntl.LOCK_EX) - f.seek(offset) + with open(self._kvp_file_path, 'rb') as f: + fcntl.flock(f, fcntl.LOCK_EX) + f.seek(offset) + record_data = f.read(self.HV_KVP_RECORD_SIZE) + while len(record_data) == self.HV_KVP_RECORD_SIZE: + kvp_item = self._decode_kvp_item(record_data) + yield kvp_item record_data = f.read(self.HV_KVP_RECORD_SIZE) - while len(record_data) == self.HV_KVP_RECORD_SIZE: - self._current_offset += self.HV_KVP_RECORD_SIZE - kvp_item = self._decode_kvp_item(record_data) - yield kvp_item - record_data = f.read(self.HV_KVP_RECORD_SIZE) - fcntl.flock(f, fcntl.LOCK_UN) - finally: - self.kvp_file = None + fcntl.flock(f, fcntl.LOCK_UN) def _event_key(self, event): """ @@ -207,23 +228,13 @@ class HyperVKvpReportingHandler(ReportingHandler): return {'key': k, 'value': v} - def _update_kvp_item(self, record_data): - if self.kvp_file is None: - raise ReportException( - "kvp file '{0}' not opened." - .format(self._kvp_file_path)) - self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) - self.kvp_file.write(record_data) - def _append_kvp_item(self, record_data): - with open(self._kvp_file_path, 'rb+') as f: + with open(self._kvp_file_path, 'ab') as f: fcntl.flock(f, fcntl.LOCK_EX) - # seek to end of the file - f.seek(0, 2) - f.write(record_data) + for data in record_data: + f.write(data) f.flush() fcntl.flock(f, fcntl.LOCK_UN) - self._current_offset = f.tell() def _break_down(self, key, meta_data, description): del meta_data[self.MSG_KEY] @@ -279,40 +290,26 @@ class HyperVKvpReportingHandler(ReportingHandler): def _publish_event_routine(self): while True: + items_from_queue = 0 try: event = self.q.get(block=True) - need_append = True + items_from_queue += 1 + encoded_data = [] + while event is not None: + encoded_data += self._encode_event(event) + try: + # get all the rest of the events in the queue + event = self.q.get(block=False) + items_from_queue += 1 + except QueueEmptyError: + event = None try: - if not os.path.exists(self._kvp_file_path): - LOG.warning( - "skip writing events %s to %s. file not present.", - event.as_string(), - self._kvp_file_path) - encoded_event = self._encode_event(event) - # for each encoded_event - for encoded_data in (encoded_event): - for kvp in self._iterate_kvps(self._current_offset): - match = ( - re.match( - r"^{0}\|(\d+)\|.+" - .format(self.EVENT_PREFIX), - kvp['key'] - )) - if match: - match_groups = match.groups(0) - if int(match_groups[0]) < self.incarnation_no: - need_append = False - self._update_kvp_item(encoded_data) - continue - if need_append: - self._append_kvp_item(encoded_data) - except IOError as e: - LOG.warning( - "failed posting event to kvp: %s e:%s", - event.as_string(), e) + self._append_kvp_item(encoded_data) + except (OSError, IOError) as e: + LOG.warning("failed posting events to kvp, %s", e) finally: - self.q.task_done() - + for _ in range(items_from_queue): + self.q.task_done() # when main process exits, q.get() will through EOFError # indicating we should exit this thread. except EOFError: @@ -322,7 +319,7 @@ class HyperVKvpReportingHandler(ReportingHandler): # if the kvp pool already contains a chunk of data, # so defer it to another thread. 
def publish_event(self, event): - if (not self._event_types or event.event_type in self._event_types): + if not self._event_types or event.event_type in self._event_types: self.q.put(event) def flush(self): diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100644 new mode 100755 index 2e64c6c7..d01ed5b3 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -1,10 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.reporting import events -from cloudinit.reporting import handlers +from cloudinit.reporting.handlers import HyperVKvpReportingHandler import json import os +import struct +import time from cloudinit import util from cloudinit.tests.helpers import CiTestCase @@ -13,7 +15,7 @@ from cloudinit.tests.helpers import CiTestCase class TestKvpEncoding(CiTestCase): def test_encode_decode(self): kvp = {'key': 'key1', 'value': 'value1'} - kvp_reporting = handlers.HyperVKvpReportingHandler() + kvp_reporting = HyperVKvpReportingHandler() data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) decoded_kvp = kvp_reporting._decode_kvp_item(data) @@ -26,57 +28,9 @@ class TextKvpReporter(CiTestCase): self.tmp_file_path = self.tmp_path('kvp_pool_file') util.ensure_file(self.tmp_file_path) - def test_event_type_can_be_filtered(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path, - event_types=['foo', 'bar']) - - reporter.publish_event( - events.ReportingEvent('foo', 'name', 'description')) - reporter.publish_event( - events.ReportingEvent('some_other', 'name', 'description3')) - reporter.q.join() - - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) - - reporter.publish_event( - events.ReportingEvent('bar', 'name', 'description2')) - reporter.q.join() - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(2, len(kvps)) - - self.assertIn('foo', kvps[0]['key']) - self.assertIn('bar', kvps[1]['key']) - self.assertNotIn('some_other', kvps[0]['key']) - self.assertNotIn('some_other', kvps[1]['key']) - - def test_events_are_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) - - reporter.publish_event( - events.ReportingEvent('foo', 'name1', 'description')) - reporter.publish_event( - events.ReportingEvent('foo', 'name2', 'description')) - reporter.q.join() - self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - - reporter2 = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter2.incarnation_no = reporter.incarnation_no + 1 - reporter2.publish_event( - events.ReportingEvent('foo', 'name3', 'description')) - reporter2.q.join() - - self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) - def test_events_with_higher_incarnation_not_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) reporter.publish_event( @@ -86,7 +40,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - reporter3 = handlers.HyperVKvpReportingHandler( + reporter3 = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter3.incarnation_no = reporter.incarnation_no - 1 reporter3.publish_event( 
@@ -95,7 +49,7 @@ class TextKvpReporter(CiTestCase): self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) def test_finish_event_result_is_logged(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -105,7 +59,7 @@ class TextKvpReporter(CiTestCase): def test_file_operation_issue(self): os.remove(self.tmp_file_path) - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -113,7 +67,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() def test_event_very_long(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE long_event = events.FinishReportingEvent( @@ -132,3 +86,43 @@ class TextKvpReporter(CiTestCase): self.assertEqual(msg_slice['msg_i'], i) full_description += msg_slice['msg'] self.assertEqual(description, full_description) + + def test_not_truncate_kvp_file_modified_after_boot(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + cur_time = time.time() + os.utime(self.tmp_file_path, (cur_time, cur_time)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + def test_truncate_stale_kvp_file(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + + # set the time ways back to make it look like + # we had an old kvp file + os.utime(self.tmp_file_path, (1000000, 1000000)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(0, len(kvps)) -- cgit v1.2.3 From ab6621d849b24bb652243e88c79f6f3b446048d7 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 8 May 2019 14:54:03 +0000 Subject: DataSourceAzure: Adjust timeout for polling IMDS If the IMDS primary server is not available, falling back to the secondary server takes about 1s. The net result is that the expected E2E time is slightly more than 1s. This change increases the timeout to 2s to prevent the infinite loop of timeouts. 
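The arithmetic behind the change, spelled out (a hedged reconstruction of the timing described above, not measured data):

    # Old behavior, timeout=1:
    #   t=0.0   request sent; primary IMDS server is unavailable
    #   t~1.0   platform finishes failing over to the secondary server
    #   t=1.0   the 1s timeout fires first -> UrlError -> retry from t=0
    # With the expected end-to-end time slightly over 1s, a 1s timeout
    # can never be satisfied, and infinite=True retries forever.
    IMDS_TIMEOUT_IN_SECONDS = 2  # headroom beyond the ~1s failover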
--- cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- tests/unittests/test_datasource/test_azure.py | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 64165259..b7440c1d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by @@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): return self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, exception_cb=exc_cb, + infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ab77c034..427ab7e7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -1791,7 +1792,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( @@ -1828,7 +1830,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From acc25d8d7d603313059ac35b4253b504efc560a9 Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Wed, 8 May 2019 22:47:07 +0000 Subject: cc_mounts: check if mount -a on no-change fstab path Under some circumstances, cc_disk_setup may reformat volumes which already appear in /etc/fstab (e.g. Azure ephemeral drive is reformatted from NTFS to ext4 after service-heal). 
Normally, cc_mounts only calls mount -a if it altered /etc/fstab. With this change, cc_mounts will read /proc/mounts to verify whether the configured mounts are already mounted and, if not, raise a flag to request a mount -a. This handles the case where no changes to fstab occur but a mount -a is required due to a change in the underlying device, which prevented the .mount unit from running until after the disk was reformatted. LP: #1825596 --- cloudinit/config/cc_mounts.py | 11 ++++++++ .../unittests/test_handler/test_handler_mounts.py | 30 +++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 339baba9..123ffb84 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -439,6 +439,7 @@ def handle(_name, cfg, cloud, log, _args): cc_lines = [] needswap = False + need_mount_all = False dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this @@ -449,11 +450,18 @@ def handle(_name, cfg, cloud, log, _args): dirs.append(line[1]) cc_lines.append('\t'.join(line)) + mount_points = [v['mountpoint'] for k, v in util.mounts().items() + if 'mountpoint' in v] for d in dirs: try: util.ensure_dir(d) except Exception: util.logexc(log, "Failed to make '%s' config-mount", d) + # dirs is list of directories on which a volume should be mounted. + # If any of them does not already show up in the list of current + # mount points, we will definitely need to do mount -a. + if not need_mount_all and d not in mount_points: + need_mount_all = True sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] @@ -473,6 +481,9 @@ def handle(_name, cfg, cloud, log, _args): log.debug("No changes to /etc/fstab made.") else: log.debug("Changes to fstab: %s", sops) + need_mount_all = True + + if need_mount_all: activate_cmds.append(["mount", "-a"]) if uses_systemd: activate_cmds.append(["systemctl", "daemon-reload"]) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index 8fea6c2a..0fb160be 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -154,7 +154,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return_value=True) self.add_patch('cloudinit.config.cc_mounts.util.subp', - 'mock_util_subp') + 'm_util_subp') + + self.add_patch('cloudinit.config.cc_mounts.util.mounts', + 'mock_util_mounts', + return_value={ + '/dev/sda1': {'fstype': 'ext4', + 'mountpoint': '/', + 'opts': 'rw,relatime,discard' + }}) self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() @@ -230,4 +238,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) + def test_no_change_fstab_sets_needs_mount_all(self): + '''verify unchanged fstab entries are mounted if not call mount -a''' + fstab_original_content = ( + 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' + 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' + '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' + ) + fstab_expected_content = fstab_original_content + cc = {'mounts': [ + ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content)
cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + self.m_util_subp.assert_has_calls([ + mock.call(['mount', '-a']), + mock.call(['systemctl', 'daemon-reload'])]) + # vi: ts=4 expandtab -- cgit v1.2.3 From ce5fe3a20e86c4745d0310bb9c344d1344d9684c Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Thu, 9 May 2019 18:05:25 +0000 Subject: tests: add Eoan release --- tests/cloud_tests/releases.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'tests') diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index ec5da724..924ad956 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -129,6 +129,22 @@ features: releases: # UBUNTU ================================================================= + eoan: + # EOL: Jul 2020 + default: + enabled: true + release: eoan + version: 19.10 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: eoan + setup_overrides: null + override_templates: false disco: # EOL: Jan 2020 default: -- cgit v1.2.3 From baa478546d8cac98a706010699d64f8c2f70b5bf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 10 May 2019 18:38:55 +0000 Subject: Azure: Return static fallback address as if failed to find endpoint The Azure data source helper attempts to use information in the dhcp lease to find the Wireserver endpoint (IP address). Under some unusual circumstances, those attempts will fail. This change uses a static address, known to be always correct in the Azure public and sovereign clouds, when the helper fails to locate a valid dhcp lease. This address is not guaranteed to be correct in Azure Stack environments; it's still best to use the information from the lease whenever possible. 
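The default is written in the same encoding dhclient uses for option 245, so it can flow through the existing lease-value parser unchanged. A simplified decoder for that one format (the real get_ip_from_lease_value handles several lease encodings):

    def decode_endpoint(lease_value):
        # "a8:3f:81:10" -> "168.63.129.16"
        return '.'.join(str(int(octet, 16))
                        for octet in lease_value.split(':'))

    DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
    assert decode_endpoint(DEFAULT_WIRESERVER_ENDPOINT) == "168.63.129.16"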
--- cloudinit/sources/helpers/azure.py | 14 +++++++++++--- tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3af05ee..82c4c8c4 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -20,6 +20,9 @@ from cloudinit.reporting import events LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" azure_ds_reporter = events.ReportEventStack( name="azure-ds", @@ -297,7 +300,12 @@ class WALinuxAgentShim(object): @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -372,9 +380,9 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + LOG.warning("No lease found; using default endpoint") + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 02556165..bd006aba 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): -- cgit v1.2.3 From 0f8695323262e41c699588c7cd140f6b58c62017 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Fri, 24 May 2019 21:39:19 +0000 Subject: freebsd: NoCloud data source support blkid is a Linux-only command. With this patch, cloud-init uses another approach to find the data source on FreeBSD. 
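In sketch form, the FreeBSD path (mirrors the _get_devices hunk below; FreeBSD exposes labeled filesystems as named device nodes, so a simple existence check replaces blkid):

    import os

    def freebsd_candidate_devices(label):
        # msdosfs (vfat) and iso9660 filesystems carrying this label
        # show up under predictable /dev/<fstype>/<label> paths.
        candidates = ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
        return [p for p in candidates if os.path.exists(p)]

    # e.g. a seed ISO labeled cidata -> ['/dev/iso9660/cidata']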
LP: #1645824 --- cloudinit/sources/DataSourceNoCloud.py | 40 ++++++++++++++----------- config/cloud.cfg.tmpl | 4 +-- tests/unittests/test_datasource/test_nocloud.py | 18 +++++++++++ 3 files changed, 43 insertions(+), 19 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index fcf5d589..8a9e5dd2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + def _get_devices(self, label): + if util.is_FreeBSD(): + devlist = [ + p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] + if os.path.exists(p)] + else: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") + + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + return devlist + def _get_data(self): defaults = { "instance-id": "nocloud", @@ -99,20 +119,7 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=%s" % label.upper()) - label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) - - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: + for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -120,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - if dev in label_list: - LOG.warning("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a" + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 25db43e0..684c7473 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ preserve_hostname: false {% if variant in ["freebsd"] %} # This should not be required, but leave it in place until the real cause of -# not beeing able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +# not finding -any- datasources is resolved. 
+datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] {% endif %} # Example datasource config # datasource: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b785362f..18bea0b9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -278,6 +278,24 @@ class TestNoCloudDataSource(CiTestCase): self.assertEqual(netconf, dsrc.network_config) self.assertNotIn(gateway, str(dsrc.network_config)) + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + self.mocks.enter_context( + mock.patch.object(os.path, 'exists', return_value=True)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + class TestParseCommandLineData(CiTestCase): -- cgit v1.2.3 From 6197c347c3960254dbcdb28eb73989d062ad9689 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 28 May 2019 15:39:48 +0000 Subject: freebsd: ability to grow root file system - UFS file system support - GPT partition table support - add support for newfs's -L parameter (label) - move freebsd specific test from Azure to freebsd --- cloudinit/config/cc_growpart.py | 3 +- cloudinit/config/cc_resizefs.py | 6 +-- cloudinit/util.py | 22 ++++++----- tests/unittests/test_datasource/test_azure.py | 24 ------------ tests/unittests/test_distros/test_freebsd.py | 45 ++++++++++++++++++++++ .../test_handler/test_handler_resizefs.py | 2 +- 6 files changed, 64 insertions(+), 38 deletions(-) create mode 100644 tests/unittests/test_distros/test_freebsd.py (limited to 'tests') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index bafca9d8..564f376f 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -215,7 +215,8 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. 
if util.is_FreeBSD(): - m = re.search('^(/dev/.+)p([0-9])$', devpath) + freebsd_part = "/dev/" + util.find_freebsd_part(devpath) + m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 076b9d5a..afd2e060 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', devpth) + return ('growfs', '-y', mount_point) def _resize_zfs(mount_point, devpth): @@ -101,7 +101,7 @@ def _can_skip_resize_ufs(mount_point, devpth): """ # dumpfs -m / # newfs command for / (/dev/label/rootfs) - newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf """ cur_fs_sz = None @@ -110,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): for line in dumpfs_res.splitlines(): if not line.startswith('#'): newfs_cmd = shlex.split(line) - opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:' optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": diff --git a/cloudinit/util.py b/cloudinit/util.py index ea4199cd..aa23b3f3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2337,17 +2337,21 @@ def parse_mtab(path): return None -def find_freebsd_part(label_part): - if label_part.startswith("/dev/label/"): - target_label = label_part[5:] - (label_part, _err) = subp(['glabel', 'status', '-s']) - for labels in label_part.split("\n"): +def find_freebsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3: + return splitted[2] + elif splitted[2] in ['label', 'gpt', 'ufs']: + target_label = fs[5:] + (part, _err) = subp(['glabel', 'status', '-s']) + for labels in part.split("\n"): items = labels.split() - if len(items) > 0 and items[0].startswith(target_label): - label_part = items[2] + if len(items) > 0 and items[0] == target_label: + part = items[2] break - label_part = str(label_part) - return label_part + return str(part) + else: + LOG.warning("Unexpected input in find_freebsd_part: %s", fs) def get_path_dev_freebsd(path, mnt_list): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 427ab7e7..afb614e4 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -6,7 +6,6 @@ from cloudinit import url_helper from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, - find_freebsd_part, get_path_dev_freebsd, MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( @@ -391,29 +390,6 @@ scbus-1 on xpt0 bus 0 dev = ds.get_resource_disk_on_freebsd(1) self.assertEqual("da1", dev) - @mock.patch('cloudinit.util.subp') - def test_find_freebsd_part_on_Azure(self, mock_subp): - glabel_out = ''' -gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 - label/rootfs N/A da0p2 - label/swap N/A da0p3 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/label/rootfs") - self.assertEqual("da0p2", res) - - def test_get_path_dev_freebsd_on_Azure(self): - mnt_list = ''' -/dev/label/rootfs / ufs rw 1 1 -devfs /dev devfs 
rw,multilabel 0 0 -fdescfs /dev/fd fdescfs rw 0 0 -/dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) - self.assertIsNotNone(res) - @mock.patch(MOCKPATH + '_is_platform_viable') def test_call_is_platform_viable_seed(self, m_is_platform_viable): """Check seed_dir using _is_platform_viable and return False.""" diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py new file mode 100644 index 00000000..8af253a2 --- /dev/null +++ b/tests/unittests/test_distros/test_freebsd.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) +from cloudinit.tests.helpers import (CiTestCase, mock) + +import os + + +class TestDeviceLookUp(CiTestCase): + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_label(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_gpt(self, mock_subp): + glabel_out = ''' + gpt/bootfs N/A vtbd0p1 +gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 + gpt/swapfs N/A vtbd0p2 + gpt/rootfs N/A vtbd0p3 + iso9660/cidata N/A vtbd2 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/gpt/rootfs") + self.assertEqual("vtbd0p3", res) + + def test_get_path_dev_freebsd_label(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 35187847..db9a0414 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -147,7 +147,7 @@ class TestResizefs(CiTestCase): def test_resize_ufs_cmd_return(self): mount_point = '/' devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', devpth), + self.assertEqual(('growfs', '-y', mount_point), _resize_ufs(mount_point, devpth)) @mock.patch('cloudinit.util.is_container', return_value=False) -- cgit v1.2.3 From ded1ec81e3c6c37c5241b12fcc3c41182e675dff Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 29 May 2019 04:59:43 +0000 Subject: netplan: update netplan key mappings for gratuitous-arp Previous versions of netplan included a misspelling for the bond parameter around gratuitous-arp. This has been fixed and released and cloud-init needs to accept both values. This branch fixes the key that will be rendered and transforms the previous misspelling when capturing network_state. 
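The transform at the heart of the change, in isolation (condensed from the network_state.py hunk below; params is a bond's parameters dict from the parsed v2 config):

    params = {'gratuitous-arp': 2, 'mii-monitor-interval': 100}

    # Accept the corrected spelling on input but emit the historical
    # misspelling, since Xenial's netplan only understands the old key
    # (LP: #1756701).
    grat_value = params.pop('gratuitous-arp', None)
    if grat_value:
        params['gratuitious-arp'] = grat_value

    assert params == {'mii-monitor-interval': 100, 'gratuitious-arp': 2}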
LP: #1827238 --- cloudinit/net/network_state.py | 8 ++++++++ tests/unittests/test_net.py | 46 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 4d19f562..3702130a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -707,6 +707,14 @@ class NetworkStateInterpreter(object): item_params = dict((key, value) for (key, value) in item_cfg.items() if key not in NETWORK_V2_KEY_FILTER) + # we accept the fixed spelling, but write the old for compatability + # Xenial does not have an updated netplan which supports the + # correct spelling. LP: #1756701 + params = item_params['parameters'] + grat_value = params.pop('gratuitous-arp', None) + if grat_value: + params['gratuitious-arp'] = grat_value + v1_cmd = { 'type': cmd_type, 'name': item_name, diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e85e9640..b936bc9c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -407,6 +407,37 @@ network: - maas """ +NETPLAN_BOND_GRAT_ARP = """ +network: + bonds: + bond0: + interfaces: + - ens3 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + gratuitious-arp: 1 + bond1: + interfaces: + - ens4 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + parameters: + gratuitous-arp: 2 + ethernets: + ens3: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:ab:cd:ef + ens4: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:11:22:ff + version: 2 +""" + NETPLAN_DHCP_FALSE = """ version: 2 ethernets: @@ -3589,6 +3620,21 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_supports_both_grat_arp_spelling(self): + entry = { + 'yaml': NETPLAN_BOND_GRAT_ARP, + 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous', + 'gratuitious'), + } + network_config = yaml.load(entry['yaml']).get('network') + files = self._render_and_read(network_config=network_config) + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): -- cgit v1.2.3 From deaeb714a3582ff7f31e411bcdaf9669903e35f0 Mon Sep 17 00:00:00 2001 From: "Mark T. Voelker" Date: Mon, 3 Jun 2019 15:37:42 +0000 Subject: Allow identification of OpenStack by Asset Tag When OpenStack is deployed on some hypervisors (such as VMware vSphere), cloud-init doesn't detect that it needs to probe the metadata service because the DMI product name field can't be set to a field that is recognized by cloud-init. However, the asset tag field can be set via flavor extra specs or image metadata. A similar approach is already used to identify Open Telekom Cloud. This patch allows cloud init to recognize "OpenStack Nova" or "OpenStack Compute" in the asset tag field as an indication that the instance being configured is running on an OpenStack platform. 
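ds-identify is shell, but the added check amounts to the following (a Python paraphrase for illustration, not the actual implementation):

    def asset_tag_indicates_openstack(chassis_asset_tag):
        # vSphere-hosted Nova guests can't set DMI product-name, but
        # the chassis asset tag is settable via flavor extra specs or
        # image metadata, so match on it as well.
        return chassis_asset_tag.strip() in (
            'OpenStack Nova', 'OpenStack Compute')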
LP: #1669875 --- tests/unittests/test_ds_identify.py | 20 ++++++++++++++++++++ tools/ds-identify | 8 ++++++++ 2 files changed, 28 insertions(+) (limited to 'tests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8c18aa1a..7575223f 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -435,6 +435,14 @@ class TestDsIdentify(DsIdentifyBase): """Open Telecom identification.""" self._test_ds_found('OpenStack-OpenTelekom') + def test_openstack_asset_tag_nova(self): + """OpenStack identification via asset tag OpenStack Nova.""" + self._test_ds_found('OpenStack-AssetTag-Nova') + + def test_openstack_asset_tag_copute(self): + """OpenStack identification via asset tag OpenStack Compute.""" + self._test_ds_found('OpenStack-AssetTag-Compute') + def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. @@ -759,6 +767,18 @@ VALID_CFG = { 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, + 'OpenStack-AssetTag-Nova': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, + 'OpenStack-AssetTag-Compute': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, 'OVF-seed': { 'ds': 'OVF', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index 6518901e..e16708f6 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -979,6 +979,14 @@ dscheck_OpenStack() { return ${DS_FOUND} fi + # LP: #1669875 : allow identification of OpenStack by asset tag + if dmi_chassis_asset_tag_matches "$nova"; then + return ${DS_FOUND} + fi + if dmi_chassis_asset_tag_matches "$compute"; then + return ${DS_FOUND} + fi + # LP: #1715241 : arch other than intel are not identified properly. case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; -- cgit v1.2.3 From f57a77577dd708c7f57babf8cd63ec18134bf34a Mon Sep 17 00:00:00 2001 From: Penghui Liao Date: Fri, 21 Jun 2019 19:41:43 +0000 Subject: sysconfig: support more bonding options Currently, only a few bonding parameters can be configured on sysconfig systems. This patch aims to support more parameters documented on the docs site. 
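How the renderer consumes such a table, in miniature (a sketch using a subset of the key/template pairs added below):

    bond_params = {
        'bond_mode': 'active-backup',
        'bond_miimon': 100,
        'bond_primary': 'bond0s0',
        'bond_fail_over_mac': 'active',
    }
    templates = {
        'bond_mode': 'mode=%s',
        'bond_miimon': 'miimon=%s',
        'bond_primary': 'primary=%s',
        'bond_fail_over_mac': 'fail_over_mac=%s',
    }
    pairs = [templates[key] % value for key, value in bond_params.items()]
    print('BONDING_OPTS="%s"' % ' '.join(pairs))
    # BONDING_OPTS="mode=active-backup miimon=100 primary=bond0s0
    #               fail_over_mac=active"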
--- cloudinit/net/sysconfig.py | 12 +++++++++ tests/unittests/test_net.py | 60 +++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a47da0a8..be5dede7 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -284,6 +284,18 @@ class Renderer(renderer.Renderer): ('bond_mode', "mode=%s"), ('bond_xmit_hash_policy', "xmit_hash_policy=%s"), ('bond_miimon', "miimon=%s"), + ('bond_min_links', "min_links=%s"), + ('bond_arp_interval', "arp_interval=%s"), + ('bond_arp_ip_target', "arp_ip_target=%s"), + ('bond_arp_validate', "arp_validate=%s"), + ('bond_ad_select', "ad_select=%s"), + ('bond_num_grat_arp', "num_grat_arp=%s"), + ('bond_downdelay', "downdelay=%s"), + ('bond_updelay', "updelay=%s"), + ('bond_lacp_rate', "lacp_rate=%s"), + ('bond_fail_over_mac', "fail_over_mac=%s"), + ('bond_primary', "primary=%s"), + ('bond_primary_reselect', "primary_reselect=%s"), ]) bridge_opts_keys = tuple([ diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b936bc9c..18efce98 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1540,6 +1540,12 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true bond-mode: active-backup bond_miimon: 100 bond-xmit-hash-policy: "layer3+4" + bond-num-grat-arp: 5 + bond-downdelay: 10 + bond-updelay: 20 + bond-fail-over-mac: active + bond-primary: bond0s0 + bond-primary-reselect: always subnets: - type: static address: 192.168.0.2/24 @@ -1586,9 +1592,15 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true macaddress: aa:bb:cc:dd:e8:ff mtu: 9000 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup + primary: bond0s0 + primary-reselect-policy: always transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1604,15 +1616,27 @@ iface lo inet loopback auto bond0s0 iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active bond-master bond0 bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 auto bond0s1 iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active bond-master bond0 bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 @@ -1620,8 +1644,14 @@ auto bond0 iface bond0 inet static address 192.168.0.2/24 gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always bond-slaves none + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 hwaddress aa:bb:cc:dd:e8:ff @@ -1666,10 +1696,15 @@ iface bond0 inet6 static - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 - transmit-hash-policy: "layer3+4" + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1692,10 +1727,15 @@ iface bond0 inet6 static - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 + primary: bond0s0 + 
primary-reselect-policy: always transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1720,7 +1760,12 @@ iface bond0 inet6 static 'expected_sysconfig_opensuse': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -1776,7 +1821,12 @@ iface bond0 inet6 static 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none -- cgit v1.2.3 From feebec1cbb462208003460d68d909e76cb68e0e2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Jun 2019 16:06:27 +0000 Subject: azure: add region and AZ properties from imds compute location metadata This allows cloud-init query region to show valid region data for Azure --- cloudinit/sources/DataSourceAzure.py | 9 +++++ tests/unittests/test_datasource/test_azure.py | 47 +++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 7 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7440c1d..d2fad9bb 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -683,6 +683,11 @@ class DataSourceAzure(sources.DataSource): DS_CFG_KEY_PRESERVE_NTFS, False)) return + @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + @property def network_config(self): """Generate a network config like net.generate_fallback_network() with @@ -701,6 +706,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index afb614e4..f27ef21b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,6 +84,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { "interface": [ { @@ -478,13 +497,7 @@ scbus-1 on xpt0 bus 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': 
{'interface': [{ - 'ipv4': {'ipAddress': [ - {'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, + 'imds': NETWORK_METADATA, 'instance-id': 'test-instance-id', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -612,6 +625,26 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From 9c47c682b7aaa185c32a68f4dea8e23e9a2ef565 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Tue, 16 Jul 2019 13:09:38 +0000 Subject: VMWare: Trigger the post customization script via cc_scripts module. cloud-init does not trigger reboots of a VM therefore adding custom scripts to rc.local does not execute the post scripts. This patch moves post-scripts into per-instance scripts dir and has cc_scripts module run the post-scripts. Also in this branch: - Remove the sh interpreter and execute the customization script directly. - Update the unit test. LP: #1833192 --- cloudinit/sources/DataSourceOVF.py | 7 +- .../helpers/vmware/imc/config_custom_script.py | 143 ++++++--------------- tests/unittests/test_vmware/test_custom_script.py | 116 +++++++++-------- 3 files changed, 111 insertions(+), 155 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 70e7a5c0..dd941d2e 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -148,6 +148,9 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + ccScriptsDir = os.path.join( + self.paths.get_cpath("scripts"), + "per-instance") except Exception as e: _raise_error_status( "Error parsing the customization Config File", @@ -201,7 +204,9 @@ class DataSourceOVF(sources.DataSource): if customscript: try: - postcust = PostCustomScript(customscript, imcdirpath) + postcust = PostCustomScript(customscript, + imcdirpath, + ccScriptsDir) postcust.execute() except Exception as e: _raise_error_status( diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py index a7d4ad91..9f14770e 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -1,5 +1,5 @@ # Copyright (C) 2017 Canonical Ltd. -# Copyright (C) 2017 VMware Inc. +# Copyright (C) 2017-2019 VMware Inc. 
# # Author: Maitreyee Saikia # @@ -8,7 +8,6 @@ import logging import os import stat -from textwrap import dedent from cloudinit import util @@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception): class CustomScriptConstant(object): - RC_LOCAL = "/etc/rc.local" - POST_CUST_TMP_DIR = "/root/.customization" - POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" - POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, - POST_CUST_RUN_SCRIPT_NAME) - POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + CUSTOM_TMP_DIR = "/root/.customization" + + # The user defined custom script + CUSTOM_SCRIPT_NAME = "customize.sh" + CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, + CUSTOM_SCRIPT_NAME) + POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + # The cc_scripts_per_instance script to launch custom script + POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh" class RunCustomScript(object): @@ -39,10 +41,19 @@ class RunCustomScript(object): raise CustomScriptNotFound("Script %s not found!! " "Cannot execute custom script!" % self.scriptpath) + + util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR) + + LOG.debug("Copying custom script to %s", + CustomScriptConstant.CUSTOM_SCRIPT) + util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) + # Strip any CR characters from the decoded script - util.load_file(self.scriptpath).replace("\r", "") - st = os.stat(self.scriptpath) - os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + content = util.load_file( + CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "") + util.write_file(CustomScriptConstant.CUSTOM_SCRIPT, + content, + mode=0o544) class PreCustomScript(RunCustomScript): @@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript): """Executing custom script with precustomization argument.""" LOG.debug("Executing pre-customization script") self.prepare_script() - util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"]) class PostCustomScript(RunCustomScript): - def __init__(self, scriptname, directory): + def __init__(self, scriptname, directory, ccScriptsDir): super(PostCustomScript, self).__init__(scriptname, directory) - # Determine when to run custom script. When postreboot is True, - # the user uploaded script will run as part of rc.local after - # the machine reboots. This is determined by presence of rclocal. - # When postreboot is False, script will run as part of cloud-init. - self.postreboot = False - - def _install_post_reboot_agent(self, rclocal): - """ - Install post-reboot agent for running custom script after reboot. - As part of this process, we are editing the rclocal file to run a - VMware script, which in turn is resposible for handling the user - script. - @param: path to rc local. - """ - LOG.debug("Installing post-reboot customization from %s to %s", - self.directory, rclocal) - if not self.has_previous_agent(rclocal): - LOG.info("Adding post-reboot customization agent to rc.local") - new_content = dedent(""" - # Run post-reboot guest customization - /bin/sh %s - exit 0 - """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT - existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') - st = os.stat(rclocal) - # "x" flag should be set - mode = st.st_mode | stat.S_IEXEC - util.write_file(rclocal, existing_rclocal + new_content, mode) - - else: - # We don't need to update rclocal file everytime a customization - # is requested. It just needs to be done for the first time. 
-            LOG.info("Post-reboot guest customization agent is already "
-                     "registered in rc.local")
-        LOG.debug("Installing post-reboot customization agent finished: %s",
-                  self.postreboot)
-
-    def has_previous_agent(self, rclocal):
-        searchstring = "# Run post-reboot guest customization"
-        if searchstring in open(rclocal).read():
-            return True
-        return False
-
-    def find_rc_local(self):
-        """
-        Determine if rc local is present.
-        """
-        rclocal = ""
-        if os.path.exists(CustomScriptConstant.RC_LOCAL):
-            LOG.debug("rc.local detected.")
-            # resolving in case of symlink
-            rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
-            LOG.debug("rc.local resolved to %s", rclocal)
-        else:
-            LOG.warning("Can't find rc.local, post-customization "
-                        "will be run before reboot")
-        return rclocal
-
-    def install_agent(self):
-        rclocal = self.find_rc_local()
-        if rclocal:
-            self._install_post_reboot_agent(rclocal)
-            self.postreboot = True
+        self.ccScriptsDir = ccScriptsDir
+        self.ccScriptPath = os.path.join(
+            ccScriptsDir,
+            CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
 
     def execute(self):
         """
-        This method executes post-customization script before or after reboot
-        based on the presence of rc local.
+        This method copies the post-customization run script to the
+        cc_scripts_per_instance directory and lets the cc_scripts
+        module run the post-customization script.
         """
         self.prepare_script()
-        self.install_agent()
-        if not self.postreboot:
-            LOG.warning("Executing post-customization script inline")
-            util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
-        else:
-            LOG.debug("Scheduling custom script to run post reboot")
-            if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
-                os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
-            # Script "post-customize-guest.sh" and user uploaded script are
-            # are present in the same directory and needs to copied to a temp
-            # directory to be executed post reboot. User uploaded script is
-            # saved as customize.sh in the temp directory.
-            # post-customize-guest.sh excutes customize.sh after reboot.
-            LOG.debug("Copying post-customization script")
-            util.copy(self.scriptpath,
-                      CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
-            LOG.debug("Copying script to run post-customization script")
-            util.copy(
-                os.path.join(self.directory,
-                             CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
-                CustomScriptConstant.POST_CUST_RUN_SCRIPT)
-            LOG.info("Creating post-reboot pending marker")
-            util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
+
+        LOG.debug("Copying post-customization run script to %s",
+                  self.ccScriptPath)
+        util.copy(
+            os.path.join(self.directory,
+                         CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
+            self.ccScriptPath)
+        st = os.stat(self.ccScriptPath)
+        os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
+        LOG.info("Creating post-customization pending marker")
+        util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
 
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py
index 2d9519b0..f89f8157 100644
--- a/tests/unittests/test_vmware/test_custom_script.py
+++ b/tests/unittests/test_vmware/test_custom_script.py
@@ -1,10 +1,12 @@
 # Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2017 VMware INC.
+# Copyright (C) 2017-2019 VMware INC.
 #
 # Author: Maitreyee Saikia
 #
 # This file is part of cloud-init. See LICENSE file for license information.
+import os
+import stat
 from cloudinit import util
 from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
     CustomScriptConstant,
@@ -18,6 +20,10 @@ from cloudinit.tests.helpers import CiTestCase, mock
 class TestVmwareCustomScript(CiTestCase):
     def setUp(self):
         self.tmpDir = self.tmp_dir()
+        # Mock the tmpDir as the root dir in VM.
+        self.execDir = os.path.join(self.tmpDir, ".customization")
+        self.execScript = os.path.join(self.execDir,
+                                       ".customize.sh")
 
     def test_prepare_custom_script(self):
         """
@@ -37,63 +43,67 @@ class TestVmwareCustomScript(CiTestCase):
 
         # Custom script exists.
         custScript = self.tmp_path("test-cust", self.tmpDir)
-        util.write_file(custScript, "test-CR-strip/r/r")
-        postCust = PostCustomScript("test-cust", self.tmpDir)
-        self.assertEqual("test-cust", postCust.scriptname)
-        self.assertEqual(self.tmpDir, postCust.directory)
-        self.assertEqual(custScript, postCust.scriptpath)
-        self.assertFalse(postCust.postreboot)
-        postCust.prepare_script()
-        # Check if all carraige returns are stripped from script.
-        self.assertFalse("/r" in custScript)
+        util.write_file(custScript, "test-CR-strip\r\r")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                postCust = PostCustomScript("test-cust",
+                                            self.tmpDir,
+                                            self.tmpDir)
+                self.assertEqual("test-cust", postCust.scriptname)
+                self.assertEqual(self.tmpDir, postCust.directory)
+                self.assertEqual(custScript, postCust.scriptpath)
+                postCust.prepare_script()
 
-    def test_rc_local_exists(self):
-        """
-        This test is designed to verify the different scenarios associated
-        with the presence of rclocal.
-        """
-        # test when rc local does not exist
-        postCust = PostCustomScript("test-cust", self.tmpDir)
-        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"):
-            rclocal = postCust.find_rc_local()
-            self.assertEqual("", rclocal)
-
-        # test when rc local exists
-        rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir)
-        util.write_file(rclocalFile, "# Run post-reboot guest customization",
-                        omode="w")
-        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile):
-            rclocal = postCust.find_rc_local()
-            self.assertEqual(rclocalFile, rclocal)
-            self.assertTrue(postCust.has_previous_agent, rclocal)
-
-        # test when rc local is a symlink
-        rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir)
-        util.sym_link(rclocalFile, rclocalLink, True)
-        with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink):
-            rclocal = postCust.find_rc_local()
-            self.assertEqual(rclocalFile, rclocal)
+                # Custom script is copied with exec privilege
+                self.assertTrue(os.path.exists(self.execScript))
+                st = os.stat(self.execScript)
+                self.assertTrue(st.st_mode & stat.S_IEXEC)
+                with open(self.execScript, "r") as f:
+                    content = f.read()
+                self.assertEqual(content, "test-CR-strip")
+                # Check if all carriage returns are stripped from script.
+                self.assertFalse("\r" in content)
 
     def test_execute_post_cust(self):
         """
-        This test is to identify if rclocal was properly populated to be
-        run after reboot.
+        This test is designed to verify the behavior after executing the
+        post-customization script.
""" - customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) - rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) - # Create a temporary rclocal file - open(customscript, "w") - util.write_file(rclocal, "tests\nexit 0", omode="w") - postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): - # Test that guest customization agent is not installed initially. - self.assertFalse(postCust.postreboot) - self.assertIs(postCust.has_previous_agent(rclocal), False) - postCust.install_agent() + # Prepare the customize package + postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir) + util.write_file(postCustRun, "This is the script to run post cust") + userScript = self.tmp_path("test-cust", self.tmpDir) + util.write_file(userScript, "This is the post cust script") - # Assert rclocal has been modified to have guest customization - # agent. - self.assertTrue(postCust.postreboot) - self.assertTrue(postCust.has_previous_agent, rclocal) + # Mock the cc_scripts_per_instance dir and marker file. + # Create another tmp dir for cc_scripts_per_instance. + ccScriptDir = self.tmp_dir() + ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") + markerFile = os.path.join(self.tmpDir, ".markerFile") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + with mock.patch.object(CustomScriptConstant, + "POST_CUSTOM_PENDING_MARKER", + markerFile): + postCust = PostCustomScript("test-cust", + self.tmpDir, + ccScriptDir) + postCust.execute() + # Check cc_scripts_per_instance and marker file + # are created. + self.assertTrue(os.path.exists(ccScript)) + with open(ccScript, "r") as f: + content = f.read() + self.assertEqual(content, + "This is the script to run post cust") + self.assertTrue(os.path.exists(markerFile)) # vi: ts=4 expandtab -- cgit v1.2.3 From d9769c475d38a8c30084b1e7537ae3f0359ed3ad Mon Sep 17 00:00:00 2001 From: Pengpeng Sun Date: Tue, 16 Jul 2019 14:31:17 +0000 Subject: Add a cdrom size checker for OVF ds to ds-identify With a large size ISO file attached to iso dev, ds-identify might grep it entirely if iso dev is ISO9660, it takes very long time to start OS. Resolve this by: - Adding a checker to read the ISO size (from sysfs). If the size of the ISO filesystem is > 10MiB then the ISO will be ignored (logged as oversized). - Move the ovf vmware guest customization checker to be ahead of cdrom ovf checker, so no need check the ISO size if vmware guest customization is enabled. 
LP: #1806701
---
 tests/unittests/test_ds_identify.py | 25 ++++++++++++++++++++++++
 tools/ds-identify                   | 39 ++++++++++++++++++++++++++-------------
 2 files changed, 51 insertions(+), 13 deletions(-)

(limited to 'tests')

diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 7575223f..587e6993 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -524,6 +524,30 @@ class TestDsIdentify(DsIdentifyBase):
         self._check_via_dict(
             ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE])
 
+    def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self):
+        """OVF is identified on a cdrom only when its size is under 10MiB."""
+        ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF'])
+
+        # Set cdrom size to 20480 (10MB in 512 byte units)
+        ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n'
+        self._check_via_dict(
+            ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+        # Set cdrom size to 204800 (100MB in 512 byte units)
+        ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n'
+        self._check_via_dict(
+            ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+        # Set cdrom size to 18432 (9MB in 512 byte units)
+        ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n'
+        self._check_via_dict(
+            ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+
+        # Set cdrom size to 2048 (1MB in 512 byte units)
+        ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n'
+        self._check_via_dict(
+            ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+
     def test_default_nocloud_as_vdb_iso9660(self):
         """NoCloud is found with iso9660 filesystem on non-cdrom disk."""
         self._test_ds_found('NoCloud')
@@ -815,6 +839,7 @@ VALID_CFG = {
         ],
         'files': {
             'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n',
+            'sys/class/block/sr0/size': '2048\n',
         }
     },
     'OVF-guestinfo': {
diff --git a/tools/ds-identify b/tools/ds-identify
index e16708f6..0305e361 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -766,10 +766,34 @@ is_cdrom_ovf() {
         config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;;
     esac
 
+    # skip devices whose size is 10MB or larger
+    local size="" sfile="${PATH_SYS_CLASS_BLOCK}/${dev##*/}/size"
+    [ -f "$sfile" ] || return 1
+    read size <"$sfile" || { warn "failed reading from $sfile"; return 1; }
+    # size is in 512 byte units, so convert to MB (integer division)
+    if [ $((size/2048)) -ge 10 ]; then
+        debug 2 "$dev: size $((size/2048))MB is considered too large for OVF"
+        return 1
+    fi
+
     local idstr="http://schemas.dmtf.org/ovf/environment/1"
     grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev"
 }
 
+has_ovf_cdrom() {
+    # DI_ISO9660_DEVS is <device>=label,<device>=label2
+    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
+    if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
+        local oifs="$IFS"
+        # shellcheck disable=2086
+        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
+        for tok in "$@"; do
+            is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return 0
+        done
+    fi
+    return 1
+}
+
 dscheck_OVF() {
     check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"
 
@@ -780,20 +804,9 @@ dscheck_OVF() {
 
     ovf_vmware_transport_guestinfo && return "${DS_FOUND}"
 
-    # DI_ISO9660_DEVS is <device>=label,<device>=label2
-    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
-    if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
-        local oifs="$IFS"
-        # shellcheck disable=2086
-        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
-        for tok in "$@"; do
-            is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND
-        done
-    fi
+    has_ovf_cdrom && return "${DS_FOUND}"
 
-    if ovf_vmware_guest_customization; then
-        return ${DS_FOUND}
-    fi
+    ovf_vmware_guest_customization && return "${DS_FOUND}"
 
     return ${DS_NOT_FOUND}
 }
-- cgit v1.2.3 


From a785462959e8746cc16609a159fee33dedd713b1 Mon Sep 17 00:00:00 2001
From: Conrad Hoffmann
Date: Tue, 16 Jul 2019 19:30:41 +0000
Subject: Support netplan renderer in Arch Linux

For now, support is implemented such that it falls back to the old
`_write_network()` if netplan is not available on the image.
---
 cloudinit/distros/arch.py                      | 14 +++++
 tests/unittests/test_distros/test_netconfig.py | 86 ++++++++++++++++++++++++++
 2 files changed, 100 insertions(+)

(limited to 'tests')

diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index b814c8ba..9f89c5f9 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -12,6 +12,8 @@ from cloudinit import util
 from cloudinit.distros import net_util
 from cloudinit.distros.parsers.hostname import HostnameConf
 
+from cloudinit.net.renderers import RendererNotFoundError
+
 from cloudinit.settings import PER_INSTANCE
 
 import os
@@ -24,6 +26,11 @@ class Distro(distros.Distro):
     network_conf_dir = "/etc/netctl"
     resolve_conf_fn = "/etc/resolv.conf"
     init_cmd = ['systemctl']  # init scripts
+    renderer_configs = {
+        "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
+                    "netplan_header": "# generated by cloud-init\n",
+                    "postcmds": True}
+    }
 
     def __init__(self, name, cfg, paths):
         distros.Distro.__init__(self, name, cfg, paths)
@@ -50,6 +57,13 @@ class Distro(distros.Distro):
         self.update_package_sources()
         self.package_command('', pkgs=pkglist)
 
+    def _write_network_config(self, netconfig):
+        try:
+            return self._supported_write_network_config(netconfig)
+        except RendererNotFoundError:
+            # Fall back to old _write_network
+            raise NotImplementedError
+
     def _write_network(self, settings):
         entries = net_util.translate_network(settings)
         LOG.debug("Translated ubuntu style network settings %s into %s",
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index c3c0c8c5..07b5c0a7 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -614,6 +614,92 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
                                expected_cfgs=expected_cfgs.copy())
 
 
+class TestNetCfgDistroArch(TestNetCfgDistroBase):
+    def setUp(self):
+        super(TestNetCfgDistroArch, self).setUp()
+        self.distro = self._get_distro('arch', renderers=['netplan'])
+
+    def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
+                          bringup=False, with_netplan=False):
+        if not expected_cfgs:
+            raise ValueError('expected_cfg must not be None')
+
+        tmpd = None
+        with mock.patch('cloudinit.net.netplan.available',
+                        return_value=with_netplan):
+            with self.reRooted(tmpd) as tmpd:
+                apply_fn(config, bringup)
+
+        results = dir2dict(tmpd)
+        for cfgpath, expected in expected_cfgs.items():
+            print("----------")
+            print(expected)
+            print("^^^^ expected | rendered VVVVVVV")
+            print(results[cfgpath])
+            print("----------")
+            self.assertEqual(expected, results[cfgpath])
+            self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+    def netctl_path(self, iface):
+        return '/etc/netctl/%s' % iface
+
+    def netplan_path(self):
+        return '/etc/netplan/50-cloud-init.yaml'
+
+    def test_apply_network_config_v1_without_netplan(self):
+        # Note that this is in fact an invalid netctl config:
+        #  "Address=None/None"
+        # But this is what the renderer has been writing out for a long time,
+        # and the test's purpose is to assert that the netctl renderer is
+        # still being used in absence of netplan, not the correctness of the
+        # rendered netctl config.
+        expected_cfgs = {
+            self.netctl_path('eth0'): dedent("""\
+                Address=192.168.1.5/255.255.255.0
+                Connection=ethernet
+                DNS=()
+                Gateway=192.168.1.254
+                IP=static
+                Interface=eth0
+                """),
+            self.netctl_path('eth1'): dedent("""\
+                Address=None/None
+                Connection=ethernet
+                DNS=()
+                Gateway=
+                IP=dhcp
+                Interface=eth1
+                """),
+        }
+
+        # ub_distro.apply_network_config(V1_NET_CFG, False)
+        self._apply_and_verify(self.distro.apply_network_config,
+                               V1_NET_CFG,
+                               expected_cfgs=expected_cfgs.copy(),
+                               with_netplan=False)
+
+    def test_apply_network_config_v1_with_netplan(self):
+        expected_cfgs = {
+            self.netplan_path(): dedent("""\
+                # generated by cloud-init
+                network:
+                    version: 2
+                    ethernets:
+                        eth0:
+                            addresses:
+                            - 192.168.1.5/24
+                            gateway4: 192.168.1.254
+                        eth1:
+                            dhcp4: true
+                """),
+        }
+
+        self._apply_and_verify(self.distro.apply_network_config,
+                               V1_NET_CFG,
+                               expected_cfgs=expected_cfgs.copy(),
+                               with_netplan=True)
+
+
 def get_mode(path, target=None):
     return os.stat(util.target_path(target, path)).st_mode & 0o777
-- cgit v1.2.3 


From 14048171f8e5273b1b82c6d2ed5675a7d1a6eeb6 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Tue, 16 Jul 2019 20:57:32 +0000
Subject: templates/ntp.conf.debian.tmpl: fix missing newline for pools
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The debian ntp.conf template did not contain a newline for the comment
used to mark the rendered ntp pools configured. This resulted in an
invalid line:

  '# poolspool 0.int.pool.ntp.org iburst'

rather than:

  '# pools
  pool 0.int.pool.ntp.org iburst'

This patch fixes the template and updates the unittest to verify that
the rendered template puts servers and pools at the beginning of a
line.
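As an illustration (not part of the patch): asserting against whole
rendered lines rather than one joined substring is what exposes this
class of bug. Below is a minimal, self-contained Python sketch of the
assertion style the updated unittest uses; the sample values are
invented.

    content = ("server 0.pool.ntp.org iburst\n"
               "# pools\n"
               "pool 1.pool.ntp.org iburst\n")
    servers = ['0.pool.ntp.org']
    pools = ['1.pool.ntp.org']

    # check each expected entry as a complete line, not a substring
    content_lines = content.splitlines()
    for srv in servers:
        line = 'server {0} iburst'.format(srv)
        assert line in content_lines, 'missing rendered line: %s' % line
    for pool in pools:
        line = 'pool {0} iburst'.format(pool)
        assert line in content_lines, 'missing rendered line: %s' % line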
LP: #1836598
---
 templates/ntp.conf.debian.tmpl                   |  3 ++-
 tests/unittests/test_handler/test_handler_ntp.py | 25 ++++++++++++++----------
 2 files changed, 17 insertions(+), 11 deletions(-)

(limited to 'tests')

diff --git a/templates/ntp.conf.debian.tmpl b/templates/ntp.conf.debian.tmpl
index 3f07eeaa..affe983d 100644
--- a/templates/ntp.conf.debian.tmpl
+++ b/templates/ntp.conf.debian.tmpl
@@ -19,7 +19,8 @@ filegen clockstats file clockstats type day enable
 # pool.ntp.org maps to about 1000 low-stratum NTP servers.  Your server will
 # pick a different set every time it starts up.  Please consider joining the
 # pool:
-{% if pools -%}# pools{% endif %}
+{% if pools %}# pools
+{% endif %}
 {% for pool in pools -%}
 pool {{pool}} iburst
 {% endfor %}
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 0f22e579..463d892a 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -268,17 +268,22 @@ class TestNtp(FilesystemMockingTestCase):
                 template_fn=template_fn)
             content = util.load_file(confpath)
             if client in ['ntp', 'chrony']:
-                expected_servers = '\n'.join([
-                    'server {0} iburst'.format(srv) for srv in servers])
+                content_lines = content.splitlines()
+                expected_servers = [
+                    'server {0} iburst'.format(srv) for srv in servers]
                 print('distro=%s client=%s' % (distro, client))
-                self.assertIn(expected_servers, content,
-                              ('failed to render {0} conf'
-                               ' for distro:{1}'.format(client, distro)))
-                expected_pools = '\n'.join([
-                    'pool {0} iburst'.format(pool) for pool in pools])
-                self.assertIn(expected_pools, content,
-                              ('failed to render {0} conf'
-                               ' for distro:{1}'.format(client, distro)))
+                for sline in expected_servers:
+                    self.assertIn(sline, content_lines,
+                                  ('failed to render {0} conf'
+                                   ' for distro:{1}'.format(client,
+                                                            distro)))
+                expected_pools = [
+                    'pool {0} iburst'.format(pool) for pool in pools]
+                for pline in expected_pools:
+                    self.assertIn(pline, content_lines,
+                                  ('failed to render {0} conf'
+                                   ' for distro:{1}'.format(client,
+                                                            distro)))
             elif client == 'systemd-timesyncd':
                 expected_content = (
                     "# cloud-init generated file\n" +
-- cgit v1.2.3 


From 07b17236be5665bb552c7460102bcd07bf8f2be8 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Tue, 16 Jul 2019 22:40:15 +0000
Subject: net: add rfc3442 (classless static routes) to EphemeralDHCP

The EphemeralDHCP context manager did not parse or handle rfc3442
classless static routes, which prevented reading datasource metadata
in some clouds. This branch adds support for extracting the field from
the lease output, parsing the format, and then adding the required
iproute2 ip commands to apply (and tear down) the static routes.
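For example (illustrative only; this mirrors the docstring of the new
parse_static_routes() helper added below), a lease option carrying one
host route plus a default route parses as:

    from cloudinit.net.dhcp import parse_static_routes

    # 32-bit prefix: 4 destination octets and 4 gateway octets follow;
    # 0-bit prefix (default route): only the 4 gateway octets follow.
    rfc3442 = "32,169,254,169,254,130,56,248,255,0,130,56,240,1"
    assert parse_static_routes(rfc3442) == [
        ("169.254.169.254/32", "130.56.248.255"),
        ("0.0.0.0/0", "130.56.240.1"),
    ]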
LP: #1821102 --- cloudinit/net/__init__.py | 34 +++++++- cloudinit/net/dhcp.py | 90 +++++++++++++++++++ cloudinit/net/tests/test_dhcp.py | 120 +++++++++++++++++++++++++- cloudinit/net/tests/test_init.py | 39 +++++++++ tests/unittests/test_datasource/test_azure.py | 6 +- tests/unittests/test_datasource/test_ec2.py | 3 +- 6 files changed, 286 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index e758006f..624c9b42 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -679,7 +679,7 @@ class EphemeralIPv4Network(object): """ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None, - connectivity_url=None): + connectivity_url=None, static_routes=None): """Setup context manager and validate call signature. @param interface: Name of the network interface to bring up. @@ -690,6 +690,7 @@ class EphemeralIPv4Network(object): @param router: Optionally the default gateway IP. @param connectivity_url: Optionally, a URL to verify if a usable connection already exists. + @param static_routes: Optionally a list of static routes from DHCP """ if not all([interface, ip, prefix_or_mask, broadcast]): raise ValueError( @@ -706,6 +707,7 @@ class EphemeralIPv4Network(object): self.ip = ip self.broadcast = broadcast self.router = router + self.static_routes = static_routes self.cleanup_cmds = [] # List of commands to run to cleanup state. def __enter__(self): @@ -718,7 +720,21 @@ class EphemeralIPv4Network(object): return self._bringup_device() - if self.router: + + # rfc3442 requires us to ignore the router config *if* classless static + # routes are provided. + # + # https://tools.ietf.org/html/rfc3442 + # + # If the DHCP server returns both a Classless Static Routes option and + # a Router option, the DHCP client MUST ignore the Router option. + # + # Similarly, if the DHCP server returns both a Classless Static Routes + # option and a Static Routes option, the DHCP client MUST ignore the + # Static Routes option. 
+        if self.static_routes:
+            self._bringup_static_routes()
+        elif self.router:
             self._bringup_router()
 
     def __exit__(self, excp_type, excp_value, excp_traceback):
@@ -762,6 +778,20 @@ class EphemeralIPv4Network(object):
             ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
              self.interface])
 
+    def _bringup_static_routes(self):
+        # static_routes = [("169.254.169.254/32", "130.56.248.255"),
+        #                  ("0.0.0.0/0", "130.56.240.1")]
+        for net_address, gateway in self.static_routes:
+            via_arg = []
+            if gateway != "0.0.0.0/0":
+                via_arg = ['via', gateway]
+            util.subp(
+                ['ip', '-4', 'route', 'add', net_address] + via_arg +
+                ['dev', self.interface], capture=True)
+            self.cleanup_cmds.insert(
+                0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
+                   ['dev', self.interface])
+
     def _bringup_router(self):
         """Perform the ip commands to fully setup the router if needed."""
         # Check if a default route exists and exit if it does
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index c98a97cd..17379918 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -92,10 +92,14 @@ class EphemeralDHCPv4(object):
         nmap = {'interface': 'interface', 'ip': 'fixed-address',
                 'prefix_or_mask': 'subnet-mask',
                 'broadcast': 'broadcast-address',
+                'static_routes': 'rfc3442-classless-static-routes',
                 'router': 'routers'}
         kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()])
         if not kwargs['broadcast']:
             kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
+        if kwargs['static_routes']:
+            kwargs['static_routes'] = (
+                parse_static_routes(kwargs['static_routes']))
         if self.connectivity_url:
             kwargs['connectivity_url'] = self.connectivity_url
         ephipv4 = EphemeralIPv4Network(**kwargs)
@@ -272,4 +276,90 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
             return data[keyname]
     return None
 
+
+def parse_static_routes(rfc3442):
+    """ parse rfc3442 format and return a list containing tuple of strings.
+
+    The tuple is composed of the network_address (including net length) and
+    gateway for a parsed static route.
+
+    @param rfc3442: string in rfc3442 format
+    @returns: list of tuple(str, str) for all valid parsed routes until the
+              first parsing error.
+
+    E.g.
+    sr = parse_static_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1")
+    sr = [
+        ("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1")
+    ]
+
+    Python version of isc-dhclient's hooks:
+       /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes
+    """
+    # raw strings from dhcp lease may end in semi-colon
+    rfc3442 = rfc3442.rstrip(";")
+    tokens = rfc3442.split(',')
+    static_routes = []
+
+    def _trunc_error(cidr, required, remain):
+        msg = ("RFC3442 string malformed. Current route has CIDR of %s "
+               "and requires %s significant octets, but only %s remain. "
+               "Verify DHCP rfc3442-classless-static-routes value: %s"
+               % (cidr, required, remain, rfc3442))
+        LOG.error(msg)
+
+    current_idx = 0
+    for idx, tok in enumerate(tokens):
+        if idx < current_idx:
+            continue
+        net_length = int(tok)
+        if net_length in range(25, 33):
+            req_toks = 9
+            if len(tokens[idx:]) < req_toks:
+                _trunc_error(net_length, req_toks, len(tokens[idx:]))
+                return static_routes
+            net_address = ".".join(tokens[idx+1:idx+5])
+            gateway = ".".join(tokens[idx+5:idx+req_toks])
+            current_idx = idx + req_toks
+        elif net_length in range(17, 25):
+            req_toks = 8
+            if len(tokens[idx:]) < req_toks:
+                _trunc_error(net_length, req_toks, len(tokens[idx:]))
+                return static_routes
+            net_address = ".".join(tokens[idx+1:idx+4] + ["0"])
+            gateway = ".".join(tokens[idx+4:idx+req_toks])
+            current_idx = idx + req_toks
+        elif net_length in range(9, 17):
+            req_toks = 7
+            if len(tokens[idx:]) < req_toks:
+                _trunc_error(net_length, req_toks, len(tokens[idx:]))
+                return static_routes
+            net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"])
+            gateway = ".".join(tokens[idx+3:idx+req_toks])
+            current_idx = idx + req_toks
+        elif net_length in range(1, 9):
+            req_toks = 6
+            if len(tokens[idx:]) < req_toks:
+                _trunc_error(net_length, req_toks, len(tokens[idx:]))
+                return static_routes
+            net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"])
+            gateway = ".".join(tokens[idx+2:idx+req_toks])
+            current_idx = idx + req_toks
+        elif net_length == 0:
+            req_toks = 5
+            if len(tokens[idx:]) < req_toks:
+                _trunc_error(net_length, req_toks, len(tokens[idx:]))
+                return static_routes
+            net_address = "0.0.0.0"
+            gateway = ".".join(tokens[idx+1:idx+req_toks])
+            current_idx = idx + req_toks
+        else:
+            LOG.error('Parsed invalid net length "%s". Verify DHCP '
+                      'rfc3442-classless-static-routes value.', net_length)
+            return static_routes
+
+        static_routes.append(("%s/%s" % (net_address, net_length), gateway))
+
+    return static_routes
+
 # vi: ts=4 expandtab
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 51390249..91f503c9 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -8,7 +8,8 @@ from textwrap import dedent
 import cloudinit.net as net
 from cloudinit.net.dhcp import (
     InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery,
-    parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases)
+    parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases,
+    parse_static_routes)
 from cloudinit.util import ensure_file, write_file
 from cloudinit.tests.helpers import (
     CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call)
@@ -64,6 +65,123 @@ class TestParseDHCPLeasesFile(CiTestCase):
         self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
 
 
+class TestDHCPRFC3442(CiTestCase):
+
+    def test_parse_lease_finds_rfc3442_classless_static_routes(self):
+        """parse_dhcp_lease_file returns rfc3442-classless-static-routes."""
+        lease_file = self.tmp_path('leases')
+        content = dedent("""
+            lease {
+              interface "wlp3s0";
+              fixed-address 192.168.2.74;
+              option subnet-mask 255.255.255.0;
+              option routers 192.168.2.1;
+              option rfc3442-classless-static-routes 0,130,56,240,1;
+              renew 4 2017/07/27 18:02:30;
+              expire 5 2017/07/28 07:08:15;
+            }
+        """)
+        expected = [
+            {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+             'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+             'rfc3442-classless-static-routes': '0,130,56,240,1',
+             'renew': '4 2017/07/27 18:02:30',
+             'expire': '5 2017/07/28 07:08:15'}]
+        write_file(lease_file, content)
+        self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+
+    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
+        """EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network"""
+        lease = [
+            {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+             'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+             'rfc3442-classless-static-routes': '0,130,56,240,1',
+             'renew': '4 2017/07/27 18:02:30',
+             'expire': '5 2017/07/28 07:08:15'}]
+        m_maybe.return_value = lease
+        eph = net.dhcp.EphemeralDHCPv4()
+        eph.obtain_lease()
+        expected_kwargs = {
+            'interface': 'wlp3s0',
+            'ip': '192.168.2.74',
+            'prefix_or_mask': '255.255.255.0',
+            'broadcast': '192.168.2.255',
+            'static_routes': [('0.0.0.0/0', '130.56.240.1')],
+            'router': '192.168.2.1'}
+        m_ipv4.assert_called_with(**expected_kwargs)
+
+
+class TestDHCPParseStaticRoutes(CiTestCase):
+
+    with_logs = True
+
+    def test_parse_static_routes_empty_string(self):
+        self.assertEqual([], parse_static_routes(""))
+
+    def test_parse_static_routes_invalid_input_returns_empty_list(self):
+        rfc3442 = "32,169,254,169,254,130,56,248"
+        self.assertEqual([], parse_static_routes(rfc3442))
+
+    def test_parse_static_routes_bogus_width_returns_empty_list(self):
+        rfc3442 = "33,169,254,169,254,130,56,248"
+        self.assertEqual([], parse_static_routes(rfc3442))
+
+    def test_parse_static_routes_single_ip(self):
+        rfc3442 = "32,169,254,169,254,130,56,248,255"
+        self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
+                         parse_static_routes(rfc3442))
+
+    def test_parse_static_routes_single_ip_handles_trailing_semicolon(self):
+        rfc3442 = "32,169,254,169,254,130,56,248,255;"
+        self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
+                         parse_static_routes(rfc3442))
+
+    def test_parse_static_routes_default_route(self):
+        rfc3442 = "0,130,56,240,1"
+        self.assertEqual([('0.0.0.0/0', '130.56.240.1')],
+                         parse_static_routes(rfc3442))
+
+    def test_parse_static_routes_class_c_b_a(self):
+        class_c = "24,192,168,74,192,168,0,4"
+        class_b = "16,172,16,172,16,0,4"
+        class_a = "8,10,10,0,0,4"
+        rfc3442 = ",".join([class_c, class_b, class_a])
+        self.assertEqual(sorted([
+            ("192.168.74.0/24", "192.168.0.4"),
+            ("172.16.0.0/16", "172.16.0.4"),
+            ("10.0.0.0/8", "10.0.0.4")
+        ]), sorted(parse_static_routes(rfc3442)))
+
+    def test_parse_static_routes_logs_error_truncated(self):
+        bad_rfc3442 = {
+            "class_c": "24,169,254,169,10",
+            "class_b": "16,172,16,10",
+            "class_a": "8,10,10",
+            "gateway": "0,0",
+            "netlen": "33,0",
+        }
+        for rfc3442 in bad_rfc3442.values():
+            self.assertEqual([], parse_static_routes(rfc3442))
+
+        logs = self.logs.getvalue()
+        self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines()))
+
+    def test_parse_static_routes_returns_valid_routes_until_parse_err(self):
+        class_c = "24,192,168,74,192,168,0,4"
+        class_b = "16,172,16,172,16,0,4"
+        class_a_error = "8,10,10,0,0"
+        rfc3442 = ",".join([class_c, class_b, class_a_error])
+        self.assertEqual(sorted([
+            ("192.168.74.0/24", "192.168.0.4"),
+            ("172.16.0.0/16", "172.16.0.4"),
+        ]), sorted(parse_static_routes(rfc3442)))
+
+        logs = self.logs.getvalue()
+        self.assertIn(rfc3442, logs.splitlines()[0])
+
+
 class TestDHCPDiscoveryClean(CiTestCase):
     with_logs = True
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 6d2affe7..d393e6ad 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -549,6 +549,45 @@
class TestEphemeralIPV4Network(CiTestCase): self.assertEqual(expected_setup_calls, m_subp.call_args_list) m_subp.assert_has_calls(expected_teardown_calls) + def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'static_routes': [('169.254.169.254/32', '192.168.2.1'), + ('0.0.0.0/0', '192.168.2.1')], + 'router': '192.168.2.1'} + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call( + ['ip', '-4', 'route', 'del', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', + 'eth0', 'down'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'addr', 'del', + '192.168.2.2/24', 'dev', 'eth0'], capture=True) + ] + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls) + class TestApplyNetworkCfgNames(CiTestCase): V1_CONFIG = textwrap.dedent("""\ diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index f27ef21b..2de2aea2 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1807,7 +1807,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) def test__reprovision_calls__poll_imds(self, fake_resp, @@ -1845,7 +1846,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 20d59bfd..1ec8e009 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -538,7 +538,8 @@ class TestEc2(test_helpers.HttprettyTestCase): m_dhcp.assert_called_once_with('eth9') m_net.assert_called_once_with( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertIn('Crawl of metadata service took', self.logs.getvalue()) -- cgit v1.2.3 From b3a87fc0a2c88585cf77fa9d2756e96183c838f7 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 17 Jul 2019 20:23:42 +0000 
Subject: net: update net sequence, include wait on netdevs, opensuse netrules path On systems with many interfaces, processing udev events may take a while. Cloud-init expects devices included in a provided network-configuration to be present when attempting to configure them. This patch adds a step in net configuration where it will check for devices provided in the configuration and if not found, issue udevadm settle commands to wait for them to appear. Additionally, the default path for udev persistent network rules 70-persistent-net.rules may also be written to systems which include the 75-net-generator.rules. During boot, cloud-init and the generator may race and interleave values causing issues. OpenSUSE will now use a newer file, 85-persistent-net-cloud-init.rules which will take precedence over values created by 75-net-generator and avoid collisions on the same file. LP: #1817368 --- cloudinit/distros/opensuse.py | 2 + cloudinit/net/__init__.py | 88 ++++++++++++---- cloudinit/net/tests/test_init.py | 213 +++++++++++++++++++++++++++++++++++++++ cloudinit/stages.py | 27 +++-- cloudinit/tests/test_stages.py | 11 +- tests/unittests/test_net.py | 6 +- 6 files changed, 315 insertions(+), 32 deletions(-) (limited to 'tests') diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 1bfe0478..e41e2f7b 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -38,6 +38,8 @@ class Distro(distros.Distro): 'sysconfig': { 'control': 'etc/sysconfig/network/config', 'iface_templates': '%(base)s/network/ifcfg-%(name)s', + 'netrules_path': ( + 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'), 'route_templates': { 'ipv4': '%(base)s/network/ifroute-%(name)s', 'ipv6': '%(base)s/network/ifroute-%(name)s', diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 624c9b42..f3cec794 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -9,6 +9,7 @@ import errno import logging import os import re +from functools import partial from cloudinit.net.network_state import mask_to_net_prefix from cloudinit import util @@ -292,18 +293,10 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None): return None -def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): - """read the network config and rename devices accordingly. - if strict_present is false, then do not raise exception if no devices - match. if strict_busy is false, then do not raise exception if the - device cannot be renamed because it is currently configured. - - renames are only attempted for interfaces of type 'physical'. 
It is - expected that the network system will create other devices with the - correct name in place.""" +def extract_physdevs(netcfg): def _version_1(netcfg): - renames = [] + physdevs = [] for ent in netcfg.get('config', {}): if ent.get('type') != 'physical': continue @@ -317,11 +310,11 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): driver = device_driver(name) if not device_id: device_id = device_devid(name) - renames.append([mac, name, driver, device_id]) - return renames + physdevs.append([mac, name, driver, device_id]) + return physdevs def _version_2(netcfg): - renames = [] + physdevs = [] for ent in netcfg.get('ethernets', {}).values(): # only rename if configured to do so name = ent.get('set-name') @@ -337,16 +330,69 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): driver = device_driver(name) if not device_id: device_id = device_devid(name) - renames.append([mac, name, driver, device_id]) - return renames + physdevs.append([mac, name, driver, device_id]) + return physdevs + + version = netcfg.get('version') + if version == 1: + return _version_1(netcfg) + elif version == 2: + return _version_2(netcfg) + + raise RuntimeError('Unknown network config version: %s' % version) + - if netcfg.get('version') == 1: - return _rename_interfaces(_version_1(netcfg)) - elif netcfg.get('version') == 2: - return _rename_interfaces(_version_2(netcfg)) +def wait_for_physdevs(netcfg, strict=True): + physdevs = extract_physdevs(netcfg) + + # set of expected iface names and mac addrs + expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs]) + expected_macs = set(expected_ifaces.keys()) + + # set of current macs + present_macs = get_interfaces_by_mac().keys() + + # compare the set of expected mac address values to + # the current macs present; we only check MAC as cloud-init + # has not yet renamed interfaces and the netcfg may include + # such renames. + for _ in range(0, 5): + if expected_macs.issubset(present_macs): + LOG.debug('net: all expected physical devices present') + return - raise RuntimeError('Failed to apply network config names. Found bad' - ' network config version: %s' % netcfg.get('version')) + missing = expected_macs.difference(present_macs) + LOG.debug('net: waiting for expected net devices: %s', missing) + for mac in missing: + # trigger a settle, unless this interface exists + syspath = sys_dev_path(expected_ifaces[mac]) + settle = partial(util.udevadm_settle, exists=syspath) + msg = 'Waiting for udev events to settle or %s exists' % syspath + util.log_time(LOG.debug, msg, func=settle) + + # update present_macs after settles + present_macs = get_interfaces_by_mac().keys() + + msg = 'Not all expected physical devices present: %s' % missing + LOG.warning(msg) + if strict: + raise RuntimeError(msg) + + +def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): + """read the network config and rename devices accordingly. + if strict_present is false, then do not raise exception if no devices + match. if strict_busy is false, then do not raise exception if the + device cannot be renamed because it is currently configured. + + renames are only attempted for interfaces of type 'physical'. 
It is + expected that the network system will create other devices with the + correct name in place.""" + + try: + _rename_interfaces(extract_physdevs(netcfg)) + except RuntimeError as e: + raise RuntimeError('Failed to apply network config names: %s' % e) def interface_has_own_mac(ifname, strict=False): diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index d393e6ad..e6e77d7a 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -708,3 +708,216 @@ class TestHasURLConnectivity(HttprettyTestCase): httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) self.assertFalse( net.has_url_connectivity(self.url), 'Expected False on url fail') + + +def _mk_v1_phys(mac, name, driver, device_id): + v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac} + params = {} + if driver: + params.update({'driver': driver}) + if device_id: + params.update({'device_id': device_id}) + + if params: + v1_cfg.update({'params': params}) + + return v1_cfg + + +def _mk_v2_phys(mac, name, driver=None, device_id=None): + v2_cfg = {'set-name': name, 'match': {'macaddress': mac}} + if driver: + v2_cfg['match'].update({'driver': driver}) + if device_id: + v2_cfg['match'].update({'device_id': device_id}) + + return v2_cfg + + +class TestExtractPhysdevs(CiTestCase): + + def setUp(self): + super(TestExtractPhysdevs, self).setUp() + self.add_patch('cloudinit.net.device_driver', 'm_driver') + self.add_patch('cloudinit.net.device_devid', 'm_devid') + + def test_extract_physdevs_looks_up_driver_v1(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_driver_v2(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v1(self): + devid = '0x1000' + self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the driver value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v2(self): + devid = '0x1000' + self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + # insert the driver value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_get_v1_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + 
['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical_skips_if_no_set_name(self): + netcfg = { + 'version': 2, + 'ethernets': { + 'ens3': { + 'match': {'macaddress': '00:11:22:33:44:55'}, + } + } + } + self.assertEqual([], net.extract_physdevs(netcfg)) + + def test_runtime_error_on_unknown_netcfg_version(self): + with self.assertRaises(RuntimeError): + net.extract_physdevs({'version': 3, 'awesome_config': []}) + + +class TestWaitForPhysdevs(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestWaitForPhysdevs, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', + 'm_get_iface_mac') + self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle') + + def test_wait_for_physdevs_skips_settle_if_all_present(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.side_effect = iter([ + {'aa:bb:cc:dd:ee:ff': 'eth0', + '00:11:22:33:44:55': 'ens3'}, + ]) + net.wait_for_physdevs(netcfg) + self.assertEqual(0, self.m_udev_settle.call_count) + + def test_wait_for_physdevs_calls_udev_settle_on_missing(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.side_effect = iter([ + {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing + {'aa:bb:cc:dd:ee:ff': 'eth0', + '00:11:22:33:44:55': 'ens3'}, # second call has both + ]) + net.wait_for_physdevs(netcfg) + self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3')) + + def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.return_value = {} + with self.assertRaises(RuntimeError): + net.wait_for_physdevs(netcfg) + + self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) + + def test_wait_for_physdevs_no_raise_if_not_strict(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.return_value = {} + net.wait_for_physdevs(netcfg, strict=False) + self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index da7d349a..5f9d47b9 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -644,18 +644,21 @@ class Init(object): return (ncfg, loc) return 
(self.distro.generate_fallback_config(), "fallback") - def apply_network_config(self, bring_up): - netcfg, src = self._find_networking_config() - if netcfg is None: - LOG.info("network config is disabled by %s", src) - return - + def _apply_netcfg_names(self, netcfg): try: LOG.debug("applying net config names for %s", netcfg) self.distro.apply_network_config_names(netcfg) except Exception as e: LOG.warning("Failed to rename devices: %s", e) + def apply_network_config(self, bring_up): + # get a network config + netcfg, src = self._find_networking_config() + if netcfg is None: + LOG.info("network config is disabled by %s", src) + return + + # request an update if needed/available if self.datasource is not NULL_DATA_SOURCE: if not self.is_new_instance(): if not self.datasource.update_metadata([EventType.BOOT]): @@ -663,8 +666,20 @@ class Init(object): "No network config applied. Neither a new instance" " nor datasource network update on '%s' event", EventType.BOOT) + # nothing new, but ensure proper names + self._apply_netcfg_names(netcfg) return + else: + # refresh netcfg after update + netcfg, src = self._find_networking_config() + + # ensure all physical devices in config are present + net.wait_for_physdevs(netcfg) + + # apply renames from config + self._apply_netcfg_names(netcfg) + # rendering config LOG.info("Applying network configuration from %s bringup=%s: %s", src, bring_up, netcfg) try: diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 94b6b255..9b483121 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -37,6 +37,7 @@ class FakeDataSource(sources.DataSource): class TestInit(CiTestCase): with_logs = True + allowed_subp = False def setUp(self): super(TestInit, self).setUp() @@ -166,8 +167,9 @@ class TestInit(CiTestCase): 'INFO: network config is disabled by %s' % disable_file, self.logs.getvalue()) + @mock.patch('cloudinit.net.get_interfaces_by_mac') @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_new_instance(self, m_ubuntu): + def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): """Call distro apply_network_config methods on is_new_instance.""" net_cfg = { 'version': 1, 'config': [ @@ -177,6 +179,8 @@ class TestInit(CiTestCase): def fake_network_config(): return net_cfg, 'fallback' + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) @@ -206,8 +210,9 @@ class TestInit(CiTestCase): " nor datasource network update on '%s' event" % EventType.BOOT, self.logs.getvalue()) + @mock.patch('cloudinit.net.get_interfaces_by_mac') @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_datasource_allowed_event(self, m_ubuntu): + def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs): """Apply network if datasource.update_metadata permits BOOT event.""" old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') @@ -220,6 +225,8 @@ class TestInit(CiTestCase): def fake_network_config(): return net_cfg, 'fallback' + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + self.init._find_networking_config = fake_network_config self.init.datasource = FakeDataSource(paths=self.init.paths) self.init.datasource.update_events = {'network': [EventType.BOOT]} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 18efce98..de4e7f4f 100644 --- 
a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -515,7 +515,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ @@ -619,7 +619,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ @@ -750,7 +750,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ -- cgit v1.2.3 From 5498107d184815fe2e591748e247ab98a4e6d681 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 18 Jul 2019 01:27:36 +0000 Subject: Fix bug rendering MTU on bond or vlan when input was netplan. If input to network_state.parse_net_config_data was netplan (v2 yaml) then the network state would lose the mtu information on bond or vlan. LP: #1836949 --- cloudinit/net/network_state.py | 4 ++ tests/unittests/test_net.py | 91 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 3702130a..0ca576b6 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -673,6 +673,8 @@ class NetworkStateInterpreter(object): 'vlan_id': cfg.get('id'), 'vlan_link': cfg.get('link'), } + if 'mtu' in cfg: + vlan_cmd['mtu'] = cfg['mtu'] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: vlan_cmd.update({'subnets': subnets}) @@ -722,6 +724,8 @@ class NetworkStateInterpreter(object): 'params': dict((v2key_to_v1[k], v) for k, v in item_params.get('parameters', {}).items()) } + if 'mtu' in item_cfg: + v1_cmd['mtu'] = item_cfg['mtu'] subnets = self._v2_to_v1_ipcfg(item_cfg) if len(subnets) > 0: v1_cmd.update({'subnets': subnets}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index de4e7f4f..e2bbb847 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2856,6 +2856,97 @@ USERCTL=no self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + def test_from_v2_vlan_mtu(self): + """verify mtu gets rendered on bond when source is netplan.""" + v2data = { + 'version': 2, + 'ethernets': {'eno1': {}}, + 'vlans': { + 'eno1.1000': { + 'addresses': ["192.6.1.9/24"], + 'id': 1000, 'link': 'eno1', 'mtu': 1495}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=eno1 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + 'ifcfg-eno1.1000': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=eno1.1000 + IPADDR=192.6.1.9 + MTU=1495 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eno1 + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + VLAN=yes + """) + } + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + + def test_from_v2_bond_mtu(self): + """verify mtu gets rendered on bond when source is netplan.""" + v2data = { + 'version': 2, + 'bonds': { + 
'bond0': {'addresses': ['10.101.8.65/26'], + 'interfaces': ['enp0s0', 'enp0s1'], + 'mtu': 1334, + 'parameters': {}}} + } + expected = { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_SLAVE0=enp0s0 + BONDING_SLAVE1=enp0s1 + BOOTPROTO=none + DEVICE=bond0 + IPADDR=10.101.8.65 + MTU=1334 + NETMASK=255.255.255.192 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Bond + USERCTL=no + """), + 'ifcfg-enp0s0': textwrap.dedent("""\ + BONDING_MASTER=yes + BOOTPROTO=none + DEVICE=enp0s0 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + STARTMODE=auto + TYPE=Bond + USERCTL=no + """), + 'ifcfg-enp0s1': textwrap.dedent("""\ + BONDING_MASTER=yes + BOOTPROTO=none + DEVICE=enp0s1 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + STARTMODE=auto + TYPE=Bond + USERCTL=no + """) + } + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): -- cgit v1.2.3 From a02c0c9aa24a16f1983a81fe5dbfadac3d7e0ad3 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 18 Jul 2019 19:53:50 +0000 Subject: cloud_tests: updates and fixes - Update paramiko and cryptography module versions (2.4.2) to address issues with algo and deprecation warnings. - Modify ssh keypair generation to work with updated paramiko - tools/xkvm sync with newer version from curtin - Update NoCloudKvm instance.py to work with updated xkvm - pass -name to instance, useful for debugging on shared host - Add cache_mode platform config; default to cache=none,aio=native - Switch to yaml.safe_load() in platforms.py --- integration-requirements.txt | 3 +- tests/cloud_tests/platforms.yaml | 1 + tests/cloud_tests/platforms/nocloudkvm/instance.py | 13 +++-- tests/cloud_tests/platforms/platforms.py | 2 +- tests/cloud_tests/setup_image.py | 3 +- tools/xkvm | 61 +++++++++++++++++++--- 6 files changed, 68 insertions(+), 15 deletions(-) (limited to 'tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 880d9886..fe5ad45d 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -10,7 +10,8 @@ unittest2 boto3==1.5.9 # ssh communication -paramiko==2.4.1 +paramiko==2.4.2 +cryptography==2.4.2 # lxd backend diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml index 448aa98d..652a7051 100644 --- a/tests/cloud_tests/platforms.yaml +++ b/tests/cloud_tests/platforms.yaml @@ -66,5 +66,6 @@ platforms: {{ config_get("user.vendor-data", properties.default) }} nocloud-kvm: enabled: true + cache_mode: cache=none,aio=native # vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py index 33ff3f24..96185b75 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py @@ -74,6 +74,8 @@ class NoCloudKVMInstance(Instance): self.pid_file = None self.console_file = None self.disk = image_path + self.cache_mode = platform.config.get('cache_mode', + 'cache=none,aio=native') self.meta_data = meta_data def shutdown(self, wait=True): @@ -113,7 +115,10 @@ class NoCloudKVMInstance(Instance): pass if self.pid_file: - os.remove(self.pid_file) + try: + os.remove(self.pid_file) + except Exception: + pass self.pid = None self._ssh_close() @@ -160,13 +165,13 @@ class NoCloudKVMInstance(Instance): self.ssh_port = self.get_free_port() cmd = ['./tools/xkvm', - '--disk', '%s,cache=unsafe' % self.disk, - '--disk', '%s,cache=unsafe' % seed, + '--disk', 
'%s,%s' % (self.disk, self.cache_mode), + '--disk', '%s' % seed, '--netdev', ','.join(['user', 'hostfwd=tcp::%s-:22' % self.ssh_port, 'dnssearch=%s' % CI_DOMAIN]), '--', '-pidfile', self.pid_file, '-vnc', 'none', - '-m', '2G', '-smp', '2', '-nographic', + '-m', '2G', '-smp', '2', '-nographic', '-name', self.name, '-serial', 'file:' + self.console_file] subprocess.Popen(cmd, close_fds=True, diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index abbfebba..bebdf1c6 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -48,7 +48,7 @@ class Platform(object): if os.path.exists(filename): c_util.del_file(filename) - c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096', + c_util.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096', '-f', filename, '-P', '', '-C', 'ubuntu@cloud_test'], capture=True) diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 39f4517f..a8aaba15 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -222,7 +222,8 @@ def setup_image(args, image): for name, func, desc in handlers if getattr(args, name, None)] try: - data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True)) + data = yaml.safe_load( + image.read_data("/etc/cloud/build.info", decode=True)) info = ' '.join(["%s=%s" % (k, data.get(k)) for k in ("build_name", "serial") if k in data]) except Exception as e: diff --git a/tools/xkvm b/tools/xkvm index a30ba916..8d44cad7 100755 --- a/tools/xkvm +++ b/tools/xkvm @@ -1,4 +1,6 @@ #!/bin/bash +# This file is part of cloud-init. +# See LICENSE file for copyright and license info. set -f @@ -11,6 +13,8 @@ TAPDEVS=( ) # OVS_CLEANUP gets populated with bridge:devname pairs used with ovs OVS_CLEANUP=( ) MAC_PREFIX="52:54:00:12:34" +# allow this to be set externally. +_QEMU_SUPPORTS_FILE_LOCKING="${_QEMU_SUPPORTS_FILE_LOCKING}" KVM="kvm" declare -A KVM_DEVOPTS @@ -119,6 +123,21 @@ isdevopt() { return 1 } +qemu_supports_file_locking() { + # hackily check if qemu has file.locking in -drive params (LP: #1716028) + if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then + # The only way we could find to check presense of file.locking is + # qmp (query-qmp-schema). Simply checking if the virtio-blk driver + # supports 'share-rw' is expected to be equivalent and simpler. + isdevopt virtio-blk share-rw && + _QEMU_SUPPORTS_FILE_LOCKING=true || + _QEMU_SUPPORTS_FILE_LOCKING=false + debug 1 "qemu supports file locking = ${_QEMU_SUPPORTS_FILE_LOCKING}" + fi + [ "$_QEMU_SUPPORTS_FILE_LOCKING" = "true" ] + return +} + padmac() { # return a full mac, given a subset. 
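[Editor's note] The qemu_supports_file_locking helper above is a memoized capability probe: it runs the (potentially expensive) check once, caches the answer in a variable that can also be pre-seeded externally, and reuses it on later calls. The same pattern expressed in Python, with the probe callable standing in for the real isdevopt check:

    # Memoized capability probe, mirroring qemu_supports_file_locking.
    # probe() stands in for the 'isdevopt virtio-blk share-rw' check.
    _qemu_supports_file_locking = None

    def qemu_supports_file_locking(probe):
        global _qemu_supports_file_locking
        if _qemu_supports_file_locking is None:  # only probe once
            _qemu_supports_file_locking = bool(probe())
        return _qemu_supports_file_locking

    print(qemu_supports_file_locking(lambda: True))   # runs the probe
    print(qemu_supports_file_locking(lambda: False))  # cached: still True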
# assume whatever is input is the last portion to be @@ -367,7 +386,7 @@ main() { [ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" ) pt=( "$@" ) - local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" + local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" virtio_rng_device="virtio-rng-pci" [ -n "$kvm" ] && kvm_pkg="none" case $(uname -m) in i?86) @@ -382,7 +401,10 @@ main() { [ -n "$kvm" ] || { kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; } def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"} + # disable virtio-scsi-bus virtio_scsi_bus="virtio-scsi-ccw" + virtio_blk_bus="virtio-blk-ccw" + virtio_rng_device="virtio-rng-ccw" ;; ppc64*) [ -n "$kvm" ] || @@ -408,7 +430,7 @@ main() { bios_opts=( "${_RET[@]}" ) local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts="" - local busorindex="" driveopts="" cur="" val="" file="" + local busorindex="" driveopts="" cur="" val="" file="" wwn="" for((i=0;i<${#diskdevs[@]};i++)); do cur=${diskdevs[$i]} IFS=","; set -- $cur; IFS="$oifs" @@ -420,6 +442,7 @@ main() { unit="" index="" serial="" + wwn="" for tok in "$@"; do [ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] && file="$tok" val=${tok#*=} @@ -433,6 +456,7 @@ main() { file=*) file=$val;; fmt=*|format=*) fmt=$val;; serial=*) serial=$val;; + wwn=*) wwn=$val;; bus=*) bus=$val;; unit=*) unit=$val;; index=*) index=$val;; @@ -443,14 +467,19 @@ main() { out=$(LANG=C qemu-img info "$file") && fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') || { error "failed to determine format of $file"; return 1; } - else + elif [ -z "$fmt" ]; then fmt=raw fi if [ -z "$driver" ]; then driver="$def_disk_driver" fi if [ -z "$serial" ]; then - serial="${file##*/}" + # use filename as serial if not provided a wwn + if [ -n "$wwn" ]; then + serial="$wwn" + else + serial="${file##*/}" + fi fi # make sure we add either bus= or index= @@ -470,11 +499,21 @@ main() { id=*|if=*|driver=*|$file|file=*) continue;; fmt=*|format=*) continue;; serial=*|bus=*|unit=*|index=*) continue;; + file.locking=*) + qemu_supports_file_locking || { + debug 2 "qemu has no file locking." \ + "Dropping '$tok' from: $cur" + continue + };; esac isdevopt "$driver" "$tok" && devopts="${devopts},$tok" || diskopts="${diskopts},${tok}" done - + case $driver in + virtio-blk-ccw) + # disable scsi when using virtio-blk-ccw + devopts="${devopts},scsi=off";; + esac diskargs=( "${diskargs[@]}" -drive "$diskopts" -device "$devopts" ) done @@ -623,10 +662,16 @@ main() { done local bus_devices - bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" ) - cmd=( "${kvmcmd[@]}" "${archopts[@]}" + if [ -n "${virtio_scsi_bus}" ]; then + bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" ) + fi + local rng_devices + rng_devices=( -object "rng-random,filename=/dev/urandom,id=objrng0" + -device "$virtio_rng_device,rng=objrng0,id=rng0" ) + cmd=( "${kvmcmd[@]}" "${archopts[@]}" "${bios_opts[@]}" "${bus_devices[@]}" + "${rng_devices[@]}" "${netargs[@]}" "${diskargs[@]}" "${pt[@]}" ) local pcmd=$(quote_cmd "${cmd[@]}") @@ -661,4 +706,4 @@ else main "$@" fi -# vi: ts=4 expandtab +# vi: ts=4 expandtab syntax=sh -- cgit v1.2.3 From 1dbede64dc645b090b4047a105143b5d5090d214 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 23 Jul 2019 22:07:11 +0000 Subject: stages: allow data sources to override network config source order Currently, if a platform provides any network configuration via the "cmdline" method (i.e. network-data=... on the kernel command line, ip=... 
on the kernel command line, or iBFT config via /run/net-*.conf), the value of the data source's network_config property is completely ignored. This means that on platforms that use iSCSI boot (such as Oracle Compute Infrastructure), there is no way for the data source to configure any network interfaces other than those that have already been configured by the initramfs. This change allows data sources to specify the order in which network configuration sources are considered. Data sources that opt to use this mechanism will be expected to consume the command line network data and integrate it themselves. (The generic merging of network configuration sources was considered, but we concluded that the single use case we have presently (a) didn't warrant the increased complexity, and (b) didn't give us a broad enough view to be sure that our generic implementation would be sufficiently generic. This change in no way precludes a merging strategy in future.) --- cloudinit/sources/__init__.py | 16 ++++++ cloudinit/stages.py | 37 ++++++++++---- cloudinit/tests/test_stages.py | 71 ++++++++++++++++++++++---- tests/unittests/test_datasource/test_common.py | 11 ++++ 4 files changed, 116 insertions(+), 19 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..9d249366 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -66,6 +66,13 @@ CLOUD_ID_REGION_PREFIX_MAP = { 'china': ('azure-china', lambda c: c == 'azure'), # only change azure } +# NetworkConfigSource represents the canonical list of network config sources +# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton +# namedtuple as an enum; see https://stackoverflow.com/a/6971002) +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback') +NetworkConfigSource = namedtuple('NetworkConfigSource', + _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) + class DataSourceNotFoundException(Exception): pass @@ -153,6 +160,15 @@ class DataSource(object): # Track the discovered fallback nic for use in configuration generation. _fallback_interface = None + # The network configuration sources that should be considered for this data + # source. (The first source in this list that provides network + # configuration will be used without considering any that follow.) This + # should always be a subset of the members of NetworkConfigSource with no + # duplicate entries. 
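[Editor's note] Because NetworkConfigSource is a singleton namedtuple whose every attribute holds its own name, membership can be validated with a plain hasattr(), which is exactly what the stages.py loop below relies on. A standalone sketch of the construction and of a data source reordering its preference list:

    from collections import namedtuple

    # Singleton namedtuple standing in for a PEP 435 enum (works on
    # Python 2.7); each attribute's value is simply its own name.
    _NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback')
    NetworkConfigSource = namedtuple('NetworkConfigSource', _NAMES)(*_NAMES)

    print(NetworkConfigSource.cmdline)            # 'cmdline'
    print(hasattr(NetworkConfigSource, 'bogus'))  # False -> warn and skip

    # A data source that prefers its own config over the kernel cmdline
    # would override the class attribute roughly like this:
    preferred_order = (NetworkConfigSource.ds,
                       NetworkConfigSource.system_cfg,
                       NetworkConfigSource.cmdline)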
+ network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.system_cfg, + NetworkConfigSource.ds) + # read_url_params url_max_wait = -1 # max_wait < 0 means do not wait url_timeout = 10 # timeout for each metadata url read attempt diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5f9d47b9..6bcda2d1 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -24,6 +24,7 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.event import EventType +from cloudinit.sources import NetworkConfigSource from cloudinit import cloud from cloudinit import config @@ -630,19 +631,37 @@ class Init(object): if os.path.exists(disable_file): return (None, disable_file) - cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config()) - dscfg = ('ds', None) + available_cfgs = { + NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.ds: None, + NetworkConfigSource.system_cfg: self.cfg.get('network'), + } + if self.datasource and hasattr(self.datasource, 'network_config'): - dscfg = ('ds', self.datasource.network_config) - sys_cfg = ('system_cfg', self.cfg.get('network')) + available_cfgs[NetworkConfigSource.ds] = ( + self.datasource.network_config) - for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg): + if self.datasource: + order = self.datasource.network_config_sources + else: + order = sources.DataSource.network_config_sources + for cfg_source in order: + if not hasattr(NetworkConfigSource, cfg_source): + LOG.warning('data source specifies an invalid network' + ' cfg_source: %s', cfg_source) + continue + if cfg_source not in available_cfgs: + LOG.warning('data source specifies an unavailable network' + ' cfg_source: %s', cfg_source) + continue + ncfg = available_cfgs[cfg_source] if net.is_disabled_cfg(ncfg): - LOG.debug("network config disabled by %s", loc) - return (None, loc) + LOG.debug("network config disabled by %s", cfg_source) + return (None, cfg_source) if ncfg: - return (ncfg, loc) - return (self.distro.generate_fallback_config(), "fallback") + return (ncfg, cfg_source) + return (self.distro.generate_fallback_config(), + NetworkConfigSource.fallback) def _apply_netcfg_names(self, netcfg): try: diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 9b483121..7e13e29d 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -6,6 +6,7 @@ import os from cloudinit import stages from cloudinit import sources +from cloudinit.sources import NetworkConfigSource from cloudinit.event import EventType from cloudinit.util import write_file @@ -63,7 +64,7 @@ class TestInit(CiTestCase): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} self.assertEqual( - (None, 'cmdline'), + (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) @@ -78,7 +79,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': 'disabled'}) self.assertEqual( - (None, 'ds'), + (None, NetworkConfigSource.ds), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by ds\n', self.logs.getvalue()) @@ -90,11 +91,61 @@ class TestInit(CiTestCase): self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': 'disabled'}} self.assertEqual( - (None, 'system_cfg'), + 
(None, NetworkConfigSource.system_cfg), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by system_cfg\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_uses_datasrc_order(self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + # cmdline, which would normally be preferred over other sources, + # disables networking; in this case, though, the DS moves cmdline later + # so its own config is preferred + m_cmdline.return_value = {'config': 'disabled'} + + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.ds, NetworkConfigSource.system_cfg, + NetworkConfigSource.cmdline] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_invalid_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + 'invalid_src', NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an invalid network' + ' cfg_source: invalid_src', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( + self, m_cmdline): + """find_networking_config should check sources in DS defined order""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.fallback, NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an unavailable network' + ' cfg_source: fallback', + self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test_wb__find_networking_config_returns_kernel(self, m_cmdline): """find_networking_config returns kernel cmdline config if present.""" @@ -105,7 +156,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'cmdline'), + (expected_cfg, NetworkConfigSource.cmdline), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -118,7 +169,7 @@ class TestInit(CiTestCase): self.init.datasource = FakeDataSource( network_config={'config': ['fakedatasource']}) self.assertEqual( - (expected_cfg, 'system_cfg'), + (expected_cfg, NetworkConfigSource.system_cfg), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -129,7 +180,7 @@ class TestInit(CiTestCase): expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) self.assertEqual( - (expected_cfg, 'ds'), + (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') @@ -148,7 +199,7 @@ class 
TestInit(CiTestCase): distro = self.init.distro distro.generate_fallback_config = fake_generate_fallback self.assertEqual( - (fake_cfg, 'fallback'), + (fake_cfg, NetworkConfigSource.fallback), self.init._find_networking_config()) self.assertNotIn('network config disabled', self.logs.getvalue()) @@ -177,7 +228,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} @@ -199,7 +250,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) @@ -223,7 +274,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 6b01a4ea..2a9cfb29 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -83,4 +83,15 @@ class ExpectedDataSources(test_helpers.TestCase): self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) +class TestDataSourceInvariants(test_helpers.TestCase): + + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 496aaa947ec563bd02b3148f220ff0afe1b32abb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 26 Jul 2019 20:40:18 +0000 Subject: net/cmdline: split interfaces_by_mac and init network config determination Previously "cmdline" network configuration could be either user-specified network-config=... configuration data, or initramfs-provided configuration data. Before data sources could modify the order in which network config sources were considered, this conflation didn't matter (and, indeed, in the default data source configuration it will continue to not matter). However, it _is_ desirable for a data source to be able to specify that its network configuration should be preferred over the initramfs-provided network configuration but still allow explicit network-config=... configuration passed to the kernel cmdline to continue to override both of those sources. (This also modifies the Oracle data source to use read_initramfs_config directly, which is effectively what it was using read_kernel_cmdline_config for previously.) 
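[Editor's note] After this split, read_kernel_cmdline_config handles only the explicit network-config= token, whose payload is base64-encoded (and possibly gzip-compressed) YAML; in cloud-init the decoding is done by _b64dgz plus util.load_yaml. A rough standalone equivalent, assuming that encoding (PyYAML is the only dependency):

    import base64
    import gzip
    import io

    import yaml  # PyYAML

    def decode_network_config(data64):
        """Decode a network-config= payload: base64, optionally gzipped YAML.

        Rough stand-in for cloud-init's _b64dgz + util.load_yaml.
        """
        blob = base64.b64decode(data64)
        if blob[:2] == b'\x1f\x8b':  # gzip magic bytes
            blob = gzip.GzipFile(fileobj=io.BytesIO(blob)).read()
        return yaml.safe_load(blob)

    cmdline = 'ro network-config=' + base64.b64encode(
        b'{version: 2, ethernets: {eth0: {dhcp4: true}}}').decode()
    for tok in cmdline.split():
        if tok.startswith('network-config='):
            print(decode_network_config(tok.split('=', 1)[1]))

read_initramfs_config, by contrast, never looks at that token; it only inspects the klibc-written /run/net-*.conf files and related initramfs markers.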
--- cloudinit/net/cmdline.py | 25 ++++++---- cloudinit/sources/DataSourceOracle.py | 10 ++-- cloudinit/sources/__init__.py | 3 +- cloudinit/sources/tests/test_oracle.py | 19 ++++---- cloudinit/stages.py | 1 + cloudinit/tests/test_stages.py | 83 ++++++++++++++++++++++++++++------ tests/unittests/test_net.py | 22 ++++----- 7 files changed, 112 insertions(+), 51 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index f89a0f73..556a10f3 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -177,21 +177,13 @@ def _is_initramfs_netconfig(files, cmdline): return False -def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): +def read_initramfs_config(files=None, mac_addrs=None, cmdline=None): if cmdline is None: cmdline = util.get_cmdline() if files is None: files = _get_klibc_net_cfg_files() - if 'network-config=' in cmdline: - data64 = None - for tok in cmdline.split(): - if tok.startswith("network-config="): - data64 = tok.split("=", 1)[1] - if data64: - return util.load_yaml(_b64dgz(data64)) - if not _is_initramfs_netconfig(files, cmdline): return None @@ -204,4 +196,19 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) + +def read_kernel_cmdline_config(cmdline=None): + if cmdline is None: + cmdline = util.get_cmdline() + + if 'network-config=' in cmdline: + data64 = None + for tok in cmdline.split(): + if tok.startswith("network-config="): + data64 = tok.split("=", 1)[1] + if data64: + return util.load_yaml(_b64dgz(data64)) + + return None + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 70b9c58a..76cfa38c 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -48,7 +48,7 @@ class DataSourceOracle(sources.DataSource): return False # network may be configured if iscsi root. If that is the case - # then read_kernel_cmdline_config will return non-None. + # then read_initramfs_config will return non-None. if _is_iscsi_root(): data = self.crawl_metadata() else: @@ -118,10 +118,8 @@ class DataSourceOracle(sources.DataSource): We nonetheless return cmdline provided config if present and fallback to generate fallback.""" if self._network_config == sources.UNSET: - cmdline_cfg = cmdline.read_kernel_cmdline_config() - if cmdline_cfg: - self._network_config = cmdline_cfg - else: + self._network_config = cmdline.read_initramfs_config() + if not self._network_config: self._network_config = self.distro.generate_fallback_config() return self._network_config @@ -137,7 +135,7 @@ def _is_platform_viable(): def _is_iscsi_root(): - return bool(cmdline.read_kernel_cmdline_config()) + return bool(cmdline.read_initramfs_config()) def _load_index(content): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9d249366..c2baccd5 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -69,7 +69,7 @@ CLOUD_ID_REGION_PREFIX_MAP = { # NetworkConfigSource represents the canonical list of network config sources # that cloud-init knows about. 
(Python 2.7 lacks PEP 435, so use a singleton # namedtuple as an enum; see https://stackoverflow.com/a/6971002) -_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback') +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs') NetworkConfigSource = namedtuple('NetworkConfigSource', _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) @@ -166,6 +166,7 @@ class DataSource(object): # should always be a subset of the members of NetworkConfigSource with no # duplicate entries. network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.initramfs, NetworkConfigSource.system_cfg, NetworkConfigSource.ds) diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 97d62947..282382c5 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -133,9 +133,9 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) self.assertEqual(my_userdata, ds.userdata_raw) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config): + def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config): """network_config should read kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -145,15 +145,15 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = ncfg + m_initramfs_config.return_value = ncfg self.assertTrue(ds._get_data()) self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) self.assertFalse(distro.generate_fallback_config.called) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config): + def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config): """test that fallback network is generated if no kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -163,18 +163,17 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = None + m_initramfs_config.return_value = None self.assertTrue(ds._get_data()) ncfg = {'version': 1, 'config': [{'distro1': 'value'}]} distro.generate_fallback_config.return_value = ncfg self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) distro.generate_fallback_config.assert_called_once_with() - self.assertEqual(1, m_cmdline_config.call_count) # test that the result got cached, and the methods not re-called. 
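[Editor's note] The Oracle network_config property being tested here is a compute-once-then-cache pattern guarded by a sentinel. A self-contained sketch, where UNSET and the two config sources are stand-ins for sources.UNSET, read_initramfs_config, and generate_fallback_config:

    UNSET = object()  # sentinel standing in for sources.UNSET

    class FakeOracleDS(object):
        def __init__(self, initramfs_cfg, fallback_fn):
            self._network_config = UNSET
            self._initramfs_cfg = initramfs_cfg
            self._fallback_fn = fallback_fn

        @property
        def network_config(self):
            if self._network_config is UNSET:  # computed at most once
                self._network_config = self._initramfs_cfg
                if not self._network_config:
                    self._network_config = self._fallback_fn()
            return self._network_config

    calls = []
    ds = FakeOracleDS(None, lambda: calls.append(1) or {'version': 1})
    ds.network_config
    ds.network_config
    print(len(calls))  # 1 -- fallback generated once, then cached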
self.assertEqual(ncfg, ds.network_config) - self.assertEqual(1, m_cmdline_config.call_count) + self.assertEqual(1, m_initramfs_config.call_count) @mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 6bcda2d1..50129884 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -633,6 +633,7 @@ class Init(object): available_cfgs = { NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.initramfs: cmdline.read_initramfs_config(), NetworkConfigSource.ds: None, NetworkConfigSource.system_cfg: self.cfg.get('network'), } diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 7e13e29d..d5c9c0e4 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -59,20 +59,39 @@ class TestInit(CiTestCase): (None, disable_file), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_kernel( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': ['fake_initrd']} self.assertEqual( (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_initrd( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by kernel cmdline.""" + m_cmdline.return_value = {} + m_initramfs.return_value = {'config': 'disabled'} + self.assertEqual( + (None, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by initramfs\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_datasrc( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by datasource cfg.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {}} # system config doesn't disable @@ -84,10 +103,13 @@ class TestInit(CiTestCase): self.assertEqual('DEBUG: network config disabled by ds\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_sysconfig( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by system config.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': 'disabled'}} self.assertEqual( @@ -96,27 
+118,31 @@ class TestInit(CiTestCase): self.assertEqual('DEBUG: network config disabled by system_cfg\n', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test__find_networking_config_uses_datasrc_order(self, m_cmdline): + def test__find_networking_config_uses_datasrc_order( + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" - # cmdline, which would normally be preferred over other sources, - # disables networking; in this case, though, the DS moves cmdline later - # so its own config is preferred + # cmdline and initramfs, which would normally be preferred over other + # sources, disable networking; in this case, though, the DS moves them + # later so its own config is preferred m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': 'disabled'} ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) self.init.datasource.network_config_sources = [ NetworkConfigSource.ds, NetworkConfigSource.system_cfg, - NetworkConfigSource.cmdline] + NetworkConfigSource.cmdline, NetworkConfigSource.initramfs] self.assertEqual( (ds_net_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test__find_networking_config_warns_if_datasrc_uses_invalid_src( - self, m_cmdline): + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) @@ -130,9 +156,10 @@ class TestInit(CiTestCase): ' cfg_source: invalid_src', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( - self, m_cmdline): + self, m_cmdline, m_initramfs): """find_networking_config should check sources in DS defined order""" ds_net_cfg = {'config': {'needle': True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) @@ -146,11 +173,14 @@ class TestInit(CiTestCase): ' cfg_source: fallback', self.logs.getvalue()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_kernel(self, m_cmdline): + def test_wb__find_networking_config_returns_kernel( + self, m_cmdline, m_initramfs): """find_networking_config returns kernel cmdline config if present.""" expected_cfg = {'config': ['fakekernel']} m_cmdline.return_value = expected_cfg + m_initramfs.return_value = {'config': ['fake_initrd']} self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': {'config': ['fakesys_config']}} self.init.datasource = FakeDataSource( @@ -159,10 +189,29 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.cmdline), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_initramfs( + self, m_cmdline, m_initramfs): + """find_networking_config returns kernel cmdline config if present.""" + expected_cfg = {'config': ['fake_initrd']} + m_cmdline.return_value 
= {} + m_initramfs.return_value = expected_cfg + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {'config': ['fakesys_config']}} + self.init.datasource = FakeDataSource( + network_config={'config': ['fakedatasource']}) + self.assertEqual( + (expected_cfg, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline): + def test_wb__find_networking_config_returns_system_cfg( + self, m_cmdline, m_initramfs): """find_networking_config returns system config when present.""" m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config expected_cfg = {'config': ['fakesys_config']} self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, 'network': expected_cfg} @@ -172,10 +221,13 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.system_cfg), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline): + def test_wb__find_networking_config_returns_datasrc_cfg( + self, m_cmdline, m_initramfs): """find_networking_config returns datasource net config if present.""" m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config # No system config for network in setUp expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) @@ -183,10 +235,13 @@ class TestInit(CiTestCase): (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_fallback(self, m_cmdline): + def test_wb__find_networking_config_returns_fallback( + self, m_cmdline, m_initramfs): """find_networking_config returns fallback config if not defined.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # no initramfs network config # Neither datasource nor system_info disable or provide network fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e2bbb847..1840ade0 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3558,13 +3558,13 @@ class TestCmdlineConfigParsing(CiTestCase): self.assertEqual(found, self.simple_cfg) -class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): +class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): macs = { 'eth0': '14:02:ec:42:48:00', 'eno1': '14:02:ec:42:48:01', } - def test_ip_cmdline_without_ip(self): + def test_without_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1, cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"} exp1 = copy.deepcopy(DHCP_EXPECTED_1) @@ -3574,12 +3574,12 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo root=/root/bar', mac_addrs=self.macs) self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def 
test_ip_cmdline_read_kernel_cmdline_ip(self): + def test_with_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1} exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1['mac_address'] = self.macs['eth0'] @@ -3588,18 +3588,18 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip=dhcp', mac_addrs=self.macs) self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def test_ip_cmdline_read_kernel_cmdline_ip6(self): + def test_with_ip6(self): content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1} root = self.tmp_dir() populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip6=dhcp root=/dev/sda', mac_addrs=self.macs) self.assertEqual( @@ -3611,15 +3611,15 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): {'dns_nameservers': ['2001:67c:1562:8010::2:1'], 'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]}) - def test_ip_cmdline_read_kernel_cmdline_none(self): + def test_with_no_ip_or_ip6(self): # if there is no ip= or ip6= on cmdline, return value should be None content = {'net6-eno1.conf': DHCP6_CONTENT_1} files = sorted(populate_dir(self.tmp_dir(), content)) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) self.assertIsNone(found) - def test_ip_cmdline_both_ip_ip6(self): + def test_with_both_ip_ip6(self): content = { '/run/net-eth0.conf': DHCP_CONTENT_1, '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')} @@ -3634,7 +3634,7 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( + found = cmdline.read_initramfs_config( cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) self.assertEqual(found['version'], 1) -- cgit v1.2.3 From 4dfed67d0e82970f8717d0b524c593962698ca4f Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Thu, 8 Aug 2019 17:09:57 +0000 Subject: New data source for the Exoscale.com cloud platform - dsidentify switches to the new Exoscale datasource on matching DMI name - New Exoscale datasource added Signed-off-by: Mathieu Corbin --- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ tools/ds-identify | 7 +- 8 files changed, 540 insertions(+), 1 deletion(-) create mode 100644 cloudinit/sources/DataSourceExoscale.py create mode 100644 doc/rtd/topics/datasources/exoscale.rst create mode 100644 tests/unittests/test_datasource/test_exoscale.py (limited to 'tests') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 22cb7fde..003ff1ff 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Exoscale', 'Hetzner Cloud', 'IBM - (aka SoftLayer or BlueMix)', 'LXD', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1ebaade..2060d81f 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,6 +39,7 @@ CFG_BUILTIN = { 'Hetzner', 'IBMCloud', 'Oracle', + 'Exoscale', # At the end to 
act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py new file mode 100644 index 00000000..52e7f6f6 --- /dev/null +++ b/cloudinit/sources/DataSourceExoscale.py @@ -0,0 +1,258 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import ec2_utils as ec2 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + +METADATA_URL = "http://169.254.169.254" +API_VERSION = "1.0" +PASSWORD_SERVER_PORT = 8080 + +URL_TIMEOUT = 10 +URL_RETRIES = 6 + +EXOSCALE_DMI_NAME = "Exoscale" + +BUILTIN_DS_CONFIG = { + # We run the set password config module on every boot in order to enable + # resetting the instance's password via the exoscale console (and a + # subsequent instance reboot). + 'cloud_config_modules': [["set-passwords", "always"]] +} + + +class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") + + self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) + self.api_version = self.ds_cfg.get('api_version', API_VERSION) + self.password_server_port = int( + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) + + self.extra_config = BUILTIN_DS_CONFIG + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" + + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + + url = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, + status_cb=LOG.critical) + + return bool(url) + + def crawl_metadata(self): + """ + Crawl the metadata service when available. + + @returns: Dictionary of crawled metadata content. + """ + metadata_ready = util.log_time( + logfunc=LOG.info, + msg='waiting for the metadata service', + func=self.wait_for_metadata_service) + + if not metadata_ready: + return {} + + return read_metadata(self.metadata_url, self.api_version, + self.password_server_port, self.url_timeout, + self.url_retries) + + def _get_data(self): + """Fetch the user data, the metadata and the VM password + from the metadata service. + + Please refer to the datasource documentation for details on how the + metadata server and password server are crawled. + """ + if not self._is_platform_viable(): + return False + + data = util.log_time( + logfunc=LOG.debug, + msg='Crawl of metadata service', + func=self.crawl_metadata) + + if not data: + return False + + self.userdata_raw = data['user-data'] + self.metadata = data['meta-data'] + password = data.get('password') + + password_config = {} + if password: + # Since we have a password, let's make sure we are allowed to use + # it by allowing ssh_pwauth. + # The password module's default behavior is to leave the + # configuration as-is in this regard, so that means it will either + # leave the password always disabled if no password is ever set, or + # leave the password login enabled if we set it once. 
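[Editor's note] The password-server exchange that get_password (defined below) implements is two GETs against the same URL: one with a DomU_Request: send_my_password header to fetch the one-shot password, and one with DomU_Request: saved_password to acknowledge it so it is not re-applied on later boots. A rough sketch using requests instead of cloud-init's url_helper, with retries and error handling omitted:

    import requests

    def fetch_and_mark_password(url, timeout=10):
        """Fetch the one-shot instance password, then mark it saved.

        Rough sketch of the Exoscale password-server exchange.
        """
        resp = requests.get(
            url, headers={'DomU_Request': 'send_my_password'},
            timeout=timeout)
        password = resp.text
        if password in ('', 'saved_password'):
            return None  # nothing set, or already consumed
        requests.get(
            url, headers={'DomU_Request': 'saved_password'},
            timeout=timeout)
        return password

    # e.g. fetch_and_mark_password('http://169.254.169.254:8080/1.0/')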
+ password_config = { + 'ssh_pwauth': True, + 'password': password, + 'chpasswd': { + 'expire': False, + }, + } + + # builtin extra_config overrides password_config + self.extra_config = util.mergemanydict( + [self.extra_config, password_config]) + + return True + + def get_config_obj(self): + return self.extra_config + + def _is_platform_viable(self): + return util.read_dmi_data('system-product-name').startswith( + EXOSCALE_DMI_NAME) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +def get_password(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Obtain the VM's password if set. + + Once fetched the password is marked saved. Future calls to this method may + return empty string or 'saved_password'.""" + password_url = "{}:{}/{}/".format(metadata_url, password_server_port, + api_version) + response = url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "send_my_password"}, + timeout=url_timeout, + retries=url_retries) + password = response.contents.decode('utf-8') + # the password is empty or already saved + # Note: the original metadata server would answer an additional + # 'bad_request' status, but the Exoscale implementation does not. + if password in ['', 'saved_password']: + return None + # save the password + url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "saved_password"}, + timeout=url_timeout, + retries=url_retries) + return password + + +def read_metadata(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Query the metadata server and return the retrieved data.""" + crawled_metadata = {} + crawled_metadata['_metadata_api_version'] = api_version + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + except Exception as e: + util.logexc(LOG, "failed reading from metadata url %s (%s)", + metadata_url, e) + return {} + + try: + crawled_metadata['password'] = get_password( + api_version=api_version, + metadata_url=metadata_url, + password_server_port=password_server_port, + url_retries=url_retries, + url_timeout=url_timeout) + except Exception as e: + util.logexc(LOG, "failed to read from password server url %s:%s (%s)", + metadata_url, password_server_port, e) + + return crawled_metadata + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query Exoscale Metadata') + parser.add_argument( + "--endpoint", + metavar="URL", + help="The url of the metadata service.", + default=METADATA_URL) + parser.add_argument( + "--version", + metavar="VERSION", + help="The version of the metadata endpoint to query.", + default=API_VERSION) + parser.add_argument( + "--retries", + metavar="NUM", + type=int, + help="The number of retries querying the endpoint.", + default=URL_RETRIES) + parser.add_argument( + "--timeout", + metavar="NUM", + type=int, + help="The time in seconds to 
wait before timing out.", + default=URL_TIMEOUT) + parser.add_argument( + "--password-port", + metavar="PORT", + type=int, + help="The port on which the password endpoint listens", + default=PASSWORD_SERVER_PORT) + + args = parser.parse_args() + + data = read_metadata( + metadata_url=args.endpoint, + api_version=args.version, + password_server_port=args.password_port, + url_timeout=args.timeout, + url_retries=args.retries) + + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 648c6068..2148cd5e 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -155,6 +155,7 @@ Follow for more information. datasources/configdrive.rst datasources/digitalocean.rst datasources/ec2.rst + datasources/exoscale.rst datasources/maas.rst datasources/nocloud.rst datasources/opennebula.rst diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst new file mode 100644 index 00000000..27aec9cd --- /dev/null +++ b/doc/rtd/topics/datasources/exoscale.rst @@ -0,0 +1,68 @@ +.. _datasource_exoscale: + +Exoscale +======== + +This datasource supports reading from the metadata server used on the +`Exoscale platform `_. + +Use of the Exoscale datasource is recommended to benefit from new features of +the Exoscale platform. + +The datasource relies on the availability of a compatible metadata server +(``http://169.254.169.254`` is used by default) and its companion password +server, reachable at the same address (by default on port 8080). + +Crawling of metadata +-------------------- + +The metadata service and password server are crawled slightly differently: + + * The "metadata service" is crawled every boot. + * The password server is also crawled every boot (the Exoscale datasource + forces the password module to run with "frequency always"). + +In the password server case, the following rules apply in order to enable the +"restore instance password" functionality: + + * If a password is returned by the password server, it is then marked "saved" + by the cloud-init datasource. Subsequent boots will skip setting the password + (the password server will return "saved_password"). + * When the instance password is reset (via the Exoscale UI), the password + server will return the non-empty password at next boot, therefore causing + cloud-init to reset the instance's password. + +Configuration +------------- + +Users of this datasource are discouraged from changing the default settings +unless instructed to by Exoscale support. + +The following settings are available and can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg.d/`). + +The settings available are: + + * **metadata_url**: The URL for the metadata service (defaults to + ``http://169.254.169.254``) + * **api_version**: The API version path on which to query the instance metadata + (defaults to ``1.0``) + * **password_server_port**: The port (on the metadata server) on which the + password server listens (defaults to ``8080``). + * **timeout**: the timeout value provided to urlopen for each individual http + request. (defaults to ``10``) + * **retries**: The number of retries that should be done for an http request + (defaults to ``6``) + + +An example configuration with the default values is provided below: + +.. 
sourcecode:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 2a9cfb29..61a7a762 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -13,6 +13,7 @@ from cloudinit.sources import ( DataSourceConfigDrive as ConfigDrive, DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, @@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, GCE.DataSourceGCE, MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py new file mode 100644 index 00000000..350c3304 --- /dev/null +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -0,0 +1,203 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from cloudinit.tests.helpers import HttprettyTestCase, mock + +import httpretty +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate +class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when it is not found + in the metadata service.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + 
"{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'cloud_config_modules': [ + ["set-passwords", "always"]], + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + 
body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tools/ds-identify b/tools/ds-identify index 0305e361..e0d4865c 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -553,6 +553,11 @@ dscheck_CloudStack() { return $DS_NOT_FOUND } +dscheck_Exoscale() { + dmi_product_name_matches "Exoscale*" && return $DS_FOUND + return $DS_NOT_FOUND +} + dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND -- cgit v1.2.3 From 155847209e6a3ed5face91a133d8488a703f3f93 Mon Sep 17 00:00:00 2001 From: Rick Wright Date: Fri, 9 Aug 2019 17:11:05 +0000 Subject: Add support for publishing host keys to GCE guest attributes This adds an empty publish_host_keys() method to the default datasource that is called by cc_ssh.py. This feature can be controlled by the 'ssh_publish_hostkeys' config option. It is enabled by default but can be disabled by setting 'enabled' to false. Also, a blacklist of key types is supported. In addition, this change implements ssh_publish_hostkeys() for the GCE datasource, attempting to write the hostkeys to the instance's guest attributes. Using these hostkeys for ssh connections is currently supported by the alpha version of Google's 'gcloud' command-line tool. (On Google Compute Engine, this feature will be enabled by setting the 'enable-guest-attributes' metadata key to 'true' for the project/instance that you would like to use this feature for. When connecting to the instance for the first time using 'gcloud compute ssh' the hostkeys will be read from the guest attributes for the instance and written to the user's local known_hosts file for Google Compute Engine instances.) --- cloudinit/config/cc_ssh.py | 55 +++++++++ cloudinit/config/tests/test_ssh.py | 166 ++++++++++++++++++++++++++++ cloudinit/sources/DataSourceGCE.py | 22 +++- cloudinit/sources/__init__.py | 10 ++ cloudinit/url_helper.py | 9 +- tests/unittests/test_datasource/test_gce.py | 18 +++ 6 files changed, 274 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index f8f7cb35..53f69399 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -91,6 +91,9 @@ public keys. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... 
+ ssh_publish_hostkeys: + enabled: (Defaults to true) + blacklist: (Defaults to [dsa]) """ import glob @@ -104,6 +107,10 @@ from cloudinit import util GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' +PUBLISH_HOST_KEYS = True +# Don't publish the dsa hostkey by default since OpenSSH recommends not using +# it. +HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] CONFIG_KEY_TO_FILE = {} PRIV_TO_PUB = {} @@ -176,6 +183,23 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed generating key type %s to " "file %s", keytype, keyfile) + if "ssh_publish_hostkeys" in cfg: + host_key_blacklist = util.get_cfg_option_list( + cfg["ssh_publish_hostkeys"], "blacklist", + HOST_KEY_PUBLISH_BLACKLIST) + publish_hostkeys = util.get_cfg_option_bool( + cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) + else: + host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST + publish_hostkeys = PUBLISH_HOST_KEYS + + if publish_hostkeys: + hostkeys = get_public_host_keys(blacklist=host_key_blacklist) + try: + cloud.datasource.publish_host_keys(hostkeys) + except Exception as e: + util.logexc(log, "Publishing host keys failed!") + try: (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) @@ -209,4 +233,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): ssh_util.setup_user_keys(keys, 'root', options=key_prefix) + +def get_public_host_keys(blacklist=None): + """Read host keys from /etc/ssh/*.pub files and return them as a list. + + @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] + @returns: List of keys, each formatted as a two-element tuple. + e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] + """ + public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) + key_list = [] + blacklist_files = [] + if blacklist: + # Convert blacklist to filenames: + # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' + blacklist_files = [public_key_file_tmpl % (key_type,) + for key_type in blacklist] + # Get list of public key files and filter out blacklisted files. + file_list = [hostfile for hostfile + in glob.glob(public_key_file_tmpl % ('*',)) + if hostfile not in blacklist_files] + + # Read host key files, retrieve first two fields as a tuple and + # append that tuple to key_list. + for file_name in file_list: + file_contents = util.load_file(file_name) + key_data = file_contents.split() + if key_data and len(key_data) > 1: + key_list.append(tuple(key_data[:2])) + return key_list + + # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index c8a4271f..e7789842 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import os.path from cloudinit.config import cc_ssh from cloudinit import ssh_util @@ -12,6 +13,25 @@ MODPATH = "cloudinit.config.cc_ssh." 
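# A minimal cloud-config sketch of the ssh_publish_hostkeys knob documented
# above (the values shown are the module defaults, given here only for
# illustration):
#
#   #cloud-config
#   ssh_publish_hostkeys:
#     enabled: true        # set to false to opt out of publishing
#     blacklist: ['dsa']   # key types that will never be published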
class TestHandleSsh(CiTestCase): """Test cc_ssh handling of ssh config.""" + def _publish_hostkey_test_setup(self): + self.test_hostkeys = { + 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), + 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), + 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), + 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), + } + self.test_hostkey_files = [] + hostkey_tmpdir = self.tmp_dir() + for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: + key_data = self.test_hostkeys[key_type] + filename = 'ssh_host_%s_key.pub' % key_type + filepath = os.path.join(hostkey_tmpdir, filename) + self.test_hostkey_files.append(filepath) + with open(filepath, 'w') as f: + f.write(' '.join(key_data)) + + cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') + def test_apply_credentials_with_user(self, m_setup_keys): """Apply keys for the given user and root.""" keys = ["key1"] @@ -64,6 +84,7 @@ class TestHandleSsh(CiTestCase): # Mock os.path.exits to True to short-circuit the key writing logic m_path_exists.return_value = True m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) @@ -149,3 +170,148 @@ class TestHandleSsh(CiTestCase): self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_default( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_enable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = False + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_disable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': False}} + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) + cloud.datasource.publish_host_keys.assert_not_called() + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': ['dsa', 'rsa']}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_empty_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': []}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['dsa', 'ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, None, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index d8162623..6cbfbbac 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -18,10 +18,13 @@ LOG = logging.getLogger(__name__) MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes') +HOSTKEY_NAMESPACE = 'hostkeys' +HEADERS = {'Metadata-Flavor': 'Google'} class GoogleMetadataFetcher(object): - headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address @@ -32,7 +35,7 @@ class GoogleMetadataFetcher(object): url = self.metadata_address + path if is_recursive: url += '/?recursive=True' - resp = url_helper.readurl(url=url, headers=self.headers) + resp = url_helper.readurl(url=url, headers=HEADERS) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -90,6 +93,10 @@ class DataSourceGCE(sources.DataSource): public_keys_data = self.metadata['public-keys-data'] return _parse_public_keys(public_keys_data, self.default_user) + def publish_host_keys(self, hostkeys): + for key in hostkeys: + _write_host_key_to_guest_attributes(*key) + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): # GCE has long FDQN's and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @@ -103,6 +110,17 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] +def _write_host_key_to_guest_attributes(key_type, key_value): + url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) + key_value = key_value.encode('utf-8') + resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, + request_method='PUT', check_status=False) + if resp.ok(): + LOG.debug('Wrote %s host key to guest attributes.', key_type) + else: + LOG.debug('Unable to write %s host key to guest attributes.', key_type) + + def _has_expired(public_key): # Check whether an SSH key is expired. Public key input is a single SSH # public key in the GCE specific key format documented here: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c2baccd5..a319322b 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -491,6 +491,16 @@ class DataSource(object): def get_public_ssh_keys(self): return normalize_pubkey_data(self.metadata.get('public-keys')) + def publish_host_keys(self, hostkeys): + """Publish the public SSH host keys (found in /etc/ssh/*.pub). 
+
+        @param hostkeys: List of host key tuples (key_type, key_value),
+            where key_type is the first field in the public key file
+            (e.g. 'ssh-rsa') and key_value is the key itself
+            (e.g. 'AAAAB3NzaC1y...').
+        """
+        pass
+
     def _remap_device(self, short_name):
         # LP: #611137
         # the metadata service may believe that devices are named 'sda'
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0af0d9e3..44ee61d4 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -199,18 +199,19 @@ def _get_ssl_args(url, ssl_details):
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
             check_status=True, allow_redirects=True, exception_cb=None,
-            session=None, infinite=False, log_req_resp=True):
+            session=None, infinite=False, log_req_resp=True,
+            request_method=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
     }
     req_args.update(_get_ssl_args(url, ssl_details))
     req_args['allow_redirects'] = allow_redirects
-    req_args['method'] = 'GET'
+    if not request_method:
+        request_method = 'POST' if data else 'GET'
+    req_args['method'] = request_method
     if timeout is not None:
         req_args['timeout'] = max(float(timeout), 0)
-    if data:
-        req_args['method'] = 'POST'
     # It doesn't seem like config
     # was added in older library versions (or newer ones either), thus we
     # need to manually do the retries if it wasn't...
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 41176c6a..67744d32 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = {
 HEADERS = {'Metadata-Flavor': 'Google'}
 MD_URL_RE = re.compile(
     r'http://metadata.google.internal/computeMetadata/v1/.*')
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+                        'v1/instance/guest-attributes/hostkeys/')
 
 
 def _set_mock_metadata(gce_meta=None):
@@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
             public_key_data, default_user='default')
         self.assertEqual(sorted(found), sorted(expected))
 
+    @mock.patch("cloudinit.url_helper.readurl")
+    def test_publish_host_keys(self, m_readurl):
+        hostkeys = [('ssh-rsa', 'asdfasdf'),
+                    ('ssh-ed25519', 'qwerqwer')]
+        readurl_expected_calls = [
+            mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
+            mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
+                      request_method='PUT',
+                      url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
+        ]
+        self.ds.publish_host_keys(hostkeys)
+        m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
 
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 7f674256c1426ffc419fd6b13e66a58754d94939 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 13 Aug 2019 20:13:05 +0000
Subject: azure/net: generate_fallback_nic emits network v2 config instead of v1

The function generate_fallback_config is used by Azure by default when
not consuming IMDS configuration data. This function is also used by
any datasource which does not implement its own network config. This
simple fallback configuration sets up dhcp on the most likely NIC. It
will now emit network v2 instead of network v1. This is a step toward
having all components talk in v2, and it allows us to avoid costly
conversions between v1 and v2 for newer distributions which rely on
netplan.
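For a single-NIC machine, the rendered fallback configuration changes shape
roughly as follows (interface name and MAC address are illustrative only):

    # v1, as emitted before this change:
    {'version': 1,
     'config': [{'type': 'physical', 'name': 'eth0',
                 'mac_address': '00:11:22:33:44:55',
                 'subnets': [{'type': 'dhcp'}]}]}

    # v2, as emitted after this change:
    {'version': 2,
     'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0',
                            'match': {'macaddress': '00:11:22:33:44:55'}}}}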
--- cloudinit/net/__init__.py | 31 +++++--------- cloudinit/net/network_state.py | 12 ++++-- cloudinit/net/tests/test_init.py | 19 +++++---- cloudinit/sources/DataSourceAzure.py | 7 +++- tests/unittests/test_datasource/test_azure.py | 59 ++++++++++++++++++++++++++- tests/unittests/test_net.py | 41 +++++++++++++++++-- 6 files changed, 130 insertions(+), 39 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f3cec794..ea707c09 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -265,32 +265,23 @@ def find_fallback_nic(blacklist_drivers=None): def generate_fallback_config(blacklist_drivers=None, config_driver=None): - """Determine which attached net dev is most likely to have a connection and - generate network state to run dhcp on that interface""" - + """Generate network cfg v2 for dhcp on the NIC most likely connected.""" if not config_driver: config_driver = False target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) - if target_name: - target_mac = read_sys_net_safe(target_name, 'address') - nconf = {'config': [], 'version': 1} - cfg = {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} - # inject the device driver name, dev_id into config if enabled and - # device has a valid device driver value - if config_driver: - driver = device_driver(target_name) - if driver: - cfg['params'] = { - 'driver': driver, - 'device_id': device_devid(target_name), - } - nconf['config'].append(cfg) - return nconf - else: + if not target_name: # can't read any interfaces addresses (or there are none); give up return None + target_mac = read_sys_net_safe(target_name, 'address') + cfg = {'dhcp4': True, 'set-name': target_name, + 'match': {'macaddress': target_mac.lower()}} + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['match']['driver'] = driver + nconf = {'ethernets': {target_name: cfg}, 'version': 2} + return nconf def extract_physdevs(netcfg): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 0ca576b6..c0c415d0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -596,6 +596,7 @@ class NetworkStateInterpreter(object): eno1: match: macaddress: 00:11:22:33:44:55 + driver: hv_netsvc wakeonlan: true dhcp4: true dhcp6: false @@ -631,15 +632,18 @@ class NetworkStateInterpreter(object): 'type': 'physical', 'name': cfg.get('set-name', eth), } - mac_address = cfg.get('match', {}).get('macaddress', None) + match = cfg.get('match', {}) + mac_address = match.get('macaddress', None) if not mac_address: LOG.debug('NetworkState Version2: missing "macaddress" info ' 'in config entry: %s: %s', eth, str(cfg)) - phy_cmd.update({'mac_address': mac_address}) - + phy_cmd['mac_address'] = mac_address + driver = match.get('driver', None) + if driver: + phy_cmd['params'] = {'driver': driver} for key in ['mtu', 'match', 'wakeonlan']: if key in cfg: - phy_cmd.update({key: cfg.get(key)}) + phy_cmd[key] = cfg[key] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index e6e77d7a..d2e38f00 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -212,9 +212,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth1', 'subnets': 
[{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth1': {'match': {'macaddress': mac}, + 'dhcp4': True, 'set-name': 'eth1'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): @@ -223,9 +223,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, + 'set-name': 'eth0'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): @@ -233,9 +233,10 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': { + 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, + 'set-name': 'eth0'}}, + 'version': 2} valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] for state in valid_operstates: write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d2fad9bb..e6ed2f3b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1241,7 +1241,7 @@ def parse_network_config(imds_metadata): privateIpv4 = addr4['privateIpAddress'] if privateIpv4: if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 + # Append static address config for ip > 1 netPrefix = intf['ipv4']['subnet'][0].get( 'prefix', '24') if not dev_config.get('addresses'): @@ -1251,6 +1251,11 @@ def parse_network_config(imds_metadata): ip=privateIpv4, prefix=netPrefix)) else: dev_config['dhcp4'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp4-overrides'] = { + 'route-metric': (idx + 1) * 100} for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = addr6['privateIpAddress'] if privateIpv6: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 2de2aea2..4d57cebc 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, ExitStack, resourceLocation) +import copy import crypt import httpretty import json @@ -129,6 +130,26 @@ NETWORK_METADATA = { } } +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + MOCKPATH = 'cloudinit.sources.DataSourceAzure.' 
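# The route-metric values asserted in the tests below follow directly from
# the parse_network_config() change above; stripped down to a sketch (the
# variable names are illustrative, idx is the interface's position in the
# IMDS interface list):
#
#   for idx, intf in enumerate(imds_metadata['network']['interface']):
#       ...
#       dev_config['dhcp4-overrides'] = {'route-metric': (idx + 1) * 100}
#
# so the primary NIC gets metric 100, the second NIC 200, and so on, which
# keeps default routes preferring the primary NIC.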
@@ -619,8 +640,43 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp4': True}}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + + self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) @@ -925,6 +981,7 @@ scbus-1 on xpt0 bus 0 expected_cfg = { 'ethernets': { 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1840ade0..4f7e4207 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2156,7 +2156,7 @@ DEFAULT_DEV_ATTRS = { "carrier": False, "dormant": False, "operstate": "down", - "address": "07-1C-C6-75-A4-BE", + "address": "07-1c-c6-75-a4-be", "device/driver": None, "device/device": None, "name_assign_type": "4", @@ -2204,6 +2204,39 @@ class TestGenerateFallbackConfig(CiTestCase): "cloudinit.util.get_cmdline", "m_get_cmdline", return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + """Network configuration for generate_fallback_config is version 2.""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + expected = { + 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', + 'match': {'macaddress': 
'00:11:22:33:44:55', + 'driver': 'hv_netsvc'}}}, + 'version': 2} + self.assertEqual(expected, network_cfg) + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -2486,7 +2519,7 @@ class TestRhelSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3030,7 +3063,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -3342,13 +3375,13 @@ class TestNetplanNetRendering(CiTestCase): expected = """ network: - version: 2 ethernets: eth1000: dhcp4: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 + version: 2 """ self.assertEqual(expected.lstrip(), contents.lstrip()) self.assertEqual(1, mock_clean_default.call_count) -- cgit v1.2.3 From 2f3bb764626b9065f4102c7c0a67998a9c174444 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 14 Aug 2019 21:03:13 +0000 Subject: Azure: Record boot timestamps, system information, and diagnostic events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Collect and record the following information through KVP:  + timestamps related to kernel initialization and systemd activation    of cloud-init services  + system information including cloud-init version, kernel version,    distro version, and python version  + diagnostic events for the most common provisioning error issues    such as empty dhcp lease, corrupted ovf-env.xml, etc. + increasing the log frequency of polling IMDS during reprovision. --- cloudinit/sources/DataSourceAzure.py | 157 ++++++++++++++++++++----- cloudinit/sources/helpers/azure.py | 160 ++++++++++++++++++++++++-- tests/unittests/test_datasource/test_azure.py | 15 ++- tests/unittests/test_reporting_hyperv.py | 65 +++++++++++ 4 files changed, 353 insertions(+), 44 deletions(-) mode change 100755 => 100644 tests/unittests/test_reporting_hyperv.py (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e6ed2f3b..4984fa84 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -26,9 +26,14 @@ from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util from cloudinit.reporting import events -from cloudinit.sources.helpers.azure import (azure_ds_reporter, - azure_ds_telemetry_reporter, - get_metadata_from_fabric) +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting) LOG = logging.getLogger(__name__) @@ -354,7 +359,7 @@ class DataSourceAzure(sources.DataSource): bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] LOG.debug("ssh authentication: " - "using fingerprint from fabirc") + "using fingerprint from fabric") with events.ReportEventStack( name="waiting-for-ssh-public-key", @@ -419,12 +424,17 @@ class DataSourceAzure(sources.DataSource): ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except util.MountFailedError: - 
LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -432,6 +442,7 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "Free BSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( @@ -450,7 +461,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -469,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -492,6 +510,16 @@ class DataSourceAzure(sources.DataSource): """ if not self._is_platform_viable(): return False + try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + try: crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', @@ -551,27 +579,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. 
+ LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -579,17 +635,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, - headers=headers, exception_cb=exc_cb, - infinite=True, log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -598,6 +670,14 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ @@ -666,9 +746,12 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." 
+ "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False @@ -1027,7 +1110,9 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -1299,8 +1384,13 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise @azure_ds_telemetry_reporter @@ -1313,11 +1403,14 @@ def _get_metadata_from_imds(retries): url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} @@ -1370,8 +1463,10 @@ def _is_platform_viable(seed_dir): asset_tag = util.read_dmi_data('chassis-asset-tag') if asset_tag == AZURE_CHASSIS_ASSET_TAG: return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - evt.description = "Non-Azure DMI asset tag '%s' discovered.", asset_tag + msg = "Non-Azure DMI asset tag '%s' discovered." 
% asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): return True return False diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 82c4c8c4..f1fba175 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,7 +16,11 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) @@ -24,6 +28,10 @@ LOG = logging.getLogger(__name__) # value is applied if the endpoint can't be found within a lease file DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + azure_ds_reporter = events.ReportEventStack( name="azure-ds", description="initialize reporter for azure ds", @@ -40,6 +48,105 @@ def azure_ds_telemetry_reporter(func): return impl +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, 
distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(str): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + str, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -360,16 +467,19 @@ class WALinuxAgentShim(object): value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/.json # a dhclient exit hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -381,11 +491,15 @@ class WALinuxAgentShim(object): value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) if value is None: - LOG.warning("No lease found; using default endpoint") + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address @azure_ds_telemetry_reporter @@ -399,16 +513,19 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) + report_diagnostic_event("container_id %s" % goal_state.container_id) ssh_keys = [] if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -449,11 +566,20 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point. 
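# (time.sleep(0) below is the conventional idiom for yielding the thread
# scheduler without actually pausing; relatedly, the wall-clock stamps that
# get_boot_telemetry() above reports are derived from systemd's monotonic
# microsecond values, roughly:
#
#     kernel_start = time.time() - util.uptime()        # epoch of boot
#     user_start = kernel_start + float(tsm) / 1000000  # tsm parsed from
#                                                       # `systemctl show`
# )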
+ time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') @@ -467,4 +593,22 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4d57cebc..3547dd94 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,7 +181,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -195,7 +195,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) @@ -552,7 +552,8 @@ scbus-1 on xpt0 bus 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -1308,7 +1309,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1324,7 +1327,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100755 new mode 100644 index d01ed5b3..640895a4 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -7,9 +7,12 @@ import json import os import struct 
import time +import re +import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase +from cloudinit.sources.helpers import azure class TestKvpEncoding(CiTestCase): @@ -126,3 +129,65 @@ class TextKvpReporter(CiTestCase): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) kvps = list(reporter._iterate_kvps(0)) self.assertEqual(0, len(kvps)) + + @mock.patch('cloudinit.distros.uses_systemd') + @mock.patch('cloudinit.util.subp') + def test_get_boot_telemetry(self, m_subp, m_sysd): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + + # get_boot_telemetry makes two subp calls to systemctl. We provide + # a list of values that the subp calls should return + m_subp.side_effect = [ + ('UserspaceTimestampMonotonic=1844838', ''), + ('InactiveExitTimestampMonotonic=3068203', '')] + m_sysd.return_value = True + + reporter.publish_event(azure.get_boot_telemetry()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + evt_msg = kvps[0]['value'] + if not re.search("kernel_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing kernel_start timestamp") + if not re.search("user_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing user_start timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, + evt_msg): + raise AssertionError( + "missing cloudinit_activation timestamp") + + def test_get_system_info(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + pattern = r"[^=\s]+" + + reporter.publish_event(azure.get_system_info()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + # the most important information is cloudinit version, + # kernel_version, and the distro variant. It is ok if + # if the rest is not available + if not re.search("cloudinit_version=" + pattern, evt_msg): + raise AssertionError("missing cloudinit_version string") + if not re.search("kernel_version=" + pattern, evt_msg): + raise AssertionError("missing kernel_version string") + if not re.search("variant=" + pattern, evt_msg): + raise AssertionError("missing distro variant string") + + def test_report_diagnostic_event(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + + reporter.publish_event( + azure.report_diagnostic_event("test_diagnostic")) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + if "test_diagnostic" not in evt_msg: + raise AssertionError("missing expected diagnostic message") -- cgit v1.2.3 From 85878703e80f536168bcf86cc6444f7aba44cdc3 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 19 Aug 2019 21:37:29 +0000 Subject: ubuntu-drivers: emit latelink=true debconf to accept nvidia eula To accept NVIDIA EULA, cloud-init needs to emit latelink=true debconf setting to the linux-restricted-modules package to allow NVIDIA drivers to properly link to the running kernel. 
LP: #1840080 --- cloudinit/config/cc_apt_configure.py | 4 +++- cloudinit/config/cc_ubuntu_drivers.py | 10 ++++++++++ cloudinit/config/tests/test_ubuntu_drivers.py | 18 +++++++++++++----- .../test_handler/test_handler_apt_source_v3.py | 11 +++++++++++ 4 files changed, 37 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 919d1995..f01e2aaf 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -332,6 +332,8 @@ def apply_apt(cfg, cloud, target): def debconf_set_selections(selections, target=None): + if not selections.endswith(b'\n'): + selections += b'\n' util.subp(['debconf-set-selections'], data=selections, target=target, capture=True) @@ -374,7 +376,7 @@ def apply_debconf_selections(cfg, target=None): selections = '\n'.join( [selsets[key] for key in sorted(selsets.keys())]) - debconf_set_selections(selections.encode() + b"\n", target=target) + debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input pkgs_cfgd = set() diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index 91feb603..4da34ee0 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -4,6 +4,7 @@ from textwrap import dedent +from cloudinit.config import cc_apt_configure from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging @@ -92,6 +93,15 @@ def install_drivers(cfg, pkg_install_func): LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)", cfgpath, nv_acc, version_cfg if version_cfg else 'latest') + # Setting NVIDIA latelink confirms acceptance of EULA for the package + # linux-restricted-modules + # Reference code defining debconf variable is here + # https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/ + # linux-restricted-modules/+git/eoan/tree/debian/templates/ + # nvidia.templates.in + selections = b'linux-restricted-modules linux/nvidia/latelink boolean true' + cc_apt_configure.debconf_set_selections(selections) + try: util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) except util.ProcessExecutionError as exc: diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py index efba4ce7..6a763bd9 100644 --- a/cloudinit/config/tests/test_ubuntu_drivers.py +++ b/cloudinit/config/tests/test_ubuntu_drivers.py @@ -28,9 +28,11 @@ class TestUbuntuDrivers(CiTestCase): {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, schema=drivers.schema, strict=True) + @mock.patch(MPATH + "cc_apt_configure.debconf_set_selections") @mock.patch(MPATH + "util.subp", return_value=('', '')) @mock.patch(MPATH + "util.which", return_value=False) - def _assert_happy_path_taken(self, config, m_which, m_subp): + def _assert_happy_path_taken( + self, config, m_which, m_subp, m_debconf_set_selections): """Positive path test through handle. 
Package should be installed.""" myCloud = mock.MagicMock() drivers.handle('ubuntu_drivers', config, myCloud, None, None) @@ -38,6 +40,8 @@ class TestUbuntuDrivers(CiTestCase): myCloud.distro.install_packages.call_args_list) self.assertEqual([mock.call(self.install_gpgpu)], m_subp.call_args_list) + m_debconf_set_selections.assert_called_with( + b'linux-restricted-modules linux/nvidia/latelink boolean true') def test_handle_does_package_install(self): self._assert_happy_path_taken(self.cfg_accepted) @@ -48,10 +52,11 @@ class TestUbuntuDrivers(CiTestCase): new_config['drivers']['nvidia']['license-accepted'] = true_value self._assert_happy_path_taken(new_config) + @mock.patch(MPATH + "cc_apt_configure.debconf_set_selections") @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( stdout='No drivers found for installation.\n', exit_code=1)) @mock.patch(MPATH + "util.which", return_value=False) - def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp): + def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp, _): """If ubuntu-drivers doesn't install any drivers, raise an error.""" myCloud = mock.MagicMock() with self.assertRaises(Exception): @@ -108,9 +113,10 @@ class TestUbuntuDrivers(CiTestCase): myLog.debug.call_args_list[0][0][0]) self.assertEqual(0, m_install_drivers.call_count) + @mock.patch(MPATH + "cc_apt_configure.debconf_set_selections") @mock.patch(MPATH + "util.subp", return_value=('', '')) @mock.patch(MPATH + "util.which", return_value=True) - def test_install_drivers_no_install_if_present(self, m_which, m_subp): + def test_install_drivers_no_install_if_present(self, m_which, m_subp, _): """If 'ubuntu-drivers' is present, no package install should occur.""" pkg_install = mock.MagicMock() drivers.install_drivers(self.cfg_accepted['drivers'], @@ -128,11 +134,12 @@ class TestUbuntuDrivers(CiTestCase): drivers.install_drivers("mystring", pkg_install_func=pkg_install) self.assertEqual(0, pkg_install.call_count) + @mock.patch(MPATH + "cc_apt_configure.debconf_set_selections") @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)) @mock.patch(MPATH + "util.which", return_value=False) def test_install_drivers_handles_old_ubuntu_drivers_gracefully( - self, m_which, m_subp): + self, m_which, m_subp, _): """Older ubuntu-drivers versions should emit message and raise error""" myCloud = mock.MagicMock() with self.assertRaises(Exception): @@ -153,9 +160,10 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers): 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + @mock.patch(MPATH + "cc_apt_configure.debconf_set_selections") @mock.patch(MPATH + "util.subp", return_value=('', '')) @mock.patch(MPATH + "util.which", return_value=False) - def test_version_none_uses_latest(self, m_which, m_subp): + def test_version_none_uses_latest(self, m_which, m_subp, _): myCloud = mock.MagicMock() version_none_cfg = { 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 90fe6eed..2f21b6dc 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -998,6 +998,17 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") class TestDebconfSelections(TestCase): + 
@mock.patch("cloudinit.config.cc_apt_configure.util.subp") + def test_set_sel_appends_newline_if_absent(self, m_subp): + """Automatically append a newline to debconf-set-selections config.""" + selections = b'some/setting boolean true' + cc_apt_configure.debconf_set_selections(selections=selections) + cc_apt_configure.debconf_set_selections(selections=selections + b'\n') + m_call = mock.call( + ['debconf-set-selections'], data=selections + b'\n', capture=True, + target=None) + self.assertEqual([m_call, m_call], m_subp.call_args_list) + @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") def test_no_set_sel_if_none_to_set(self, m_set_sel): cc_apt_configure.apply_debconf_selections({'foo': 'bar'}) -- cgit v1.2.3 From a3926bffc985e5d39056858f65259a4ac438c037 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 22 Aug 2019 21:07:16 +0000 Subject: net/cmdline: refactor to allow multiple initramfs network config sources This refactors read_initramfs_config to support multiple different types of initramfs network configuration. It introduces an InitramfsNetworkConfigSource abstract base class. There is currently a single sub-class, KlibcNetworkConfigSource, which contains the logic which previously was directly within read_initramfs_config. --- cloudinit/net/cmdline.py | 127 +++++++++++++++++++++++++++++++++----------- tests/unittests/test_net.py | 99 +++++++++++++++++++++++++++++----- 2 files changed, 181 insertions(+), 45 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 556a10f3..55166ea8 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -5,20 +5,95 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import abc import base64 import glob import gzip import io import os -from . import get_devicelist -from . import read_sys_net_safe +import six from cloudinit import util +from . import get_devicelist +from . import read_sys_net_safe + _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +@six.add_metaclass(abc.ABCMeta) +class InitramfsNetworkConfigSource(object): + """ABC for net config sources that read config written by initramfses""" + + @abc.abstractmethod + def is_applicable(self): + # type: () -> bool + """Is this initramfs config source applicable to the current system?""" + pass + + @abc.abstractmethod + def render_config(self): + # type: () -> dict + """Render a v1 network config from the initramfs configuration""" + pass + + +class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): + """InitramfsNetworkConfigSource for klibc initramfs (i.e. Debian/Ubuntu) + + Has three parameters, but they are intended to make testing simpler, _not_ + for use in production code. (This is indicated by the prepended + underscores.) 
+ """ + + def __init__(self, _files=None, _mac_addrs=None, _cmdline=None): + self._files = _files + self._mac_addrs = _mac_addrs + self._cmdline = _cmdline + + # Set defaults here, as they require computation that we don't want to + # do at method definition time + if self._files is None: + self._files = _get_klibc_net_cfg_files() + if self._cmdline is None: + self._cmdline = util.get_cmdline() + if self._mac_addrs is None: + self._mac_addrs = {} + for k in get_devicelist(): + mac_addr = read_sys_net_safe(k, 'address') + if mac_addr: + self._mac_addrs[k] = mac_addr + + def is_applicable(self): + # type: () -> bool + """ + Return whether this system has klibc initramfs network config or not + + Will return True if: + (a) klibc files exist in /run, AND + (b) either: + (i) ip= or ip6= are on the kernel cmdline, OR + (ii) an open-iscsi interface file is present in the system + """ + if self._files: + if 'ip=' in self._cmdline or 'ip6=' in self._cmdline: + return True + if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): + # iBft can configure networking without ip= + return True + return False + + def render_config(self): + # type: () -> dict + return config_from_klibc_net_cfg( + files=self._files, mac_addrs=self._mac_addrs, + ) + + +_INITRAMFS_CONFIG_SOURCES = [KlibcNetworkConfigSource] + + def _klibc_to_config_entry(content, mac_addrs=None): """Convert a klibc written shell content file to a 'config' entry When ip= is seen on the kernel command line in debian initramfs @@ -137,6 +212,24 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): return {'config': entries, 'version': 1} +def read_initramfs_config(): + """ + Return v1 network config for initramfs-configured networking (or None) + + This will consider each _INITRAMFS_CONFIG_SOURCES entry in turn, and return + v1 network configuration for the first one that is applicable. If none are + applicable, return None. + """ + for src_cls in _INITRAMFS_CONFIG_SOURCES: + cfg_source = src_cls() + + if not cfg_source.is_applicable(): + continue + + return cfg_source.render_config() + return None + + def _decomp_gzip(blob, strict=True): # decompress blob. raise exception if not compressed unless strict=False. 
with io.BytesIO(blob) as iobuf: @@ -167,36 +260,6 @@ def _b64dgz(b64str, gzipped="try"): return _decomp_gzip(blob, strict=gzipped != "try") -def _is_initramfs_netconfig(files, cmdline): - if files: - if 'ip=' in cmdline or 'ip6=' in cmdline: - return True - if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): - # iBft can configure networking without ip= - return True - return False - - -def read_initramfs_config(files=None, mac_addrs=None, cmdline=None): - if cmdline is None: - cmdline = util.get_cmdline() - - if files is None: - files = _get_klibc_net_cfg_files() - - if not _is_initramfs_netconfig(files, cmdline): - return None - - if mac_addrs is None: - mac_addrs = {} - for k in get_devicelist(): - mac_addr = read_sys_net_safe(k, 'address') - if mac_addr: - mac_addrs[k] = mac_addr - - return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) - - def read_kernel_cmdline_config(cmdline=None): if cmdline is None: cmdline = util.get_cmdline() diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 4f7e4207..e5789924 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3591,7 +3591,7 @@ class TestCmdlineConfigParsing(CiTestCase): self.assertEqual(found, self.simple_cfg) -class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): +class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase): macs = { 'eth0': '14:02:ec:42:48:00', 'eno1': '14:02:ec:42:48:01', @@ -3607,8 +3607,11 @@ class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_initramfs_config( - cmdline='foo root=/root/bar', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo root=/root/bar', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) @@ -3621,8 +3624,11 @@ class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_initramfs_config( - cmdline='foo ip=dhcp', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip=dhcp', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) @@ -3632,9 +3638,11 @@ class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_initramfs_config( - cmdline='foo ip6=dhcp root=/dev/sda', - mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip6=dhcp root=/dev/sda', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual( found, {'version': 1, 'config': [ @@ -3648,9 +3656,10 @@ class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): # if there is no ip= or ip6= on cmdline, return value should be None content = {'net6-eno1.conf': DHCP6_CONTENT_1} files = sorted(populate_dir(self.tmp_dir(), content)) - found = cmdline.read_initramfs_config( - files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) - self.assertIsNone(found) + src = cmdline.KlibcNetworkConfigSource( + _files=files, _cmdline='foo root=/dev/sda', _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) def test_with_both_ip_ip6(self): content = { @@ -3667,13 +3676,77 @@ class TestCmdlineReadInitramfsConfig(FilesystemMockingTestCase): populate_dir(root, 
content) self.reRoot(root) - found = cmdline.read_initramfs_config( - cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip=dhcp ip6=dhcp', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], expected) +class TestReadInitramfsConfig(CiTestCase): + + def _config_source_cls_mock(self, is_applicable, render_config=None): + return lambda: mock.Mock( + is_applicable=lambda: is_applicable, + render_config=lambda: render_config, + ) + + def test_no_sources(self): + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', []): + self.assertIsNone(cmdline.read_initramfs_config()) + + def test_no_applicable_sources(self): + sources = [ + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertIsNone(cmdline.read_initramfs_config()) + + def test_one_applicable_source(self): + expected_config = object() + sources = [ + self._config_source_cls_mock( + is_applicable=True, render_config=expected_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(expected_config, cmdline.read_initramfs_config()) + + def test_one_applicable_source_after_inapplicable_sources(self): + expected_config = object() + sources = [ + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock( + is_applicable=True, render_config=expected_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(expected_config, cmdline.read_initramfs_config()) + + def test_first_applicable_source_is_used(self): + first_config, second_config = object(), object() + sources = [ + self._config_source_cls_mock( + is_applicable=True, render_config=first_config, + ), + self._config_source_cls_mock( + is_applicable=True, render_config=second_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(first_config, cmdline.read_initramfs_config()) + + class TestNetplanRoundTrip(CiTestCase): def _render_and_read(self, network_config=None, state=None, netplan_path=None, target=None): -- cgit v1.2.3 From d1b022217a652c7a84d5430c9e571987864d3982 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 28 Aug 2019 00:58:16 +0000 Subject: exoscale: fix sysconfig cloud_config_modules overrides Make sure Exoscale supplements or overrides the existing system config setting cloud_config_modules instead of replacing it with a one-item list, set-passwords. LP: #1841454 --- cloudinit/sources/DataSourceExoscale.py | 26 ++++++++++++++++-------- tests/unittests/test_datasource/test_exoscale.py | 24 ++++++++++++++-------- 2 files changed, 33 insertions(+), 17 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py index 52e7f6f6..fdfb4ed3 100644 --- a/cloudinit/sources/DataSourceExoscale.py +++ b/cloudinit/sources/DataSourceExoscale.py @@ -6,6 +6,7 @@ from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources +from cloudinit import helpers from cloudinit import url_helper from cloudinit import util @@ -20,13 +21,6 @@ URL_RETRIES = 6
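The "first applicable source wins" contract that read_initramfs_config and the TestReadInitramfsConfig cases above pin down can be sketched in isolation; the following is illustrative only (ConfigSource and first_applicable_config are not cloud-init names):

import abc


class ConfigSource(abc.ABC):

    @abc.abstractmethod
    def is_applicable(self):
        """Return True if this source can provide config on this system."""

    @abc.abstractmethod
    def render_config(self):
        """Return a v1 network config dict for this source."""


def first_applicable_config(source_classes):
    # Instantiate each source class in order; return the rendered config
    # of the first applicable source, or None if none apply.
    for src_cls in source_classes:
        src = src_cls()
        if src.is_applicable():
            return src.render_config()
    return None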
EXOSCALE_DMI_NAME = "Exoscale" -BUILTIN_DS_CONFIG = { - # We run the set password config module on every boot in order to enable - # resetting the instance's password via the exoscale console (and a - # subsequent instance reboot). - 'cloud_config_modules': [["set-passwords", "always"]] -} - class DataSourceExoscale(sources.DataSource): @@ -42,8 +36,22 @@ class DataSourceExoscale(sources.DataSource): self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) - - self.extra_config = BUILTIN_DS_CONFIG + self.extra_config = {} + + def activate(self, cfg, is_new_instance): + """Adjust set-passwords module to run 'always' during each boot""" + # We run the set password config module on every boot in order to + # enable resetting the instance's password via the exoscale console + # (and a subsequent instance reboot). + # Exoscale password server only provides set-passwords user-data if + # a user has triggered a password reset. So calling that password + # service generally results in no additional cloud-config. + # TODO(Create util functions for overriding merged sys_cfg module freq) + mod = 'set_passwords' + sem_path = self.paths.get_ipath_cur('sem') + sem_helper = helpers.FileSemaphores(sem_path) + if sem_helper.clear('config_' + mod, None): + LOG.debug('Overriding module set-passwords with frequency always') def wait_for_metadata_service(self): """Wait for the metadata service to be reachable.""" diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py index 350c3304..f0061199 100644 --- a/tests/unittests/test_datasource/test_exoscale.py +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -11,8 +11,10 @@ from cloudinit.sources.DataSourceExoscale import ( PASSWORD_SERVER_PORT, read_metadata) from cloudinit.tests.helpers import HttprettyTestCase, mock +from cloudinit import util import httpretty +import os import requests @@ -63,6 +65,18 @@ class TestDatasourceExoscale(HttprettyTestCase): password = get_password() self.assertEqual(expected_password, password) + def test_activate_removes_set_passwords_semaphore(self): + """Allow set_passwords to run every boot by removing the semaphore.""" + path = helpers.Paths({'cloud_dir': self.tmp}) + sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + util.ensure_dir(sem_dir) + sem_file = os.path.join(sem_dir, 'config_set_passwords') + with open(sem_file, 'w') as stream: + stream.write('') + ds = DataSourceExoscale({}, None, path) + ds.activate(None, None) + self.assertFalse(os.path.exists(sem_file)) + def test_get_data(self): """The datasource conforms to expected behavior when supplied full test data.""" @@ -95,8 +109,6 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.get_config_obj(), {'ssh_pwauth': True, 'password': expected_password, - 'cloud_config_modules': [ - ["set-passwords", "always"]], 'chpasswd': { 'expire': False, }}) @@ -130,9 +142,7 @@ class TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) def test_get_data_no_password(self): """The datasource conforms to expected behavior when no password is @@ -163,9 +173,7 @@ class 
TestDatasourceExoscale(HttprettyTestCase): self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") self.assertEqual(ds.metadata, {"instance-id": expected_id, "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'cloud_config_modules': [ - ["set-passwords", "always"]]}) + self.assertEqual(ds.get_config_obj(), {}) @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') def test_read_metadata_when_password_server_unreachable(self, m_password): -- cgit v1.2.3 From 45426d8d38a7224962867ba71f390cce653e0d17 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Wed, 11 Sep 2019 18:53:01 +0000 Subject: VMware: add option into VMTools config to enable/disable custom script. VMware customization already has support to run a custom script during the VM customization. Adding this option allows a VM administrator to disable the execution of customization scripts. If set, the script will not execute and the customization status is set to GUESTCUST_ERROR_SCRIPT_DISABLED. --- cloudinit/sources/DataSourceOVF.py | 21 ++++++- .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 37 ++++++++++++ tests/unittests/test_datasource/test_ovf.py | 55 +++++++++++++++--- tests/unittests/test_vmware/test_guestcust_util.py | 65 ++++++++++++++++++++++ 5 files changed, 169 insertions(+), 10 deletions(-) create mode 100644 tests/unittests/test_vmware/test_guestcust_util.py (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dd941d2e..b1561892 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -40,11 +40,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \ from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( enable_nics, get_nics_to_enable, - set_customization_status + set_customization_status, + get_tools_config ) LOG = logging.getLogger(__name__) +CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" +GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" + class DataSourceOVF(sources.DataSource): @@ -148,6 +152,21 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + custScriptConfig = get_tools_config( + CONFGROUPNAME_GUESTCUSTOMIZATION, + GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, + "true") + if custScriptConfig.lower() == "false": + # Update the customization status if a + # custom script is disabled + if special_customization and customscript: + msg = "Custom script is disabled by VM Administrator" + LOG.debug(msg) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED) + raise RuntimeError(msg) + ccScriptsDir = os.path.join( self.paths.get_cpath("scripts"), "per-instance") diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index db5a00dc..65ae7390 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -10,5 +10,6 @@ class GuestCustErrorEnum(object): """Specifies different errors of Guest Customization engine""" GUESTCUST_ERROR_SUCCESS = 0 + GUESTCUST_ERROR_SCRIPT_DISABLED = 6 # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py 
b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index a590f323..eb78172e 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -7,6 +7,7 @@ import logging import os +import re import time from cloudinit import util @@ -117,4 +118,40 @@ def enable_nics(nics): logger.warning("Can't connect network interfaces after %d attempts", enableNicsWaitRetries) + +def get_tools_config(section, key, defaultVal): + """ Return the value of [section] key from VMTools configuration. + + @param section: String of section to read from VMTools config + @param key: String of key to read within [section] + @param defaultVal: String value returned if the key is not found + @returns: String value from key in [section] or defaultVal if + [section] is not present or vmware-toolbox-cmd is + not installed. + """ + + if not util.which('vmware-toolbox-cmd'): + logger.debug( + 'vmware-toolbox-cmd not installed, returning default value') + return defaultVal + + retValue = defaultVal + cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + + try: + (outText, _) = util.subp(cmd) + m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) + else: + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) + + return retValue + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 349d54cc..a615470a 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase): MARKER-ID = 12345345 """) util.write_file(conf_file, conf_content) - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) customscript = self.tmp_path('test-script', self.tdir) self.assertIn('Script %s not found!!' % customscript, str(context.exception)) + def test_get_data_cust_script_disabled(self): + """If the custom script is disabled by the VMware tools configuration, + raise a RuntimeError. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py new file mode 100644 index 00000000..b8fa9942 --- /dev/null +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -0,0 +1,65 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. +# +# Author: Xiaofeng Wang +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(util, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + with mock.patch.object(util, 'subp', + return_value=('key=value', b''), + side_effect=util.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. 
+ self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(util, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(util, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(util, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + +# vi: ts=4 expandtab -- cgit v1.2.3 From e7f16e215ef4fd6a31ddd2f1b585bbb6508bff06 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Sep 2019 21:09:34 +0000 Subject: Brightbox: restrict detection to require full domain match .brightbox.com The detection for brightbox in both ds-identify and in identify_brightbox would incorrectly match the domain 'bobrightbox', which is not a brightbox platform. The fix here is to restrict matching to '*.brightbox.com' rather than '*brightbox.com'. Also, while here, remove a URL to bug 1661693 which added the knowledge of brightbox. --- cloudinit/sources/DataSourceEc2.py | 2 +- tests/unittests/test_ds_identify.py | 16 ++++++++++++++-- tools/ds-identify | 3 +-- 3 files changed, 16 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 5c017bfb..10107456 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -473,7 +473,7 @@ def identify_aws(data): def identify_brightbox(data): - if data['serial'].endswith('brightbox.com'): + if data['serial'].endswith('.brightbox.com'): return CloudNames.BRIGHTBOX diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 587e6993..de87be29 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -195,6 +195,10 @@ class DsIdentifyBase(CiTestCase): return self._check_via_dict( data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + def _test_ds_not_found(self, name): + data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict(data, RC_NOT_FOUND) + def _check_via_dict(self, data, rc, dslist=None, **kwargs): ret = self._call_via_dict(data, **kwargs) good = False @@ -244,9 +248,13 @@ class TestDsIdentify(DsIdentifyBase): self._test_ds_found('Ec2-xen') def test_brightbox_is_ec2(self): - """EC2: product_serial ends with 'brightbox.com'""" + """EC2: product_serial ends with '.brightbox.com'""" self._test_ds_found('Ec2-brightbox') + def test_bobrightbox_is_not_brightbox(self): + """EC2: bobrightbox.com in product_serial is not brightbox""" + self._test_ds_not_found('Ec2-brightbox-negative') + def test_gce_by_product_name(self): """GCE identifies itself with product_name.""" self._test_ds_found('GCE') @@ -724,7 +732,11 @@ VALID_CFG = { }, 'Ec2-brightbox': { 'ds': 'Ec2', - 'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'}, + 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'}, + }, + 'Ec2-brightbox-negative': { + 'ds': 'Ec2', + 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'}, }, 'GCE': { 'ds': 'GCE', diff --git a/tools/ds-identify b/tools/ds-identify index 
e0d4865c..2447d14f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -891,9 +891,8 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" - # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 case "$serial" in - *brightbox.com) _RET="Brightbox"; return 0;; + *.brightbox.com) _RET="Brightbox"; return 0;; esac # AWS http://docs.aws.amazon.com/AWSEC2/ -- cgit v1.2.3 From 571f7c36e89f67f4c2d1cacfd8f9269bf864d560 Mon Sep 17 00:00:00 2001 From: Shixin Ruan Date: Wed, 18 Sep 2019 13:15:25 +0000 Subject: Add datasource for ZStack platform. The ZStack platform provides an AWS EC2 metadata service, and identifies its platform to the guest by setting the 'chassis asset tag' to a string that ends with '.zstack.io'. LP: #1841181 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 16 ++++++++++++- doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/zstack.rst | 36 +++++++++++++++++++++++++++++ tests/unittests/test_datasource/test_ec2.py | 28 ++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++++++- tools/ds-identify | 5 ++++ 7 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 doc/rtd/topics/datasources/zstack.rst (limited to 'tests') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 003ff1ff..fde1f75b 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -37,6 +37,7 @@ KNOWN_CLOUD_NAMES = [ 'Scaleway', 'SmartOS', 'VMware', + 'ZStack', 'Other'] # Potentially clear text collected logs diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 10107456..6c72ace2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -33,6 +33,7 @@ class CloudNames(object): ALIYUN = "aliyun" AWS = "aws" BRIGHTBOX = "brightbox" + ZSTACK = "zstack" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -477,10 +478,16 @@ def identify_brightbox(data): return CloudNames.BRIGHTBOX +def identify_zstack(data): + if data['asset_tag'].endswith('.zstack.io'): + return CloudNames.ZSTACK + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) + checks = (identify_aws, identify_brightbox, identify_zstack, + lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -498,6 +505,7 @@ def _collect_platform_data(): uuid: system-uuid from dmi or /sys/hypervisor uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) + asset_tag: 'dmidecode -s chassis-asset-tag' On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. 
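The checker chain used by identify_platform is worth seeing on its own; a minimal sketch under the same contract as the diff above (each checker returns a cloud name or None, the first positive result wins, and errors fall through to the next checker; names other than the endswith() conditions are illustrative, not the cloud-init API):

def identify_platform_sketch(data, checks):
    for checker in checks:
        try:
            result = checker(data)
            if result:
                return result
        except Exception:
            continue  # the real code logs a warning here
    return 'unknown'


checks = (
    lambda d: 'brightbox' if d['serial'].endswith('.brightbox.com') else None,
    lambda d: 'zstack' if d['asset_tag'].endswith('.zstack.io') else None,
)
print(identify_platform_sketch(
    {'serial': 'srv-otuxg.gb1.brightbox.com', 'asset_tag': ''}, checks))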
@@ -520,6 +528,12 @@ def _collect_platform_data(): data['serial'] = serial.lower() + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag is None: + asset_tag = '' + + data['asset_tag'] = asset_tag.lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 8e58be97..a337c08c 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -45,6 +45,7 @@ The following is a list of documents for each supported datasource: datasources/oracle.rst datasources/ovf.rst datasources/smartos.rst + datasources/zstack.rst Creation diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst new file mode 100644 index 00000000..36e60ffb --- /dev/null +++ b/doc/rtd/topics/datasources/zstack.rst @@ -0,0 +1,36 @@ +.. _datasource_zstack: + +ZStack +====== +The ZStack platform provides an AWS EC2 metadata service, but with a different datasource identity. +More information about ZStack can be found at `ZStack `__. + +Discovery +--------- +To determine whether a VM is running on the ZStack platform, cloud-init checks DMI information +via 'dmidecode -s chassis-asset-tag'; if the output ends with '.zstack.io', the VM is running +on the ZStack platform. + + +Metadata +^^^^^^^^ +As on EC2, instance metadata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + instance-id + local-hostname + +Userdata +^^^^^^^^ +As on EC2, instance userdata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/user-data/ + meta_data.json + user_data + password + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1ec8e009..6fabf258 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -662,4 +662,32 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): expected, ec2.convert_ec2_metadata_network_config(self.network_metadata)) + +class TestIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """return non-special _collect_platform_data updated with changes.""" + unspecial = { + 'asset_tag': '3857-0037-2746-7462-1818-3997-77', + 'serial': 'H23-C4J3JV-R6', + 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', + 'uuid_source': 'dmi', + } + unspecial.update(**kwargs) + return unspecial + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack(self, m_collect): + """zstack should be identified if cassis-asset-tag ends in .zstack.io + """ + m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack_full_domain_only(self, m_collect): + """zstack asset-tag matching should match only on full domain boundary. 
+ """ + m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index de87be29..7aeeb91c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -609,6 +609,10 @@ class TestDsIdentify(DsIdentifyBase): self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") + def test_zstack_is_ec2(self): + """EC2: chassis asset tag ends with 'zstack.io'""" + self._test_ds_found('Ec2-ZStack') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -971,8 +975,11 @@ VALID_CFG = { {'name': 'blkid', 'ret': 2, 'out': ''}, ], 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + }, + 'Ec2-ZStack': { + 'ds': 'Ec2', + 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, } - } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 2447d14f..f76f2a6e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -895,6 +895,11 @@ ec2_identify_platform() { *.brightbox.com) _RET="Brightbox"; return 0;; esac + local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" + case "$asset_tag" in + *.zstack.io) _RET="ZStack"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From 0948cdfbef2052cdf839f24d6a17d457aa9fd4d3 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 26 Sep 2019 19:43:29 +0000 Subject: sysconfig: use distro variant to check if available The sysconfig renderer used the distro name directly which mean some variants of distros were not considered supported. Fix this by using util.system_info()['variant'] instead. Fix the list of KNOWN_DISTROS value for redhat -> rhel. 
LP: #1843584 --- cloudinit/net/sysconfig.py | 5 ++--- tests/unittests/test_net.py | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index be5dede7..416c1c97 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,8 +18,7 @@ from .network_state import ( LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" -KNOWN_DISTROS = [ - 'opensuse', 'sles', 'suse', 'redhat', 'fedora', 'centos'] +KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse'] def _make_header(sep='#'): @@ -731,7 +730,7 @@ class Renderer(renderer.Renderer): def available(target=None): sysconfig = available_sysconfig(target=target) nm = available_nm(target=target) - return (util.get_linux_distro()[0] in KNOWN_DISTROS + return (util.system_info()['variant'] in KNOWN_DISTROS and any([nm, sysconfig])) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e5789924..a093cf10 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4158,6 +4158,24 @@ class TestNetRenderers(CiTestCase): m_distro.return_value = ('opensuse', None, None) self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + @mock.patch("cloudinit.net.sysconfig.available_sysconfig") + @mock.patch("cloudinit.util.get_linux_distro") + def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail): + m_avail.return_value = True + distro_values = [ + ('opensuse', '', ''), + ('opensuse-leap', '', ''), + ('opensuse-tumbleweed', '', ''), + ('sles', '', ''), + ('centos', '', ''), + ('fedora', '', ''), + ('redhat', '', ''), + ] + for (distro_name, distro_version, flavor) in distro_values: + m_distro.return_value = (distro_name, distro_version, flavor) + result = sysconfig.available() + self.assertTrue(result) + class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], -- cgit v1.2.3 From f80f7f0d82eaf05e68419c7ac87bf5d4c934b796 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 26 Sep 2019 19:52:02 +0000 Subject: sysconfig: only write resolv.conf if network_state has DNS values If an OS image provided an /etc/resolv.conf file that was not empty, cloud-init would read and re-write it with a cloud-init header even if no DNS network configuration was provided (e.g. DHCP only). This can cause problems for some network services which don't ignore cloud-init's header. LP: #1843634 --- cloudinit/net/sysconfig.py | 6 ++++-- tests/unittests/test_net.py | 12 ++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 416c1c97..87b548e5 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -577,6 +577,10 @@ class Renderer(renderer.Renderer): @staticmethod def _render_dns(network_state, existing_dns_path=None): + # skip writing resolv.conf if network_state doesn't include any input. 
+ if not any([len(network_state.dns_nameservers), + len(network_state.dns_searchdomains)]): + return None content = resolv_conf.ResolvConf("") if existing_dns_path and os.path.isfile(existing_dns_path): content = resolv_conf.ResolvConf(util.load_file(existing_dns_path)) @@ -584,8 +588,6 @@ class Renderer(renderer.Renderer): content.add_nameserver(nameserver) for searchdomain in network_state.dns_searchdomains: content.add_search_domain(searchdomain) - if not str(content): - return None header = _make_header(';') content_str = str(content) if not content_str.startswith(header): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index a093cf10..b6597412 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2701,6 +2701,10 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) + # write an etc/resolv.conf and expect it to not be modified + resolvconf = os.path.join(render_dir, 'etc/resolv.conf') + resolvconf_content = "# Original Content" + util.write_file(resolvconf, resolvconf_content) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) @@ -2718,6 +2722,8 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + # a dhcp only config should not modify resolv.conf + self.assertEqual(resolvconf_content, found['/etc/resolv.conf']) def test_bond_config(self): expected_name = 'expected_sysconfig_rhel' @@ -3202,6 +3208,10 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) + # write an etc/resolv.conf and expect it to not be modified + resolvconf = os.path.join(render_dir, 'etc/resolv.conf') + resolvconf_content = "# Original Content" + util.write_file(resolvconf, resolvconf_content) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) @@ -3219,6 +3229,8 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + # a dhcp only config should not modify resolv.conf + self.assertEqual(resolvconf_content, found['/etc/resolv.conf']) def test_bond_config(self): expected_name = 'expected_sysconfig_opensuse' -- cgit v1.2.3 From 762f230cb37bdceaed3fb264e9cede48c2749d3f Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 27 Sep 2019 14:48:52 +0000 Subject: ovf: do not generate random instance-id for IMC customization path Cloud-init will not operate properly if the instance-id value changes on each boot. This is the source of a number of behavioral bugs filed against cloud-init with the OVF datasource. Instead, use a static instance-id value, iid-vmware-imc, similar to iid-dsovf. 
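Why a per-boot random instance-id misbehaves can be shown in a few lines; the helper below is a hypothetical sketch (not cloud-init code) of how per-instance handling keys off the instance-id:

def is_new_instance(instance_id, previous_id):
    # With the old md['instance-id'] = 'iid-vmware-' + rand_str(strlen=8),
    # this returned True on every boot, re-triggering per-instance
    # customization; the static 'iid-vmware-imc' makes it True only on
    # the first boot of an instance.
    return instance_id != previous_id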
--- cloudinit/sources/DataSourceOVF.py | 4 +--- tests/unittests/test_vmware_config_file.py | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index b1561892..e7794aab 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -406,9 +406,7 @@ def read_vmware_imc(config): if config.timezone: cfg['timezone'] = config.timezone - # Generate a unique instance-id so that re-customization will - # happen in cloud-init - md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8) + md['instance-id'] = "iid-vmware-imc" return (md, ud, cfg) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index f47335ea..16343ed2 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -62,13 +62,13 @@ class TestVmwareConfigFile(CiTestCase): (md1, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(len(md1["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') (md2, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(len(md2["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') - self.assertNotEqual(md1["instance-id"], md2["instance-id"]) + self.assertEqual(md2["instance-id"], md1["instance-id"]) def test_configfile_static_2nics(self): """Tests Config class for a configuration with two static NICs.""" -- cgit v1.2.3 From 052d655cfba37243396c491a1e7892ba3736ab6f Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 27 Sep 2019 20:31:57 +0000 Subject: debian/ubuntu: add missing word to netplan/ENI header Specifically, add in "reboot" to make it clear what people should expect when modifying the file. This also renames the variable to indicate it is used for netplan and ENI, not just ENI. LP: #1845669 --- cloudinit/distros/debian.py | 11 ++++++----- cloudinit/distros/ubuntu.py | 4 ++-- tests/unittests/test_distros/test_netconfig.py | 18 +++++++++--------- 3 files changed, 17 insertions(+), 16 deletions(-) (limited to 'tests') diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 0ad93ffe..cf082c73 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -29,9 +29,10 @@ APT_GET_WRAPPER = { 'enabled': 'auto', } -ENI_HEADER = """# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} """ @@ -48,9 +49,9 @@ class Distro(distros.Distro): } renderer_configs = { "eni": {"eni_path": network_conf_fn["eni"], - "eni_header": ENI_HEADER}, + "eni_header": NETWORK_FILE_HEADER}, "netplan": {"netplan_path": network_conf_fn["netplan"], - "netplan_header": ENI_HEADER, + "netplan_header": NETWORK_FILE_HEADER, "postcmds": True} } diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index e5fcbc58..23be3bdd 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -30,9 +30,9 @@ class Distro(debian.Distro): } self.renderer_configs = { "eni": {"eni_path": self.network_conf_fn["eni"], - "eni_header": debian.ENI_HEADER}, + "eni_header": debian.NETWORK_FILE_HEADER}, "netplan": {"netplan_path": self.network_conf_fn["netplan"], - "netplan_header": debian.ENI_HEADER, + "netplan_header": debian.NETWORK_FILE_HEADER, "postcmds": True} } diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 07b5c0a7..67209955 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -91,9 +91,9 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'version': 1} V1_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} auto lo @@ -125,9 +125,9 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', V1_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} network: @@ -154,9 +154,9 @@ V2_NET_CFG = { V2_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} network: -- cgit v1.2.3 From d3b1c4ae6bd237a04ba5df4306ff38f752f72132 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Fri, 4 Oct 2019 23:15:10 +0000 Subject: Add RbxCloud datasource --- cloudinit/sources/DataSourceRbxCloud.py | 250 ++++++++++++++++++++++++++++ doc/rtd/topics/datasources/rbxcloud.rst | 25 +++ tests/unittests/test_datasource/test_rbx.py | 208 +++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 +- tools/ds-identify | 7 +- 5 files changed, 505 insertions(+), 2 deletions(-) create mode 100644 cloudinit/sources/DataSourceRbxCloud.py create mode 100644 doc/rtd/topics/datasources/rbxcloud.rst create mode 100644 tests/unittests/test_datasource/test_rbx.py (limited to 'tests') diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py new file mode 100644 index 00000000..9a8c3d5c --- /dev/null +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -0,0 +1,250 @@ +# Copyright (C) 2018 Warsaw Data Center +# +# Author: Malwina Leis +# Author: Grzegorz Brzeski +# Author: Adam Dobrawy +# +# This file is part of cloud-init. See LICENSE file for license information. +""" +This file contains code used to gather the user data passed to an +instance on the Rootbox / HyperOne cloud platforms +""" +import errno +import os +import os.path + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.event import EventType + +LOG = logging.getLogger(__name__) +ETC_HOSTS = '/etc/hosts' + + +def get_manage_etc_hosts(): + hosts = util.load_file(ETC_HOSTS, quiet=True) + if hosts: + LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False') + return False + LOG.debug('/etc/hosts does not exist - setting manage_etc_hosts to True') + return True + + +def ip2int(addr): + parts = addr.split('.') + return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \ + (int(parts[2]) << 8) + int(parts[3]) + + +def int2ip(addr): + return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]]) + + +def _sub_arp(cmd): + """ + Uses the preferred cloud-init subprocess function util.subp + and runs arping. 
Breaking this to a separate function + for later use in mocking and unittests + """ + return util.subp(['arping'] + cmd) + + +def gratuitous_arp(items, distro): + source_param = '-S' + if distro.name in ['fedora', 'centos', 'rhel']: + source_param = '-s' + for item in items: + _sub_arp([ + '-c', '2', + source_param, item['source'], + item['destination'] + ]) + + +def get_md(): + rbx_data = None + devices = [ + dev + for dev, bdata in util.blkid().items() + if bdata.get('LABEL', '').upper() == 'CLOUDMD' + ] + for device in devices: + try: + rbx_data = util.mount_cb( + device=device, + callback=read_user_data_callback, + mtype=['vfat', 'fat'] + ) + if rbx_data: + break + except OSError as err: + if err.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", device) + if not rbx_data: + util.logexc(LOG, "Failed to load metadata and userdata") + return False + return rbx_data + + +def generate_network_config(netadps): + """Generate network configuration + + @param netadps: A list of network adapter settings + + @returns: A dict containing network config + """ + return { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth{}'.format(str(i)), + 'mac_address': netadp['macaddress'].lower(), + 'subnets': [ + { + 'type': 'static', + 'address': ip['address'], + 'netmask': netadp['network']['netmask'], + 'control': 'auto', + 'gateway': netadp['network']['gateway'], + 'dns_nameservers': netadp['network']['dns'][ + 'nameservers'] + } for ip in netadp['ip'] + ], + } for i, netadp in enumerate(netadps) + ] + } + + +def read_user_data_callback(mount_dir): + """This callback will be applied by util.mount_cb() on the mounted + drive. + + @param mount_dir: String representing path of directory where mounted drive + is available + + @returns: A dict containing userdata, metadata and cfg based on metadata. + """ + meta_data = util.load_json( + text=util.load_file( + fname=os.path.join(mount_dir, 'cloud.json'), + decode=False + ) + ) + user_data = util.load_file( + fname=os.path.join(mount_dir, 'user.data'), + quiet=True + ) + if 'vm' not in meta_data or 'netadp' not in meta_data: + util.logexc(LOG, "Failed to load metadata. 
Invalid format.") + return None + username = meta_data.get('additionalMetadata', {}).get('username') + ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', []) + + hash = None + if meta_data.get('additionalMetadata', {}).get('password'): + hash = meta_data['additionalMetadata']['password']['sha512'] + + network = generate_network_config(meta_data['netadp']) + + data = { + 'userdata': user_data, + 'metadata': { + 'instance-id': meta_data['vm']['_id'], + 'local-hostname': meta_data['vm']['name'], + 'public-keys': [] + }, + 'gratuitous_arp': [ + { + "source": ip["address"], + "destination": target + } + for netadp in meta_data['netadp'] + for ip in netadp['ip'] + for target in [ + netadp['network']["gateway"], + int2ip(ip2int(netadp['network']["gateway"]) + 2), + int2ip(ip2int(netadp['network']["gateway"]) + 3) + ] + ], + 'cfg': { + 'ssh_pwauth': True, + 'disable_root': True, + 'system_info': { + 'default_user': { + 'name': username, + 'gecos': username, + 'sudo': ['ALL=(ALL) NOPASSWD:ALL'], + 'passwd': hash, + 'lock_passwd': False, + 'ssh_authorized_keys': ssh_keys, + 'shell': '/bin/bash' + } + }, + 'network_config': network, + 'manage_etc_hosts': get_manage_etc_hosts(), + }, + } + + LOG.debug('returning DATA object:') + LOG.debug(data) + + return data + + +class DataSourceRbxCloud(sources.DataSource): + update_events = {'network': [ + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT + ]} + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def _get_data(self): + """ + Metadata is passed to the launching instance which + is used to perform instance configuration. + """ + rbx_data = get_md() + self.userdata_raw = rbx_data['userdata'] + self.metadata = rbx_data['metadata'] + self.gratuitous_arp = rbx_data['gratuitous_arp'] + self.cfg = rbx_data['cfg'] + return True + + @property + def network_config(self): + return self.cfg['network_config'] + + def get_public_ssh_keys(self): + return self.metadata['public-keys'] + + def get_userdata_raw(self): + return self.userdata_raw + + def get_config_obj(self): + return self.cfg + + def activate(self, cfg, is_new_instance): + gratuitous_arp(self.gratuitous_arp, self.distro) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst new file mode 100644 index 00000000..3d465bed --- /dev/null +++ b/doc/rtd/topics/datasources/rbxcloud.rst @@ -0,0 +1,25 @@ +.. _datasource_config_drive: + +Rbx Cloud +========= + +The Rbx datasource consumes the metadata drive available on platform +`HyperOne`_ and `Rootbox`_ platform. + +Datasource supports, in particular, network configurations, hostname, +user accounts and user metadata. + +Metadata drive +-------------- + +Drive metadata is a `FAT`_-formatted partition with the ```CLOUDMD``` label on +the system disk. Its contents are refreshed each time the virtual machine +is restarted, if the partition exists. For more information see +`HyperOne docs`_. + +.. _HyperOne: http://www.hyperone.com/ +.. _Rootbox: https://rootbox.com/ +.. _HyperOne Virtual Machine docs: http://www.hyperone.com/ +.. 
_vfat: https://en.wikipedia.org/wiki/File_Allocation_Table + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py new file mode 100644 index 00000000..aabf1f18 --- /dev/null +++ b/tests/unittests/test_datasource/test_rbx.py @@ -0,0 +1,208 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from cloudinit.tests.helpers import mock, CiTestCase, populate_dir + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def 
test_generate_network_config(self):
+        expected = {
+            'version': 1,
+            'config': [
+                {
+                    'subnets': [
+                        {'control': 'auto',
+                         'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
+                         'netmask': '255.255.248.0',
+                         'address': '62.181.8.174',
+                         'type': 'static', 'gateway': '62.181.8.1'}
+                    ],
+                    'type': 'physical',
+                    'name': 'eth0',
+                    'mac_address': '00:15:5d:ff:0f:03'
+                },
+                {
+                    'subnets': [
+                        {'control': 'auto',
+                         'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
+                         'netmask': '255.255.255.0',
+                         'address': '10.209.78.11',
+                         'type': 'static',
+                         'gateway': '10.209.78.1'}
+                    ],
+                    'type': 'physical',
+                    'name': 'eth1',
+                    'mac_address': '00:15:5d:ff:0f:24'
+                }
+            ]
+        }
+        self.assertEqual(
+            expected,
+            ds.generate_network_config(CLOUD_METADATA['netadp'])
+        )
+
+    @mock.patch(DS_PATH + '.util.subp')
+    def test_gratuitous_arp_run_standard_arping(self, m_subp):
+        """Test that arping is run with the expected parameters."""
+        items = [
+            {
+                'destination': '172.17.0.2',
+                'source': '172.16.6.104'
+            },
+            {
+                'destination': '172.17.0.2',
+                'source': '172.16.6.104',
+            },
+        ]
+        ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
+        self.assertEqual([
+            mock.call([
+                'arping', '-c', '2', '-S',
+                '172.16.6.104', '172.17.0.2'
+            ]),
+            mock.call([
+                'arping', '-c', '2', '-S',
+                '172.16.6.104', '172.17.0.2'
+            ])
+        ], m_subp.call_args_list)
+
+    @mock.patch(DS_PATH + '.util.subp')
+    def test_handle_rhel_like_arping(self, m_subp):
+        """Test handle on RHEL-like distros."""
+        items = [
+            {
+                'source': '172.16.6.104',
+                'destination': '172.17.0.2',
+            }
+        ]
+        ds.gratuitous_arp(items, self._fetch_distro('fedora'))
+        self.assertEqual([
+            mock.call(
+                ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
+            )],
+            m_subp.call_args_list
+        )
+
+
+def populate_cloud_metadata(path, data):
+    populate_dir(path, {'cloud.json': json.dumps(data)})
+
+
+def populate_user_metadata(path, data):
+    populate_dir(path, {'user.data': data})
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 7aeeb91c..c5b5c46c 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -267,10 +267,13 @@ class TestDsIdentify(DsIdentifyBase):
         """ConfigDrive datasource has a disk with LABEL=config-2."""
         self._test_ds_found('ConfigDrive')
 
+    def test_rbx_cloud(self):
+        """Rbx datasource has a disk with LABEL=CLOUDMD."""
+        self._test_ds_found('RbxCloud')
+
     def test_config_drive_upper(self):
         """ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
         self._test_ds_found('ConfigDriveUpper')
-        return
 
     def test_config_drive_seed(self):
         """Config Drive seed directory."""
@@ -896,6 +899,18 @@ VALID_CFG = {
         os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
                      'latest', 'meta_data.json'): 'md\n'},
     },
+    'RbxCloud': {
+        'ds': 'RbxCloud',
+        'mocks': [
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vda2', 'TYPE': 'ext4',
+                   'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
+                  {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}]
+             )},
+        ],
+    },
     'Hetzner': {
         'ds': 'Hetzner',
         'files': {P_SYS_VENDOR: 'Hetzner\n'},
diff --git a/tools/ds-identify b/tools/ds-identify
index f76f2a6e..40fc0604 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -124,7 +124,7 @@ DI_DSNAME=""
 # be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -702,6 +702,11 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +dscheck_RbxCloud() { + has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + ovf_vmware_guest_customization() { # vmware guest customization -- cgit v1.2.3 From 7d5d34f3643a2108d667759f57a5ab63d0affadd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 11 Oct 2019 14:54:49 +0000 Subject: Add Support for e24cloud to Ec2 datasource. e24cloud provides an EC2 compatible datasource. This just identifies their platform based on dmi 'system-vendor' having 'e24cloud'. https://www.e24cloud.com/en/ . Updated chassis typo in zstack unit test docstring. LP: #1696476 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 12 +++++++++++- doc/rtd/topics/datasources.rst | 3 ++- doc/rtd/topics/datasources/e24cloud.rst | 9 +++++++++ tests/unittests/test_datasource/test_ec2.py | 15 ++++++++++++++- tests/unittests/test_ds_identify.py | 18 +++++++++++++++++- tools/ds-identify | 5 +++++ 7 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 doc/rtd/topics/datasources/e24cloud.rst (limited to 'tests') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index fde1f75b..c6797f12 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -22,6 +22,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudSigma', 'CloudStack', 'DigitalOcean', + 'E24Cloud', 'GCE - Google Compute Engine', 'Exoscale', 'Hetzner Cloud', diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6c72ace2..1d88c9b1 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -34,6 +34,7 @@ class CloudNames(object): AWS = "aws" BRIGHTBOX = "brightbox" ZSTACK = "zstack" + E24CLOUD = "e24cloud" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -483,11 +484,16 @@ def identify_zstack(data): return CloudNames.ZSTACK +def identify_e24cloud(data): + if data['vendor'] == 'e24cloud': + return CloudNames.E24CLOUD + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() checks = (identify_aws, identify_brightbox, identify_zstack, - lambda x: CloudNames.UNKNOWN) + identify_e24cloud, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -506,6 +512,7 @@ def _collect_platform_data(): uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' + vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. 
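# Illustrative sketch, not part of the patch: the 'vendor' value consumed
# by identify_e24cloud() ultimately comes from the DMI sysfs attribute
# named in the docstring above. Assuming the standard sysfs path, a
# minimal standalone equivalent of the lookup added in the next hunk is:
def _read_sys_vendor(path='/sys/class/dmi/id/sys_vendor'):
    # Return the DMI system-manufacturer lower-cased, or '' when unset,
    # mirroring how _collect_platform_data() normalizes data['vendor'].
    try:
        with open(path) as fp:
            return fp.read().strip().lower()
    except (IOError, OSError):
        return ''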
@@ -534,6 +541,9 @@ def _collect_platform_data():
 
     data['asset_tag'] = asset_tag.lower()
 
+    vendor = util.read_dmi_data('system-manufacturer')
+    data['vendor'] = (vendor if vendor else '').lower()
+
     return data
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index a337c08c..70fbe07d 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -29,8 +29,9 @@ The following is a list of documents for each supported datasource:
 
    datasources/aliyun.rst
    datasources/altcloud.rst
-   datasources/ec2.rst
    datasources/azure.rst
+   datasources/ec2.rst
+   datasources/e24cloud.rst
    datasources/cloudsigma.rst
    datasources/cloudstack.rst
    datasources/configdrive.rst
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
new file mode 100644
index 00000000..de9a4127
--- /dev/null
+++ b/doc/rtd/topics/datasources/e24cloud.rst
@@ -0,0 +1,9 @@
+.. _datasource_e24cloud:
+
+E24Cloud
+========
+The `E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS
+Ec2 metadata service clone. It identifies itself to guests using the dmi
+system-manufacturer (/sys/class/dmi/id/sys_vendor).
+
+.. vi: textwidth=78
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 6fabf258..5e1dd777 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -672,13 +672,14 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
             'serial': 'H23-C4J3JV-R6',
             'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
             'uuid_source': 'dmi',
+            'vendor': 'tothecloud',
         }
         unspecial.update(**kwargs)
         return unspecial
 
     @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
     def test_identify_zstack(self, m_collect):
-        """zstack should be identified if cassis-asset-tag ends in .zstack.io
+        """zstack should be identified if chassis-asset-tag ends in .zstack.io
         """
         m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
         self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
@@ -690,4 +691,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
         m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
         self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
 
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud(self, m_collect):
+        """e24cloud identified if vendor is e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloud')
+        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud_negative(self, m_collect):
+        """e24cloud not identified if vendor is not exactly e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloudyday')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index c5b5c46c..12c6ae36 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -616,6 +616,14 @@ class TestDsIdentify(DsIdentifyBase):
         """EC2: chassis asset tag ends with 'zstack.io'"""
         self._test_ds_found('Ec2-ZStack')
 
+    def test_e24cloud_is_ec2(self):
+        """EC2: e24cloud identified by sys_vendor"""
+        self._test_ds_found('Ec2-E24Cloud')
+
+    def test_e24cloud_not_active(self):
+        """EC2: sys_vendor that merely contains e24cloud is not e24cloud"""
+        self._test_ds_not_found('Ec2-E24Cloud-negative')
+
 
 class TestIsIBMProvisioning(DsIdentifyBase):
     """Test the 
is_ibm_provisioning method in ds-identify.""" @@ -994,7 +1002,15 @@ VALID_CFG = { 'Ec2-ZStack': { 'ds': 'Ec2', 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, - } + }, + 'Ec2-E24Cloud': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + }, + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + } } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 40fc0604..20a99ee9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -905,6 +905,11 @@ ec2_identify_platform() { *.zstack.io) _RET="ZStack"; return 0;; esac + local vendor="${DI_DMI_SYS_VENDOR}" + case "$vendor" in + e24cloud) _RET="E24cloud"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From 823708ea031290c864e3aef67f60f6eb495f281d Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 11 Oct 2019 16:52:13 +0000 Subject: cloud_test/lxd: Retry container delete a few times LXD integration tests fail sometimes due to failure to delete the container, usually related to ZFS backend. This is a transient issue unrelated to the test itself. Teach LXD platform to retry this a few times before returning an error. --- tests/cloud_tests/platforms/lxd/instance.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 83c97ab4..2b804a62 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -4,6 +4,7 @@ import os import shutil +import time from tempfile import mkdtemp from cloudinit.util import load_yaml, subp, ProcessExecutionError, which @@ -224,7 +225,18 @@ class LXDInstance(Instance): LOG.debug("%s: deleting container.", self) self.unfreeze() self.shutdown() - self.pylxd_container.delete(wait=True) + retries = [1] * 5 + for attempt, wait in enumerate(retries): + try: + self.pylxd_container.delete(wait=True) + break + except Exception: + if attempt + 1 >= len(retries): + raise + LOG.debug('Failed to delete container %s (%s/%s) retrying...', + self, attempt + 1, len(retries)) + time.sleep(wait) + self._pylxd_container = None if self.platform.container_exists(self.name): -- cgit v1.2.3 From fac98983187c0984aa79c569c4b76cab90fd6f47 Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Wed, 16 Oct 2019 15:30:28 +0000 Subject: net: handle openstack dhcpv6-stateless configuration Openstack subnets can be configured to use SLAAC by setting ipv6_address_mode=dhcpv6-stateless. When this is the case the sysconfig interface configuration should use IPV6_AUTOCONF=yes and not set DHCPV6C=yes. This change sets the subnets type property to the full network['type'] from openstack metadata. cloudinit/net/sysconfig.py and cloudinit/net/eni.py are updated to support new subnet types: - 'ipv6_dhcpv6-stateless' => IPV6_AUTOCONF=yes - 'ipv6_dhcpv6-stateful' => DHCPV6C=yes Type 'dhcp6' in sysconfig is kept for backward compatibility with any implementations that set subnet_type == 'dhcp6'. 
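In renderer terms, the mapping this change implements can be summarised
as follows (an illustrative table in Python form, not code from the
patch):

# How the renderers treat each IPv6 subnet type after this change:
SUBNET_RENDERING = {
    'ipv6_dhcpv6-stateful': {'eni': 'dhcp',
                             'sysconfig': ['IPV6INIT=yes', 'DHCPV6C=yes']},
    'ipv6_dhcpv6-stateless': {'eni': 'auto',
                              'sysconfig': ['IPV6INIT=yes',
                                            'IPV6_AUTOCONF=yes']},
    # legacy spelling, kept for backward compatibility
    'dhcp6': {'eni': 'dhcp',
              'sysconfig': ['IPV6INIT=yes', 'DHCPV6C=yes']},
}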
LP: #1847517 --- cloudinit/net/eni.py | 7 +- cloudinit/net/sysconfig.py | 7 +- cloudinit/sources/helpers/openstack.py | 3 +- .../unittests/test_datasource/test_configdrive.py | 39 ++++++++++ tests/unittests/test_net.py | 88 ++++++++++++++++++++++ 5 files changed, 141 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b129bb62..530922b5 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -411,8 +411,13 @@ class Renderer(renderer.Renderer): else: ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet - if subnet['type'].startswith('dhcp'): + if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or + subnet['type'] == 'ipv6_dhcpv6-stateful'): + # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + elif subnet['type'] == 'ipv6_dhcpv6-stateless': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 87b548e5..4e656768 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -343,10 +343,15 @@ class Renderer(renderer.Renderer): for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': + if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 iface_cfg['DHCPV6C'] = True + elif subnet_type == 'ipv6_dhcpv6-stateless': + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f069115..d1c4601a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -585,7 +585,8 @@ def convert_net_json(network_json=None, known_macs=None): subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + t = (network['type'] if network['type'].startswith('ipv6') + else 'dhcp4') subnet.update({ 'type': t, }) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 520c50fe..8c788c1c 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -499,6 +499,45 @@ class TestNetJson(CiTestCase): known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', + 'network_id': 
'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical'} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + def test_network_config_conversions(self): """Tests a bunch of input network json and checks the expected conversions.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b6597412..f5a9cae6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1070,6 +1070,82 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_stateless': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateful': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -2781,6 +2857,18 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateful_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_check_ifcfg_rh(self): """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" render_dir = self.tmp_dir() -- cgit v1.2.3 From 7e699256b319cdf41e747211763e593a6b5f3393 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Thu, 17 Oct 2019 14:36:40 +0000 Subject: replace any deprecated log.warn with log.warning Commit 6797e822959b84c98cf73e02b2a6e3d6ab3fd4fe replaced the LOG.warn calls that linters were warning about; this also replaces calls that linters would not have recognised (as `log` is generally a parameter in these scenarios). 
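On Python 3 Logger.warn is only a deprecated alias of Logger.warning,
which a standalone sketch (standard library only) can confirm:

import logging
import warnings

log = logging.getLogger('demo')
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    log.warn('deprecated spelling')    # emits DeprecationWarning
    log.warning('preferred spelling')  # emits nothing
assert any(issubclass(w.category, DeprecationWarning) for w in caught)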
LP: #1508442 --- cloudinit/config/cc_apt_pipelining.py | 2 +- cloudinit/config/cc_byobu.py | 6 +++--- cloudinit/config/cc_chef.py | 20 ++++++++++---------- cloudinit/config/cc_emit_upstart.py | 2 +- cloudinit/config/cc_final_message.py | 2 +- cloudinit/config/cc_growpart.py | 2 +- cloudinit/config/cc_keys_to_console.py | 6 +++--- cloudinit/config/cc_lxd.py | 15 +++++++-------- cloudinit/config/cc_mounts.py | 14 +++++++------- .../config/cc_package_update_upgrade_install.py | 7 ++++--- cloudinit/config/cc_phone_home.py | 12 ++++++------ cloudinit/config/cc_power_state_change.py | 13 ++++++------- cloudinit/config/cc_puppet.py | 10 +++++----- cloudinit/config/cc_resizefs.py | 17 ++++++++--------- cloudinit/config/cc_resolv_conf.py | 4 ++-- cloudinit/config/cc_rightscale_userdata.py | 4 ++-- cloudinit/config/cc_rsyslog.py | 2 +- cloudinit/config/cc_scripts_per_boot.py | 4 ++-- cloudinit/config/cc_scripts_per_instance.py | 4 ++-- cloudinit/config/cc_scripts_per_once.py | 4 ++-- cloudinit/config/cc_scripts_user.py | 4 ++-- cloudinit/config/cc_scripts_vendor.py | 4 ++-- cloudinit/config/cc_seed_random.py | 2 +- cloudinit/config/cc_set_passwords.py | 2 +- cloudinit/config/cc_update_etc_hosts.py | 8 ++++---- cloudinit/config/cc_yum_add_repo.py | 10 +++++----- .../test_handler/test_handler_power_state.py | 2 +- 27 files changed, 90 insertions(+), 92 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 459332ab..225d0905 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args): elif apt_pipe_value_s in [str(b) for b in range(0, 6)]: write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: - log.warn("Invalid option for apt_pipelining: %s", apt_pipe_value) + log.warning("Invalid option for apt_pipelining: %s", apt_pipe_value) def write_apt_snippet(setting, log, f_name): diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 8570da15..0b4352c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -60,7 +60,7 @@ def handle(name, cfg, cloud, log, args): valid = ("enable-user", "enable-system", "enable", "disable-user", "disable-system", "disable") if value not in valid: - log.warn("Unknown value %s for byobu_by_default", value) + log.warning("Unknown value %s for byobu_by_default", value) mod_user = value.endswith("-user") mod_sys = value.endswith("-system") @@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warn(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning(("No default byobu user provided, " + "can not launch %s for the default user"), bl_inst) else: shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) shcmd += " || X=$(($X+1)); " diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index a6240306..0ad6b7f1 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -196,7 +196,7 @@ def handle(name, cfg, cloud, log, _args): # If there isn't a chef key in the configuration don't do anything if 'chef' not in cfg: log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + " no 'chef' key in configuration"), name) return chef_cfg = cfg['chef'] @@ -215,9 +215,9 @@ def handle(name, cfg, cloud, log, 
_args): if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warn("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning("chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path) # Create the chef config from template template_fn = cloud.get_template_filename('chef_client.rb') @@ -234,8 +234,8 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warn("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", + CHEF_RB_PATH) # Set the firstboot json fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', @@ -276,9 +276,9 @@ def run_chef(chef_cfg, log): elif isinstance(cmd_args, six.string_types): cmd.append(cmd_args) else: - log.warn("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning("Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", type(cmd_args)) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -334,7 +334,7 @@ def install_chef(cloud, chef_cfg, log): retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), omnibus_version=omnibus_version) else: - log.warn("Unknown chef install type '%s'", install_type) + log.warning("Unknown chef install type '%s'", install_type) run = False return run diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index eb9fbe66..b342e04d 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -69,6 +69,6 @@ def handle(name, _cfg, cloud, log, args): util.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
- log.warn("Emission of upstart event %s failed due to: %s", n, e) + log.warning("Emission of upstart event %s failed due to: %s", n, e) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index c61f03d4..fd141541 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -83,6 +83,6 @@ def handle(_name, cfg, cloud, log, args): util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) if cloud.datasource.is_disconnected: - log.warn("Used fallback datasource") + log.warning("Used fallback datasource") # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 564f376f..aa9716e7 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -321,7 +321,7 @@ def handle(_name, cfg, _cloud, log, _args): mycfg = cfg.get('growpart') if not isinstance(mycfg, dict): - log.warn("'growpart' in config was not a dict") + log.warning("'growpart' in config was not a dict") return mode = mycfg.get('mode', "auto") diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index aff4010e..8f8735ce 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -52,8 +52,8 @@ def _get_helper_tool_path(distro): def handle(name, cfg, cloud, log, _args): helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warn(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning(("Unable to activate module %s," + " helper tool not found at %s"), name, helper_path) return fp_blacklist = util.get_cfg_option_list(cfg, @@ -68,7 +68,7 @@ def handle(name, cfg, cloud, log, _args): util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: - log.warn("Writing keys to the system console failed!") + log.warning("Writing keys to the system console failed!") raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index d9830770..151a9844 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -66,21 +66,21 @@ def handle(name, cfg, cloud, log, args): name) return if not isinstance(lxd_cfg, dict): - log.warn("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning("lxd config must be a dictionary. found a '%s'", + type(lxd_cfg)) return # Grab the configuration init_cfg = lxd_cfg.get('init') if not isinstance(init_cfg, dict): - log.warn("lxd/init config must be a dictionary. found a '%s'", - type(init_cfg)) + log.warning("lxd/init config must be a dictionary. found a '%s'", + type(init_cfg)) init_cfg = {} bridge_cfg = lxd_cfg.get('bridge', {}) if not isinstance(bridge_cfg, dict): - log.warn("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning("lxd/bridge config must be a dictionary. 
found a '%s'", + type(bridge_cfg)) bridge_cfg = {} # Install the needed packages @@ -95,7 +95,7 @@ def handle(name, cfg, cloud, log, args): try: cloud.distro.install_packages(packages) except util.ProcessExecutionError as exc: - log.warn("failed to install packages %s: %s", packages, exc) + log.warning("failed to install packages %s: %s", packages, exc) return # Set up lxd if init config is given @@ -301,5 +301,4 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) - # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 123ffb84..c741c746 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -251,10 +251,10 @@ def setup_swapfile(fname, size=None, maxsize=None): util.ensure_dir(tdir) util.log_time(LOG.debug, msg, func=util.subp, args=[['sh', '-c', - ('rm -f "$1" && umask 0066 && ' - '{ fallocate -l "${2}M" "$1" || ' - ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' - 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + ('rm -f "$1" && umask 0066 && ' + '{ fallocate -l "${2}M" "$1" || ' + 'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), 'setup_swap', fname, mbsize]]) except Exception as e: @@ -347,8 +347,8 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warn("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning("Mount option %s not a list, got a %s instead", + (i + 1), type_utils.obj_name(cfgmnt[i])) continue start = str(cfgmnt[i][0]) @@ -495,7 +495,7 @@ def handle(_name, cfg, cloud, log, _args): util.subp(cmd) log.debug(fmt, "PASS") except util.ProcessExecutionError: - log.warn(fmt, "FAIL") + log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 17b91011..86afffef 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -108,7 +108,8 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) + log.warning("Rebooting after upgrade or install per " + "%s", REBOOT_FILE) # Flush the above warning + anything else out... 
logging.flushLoggers(log) _fire_reboot(log) @@ -117,8 +118,8 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warn("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning("%s failed with exceptions, re-raising the last one", + len(errors)) raise errors[-1] # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 3be0d1c1..b8e27090 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -79,8 +79,8 @@ def handle(name, cfg, cloud, log, args): ph_cfg = cfg['phone_home'] if 'url' not in ph_cfg: - log.warn(("Skipping module named %s, " - "no 'url' found in 'phone_home' configuration"), name) + log.warning(("Skipping module named %s, " + "no 'url' found in 'phone_home' configuration"), name) return url = ph_cfg['url'] @@ -91,7 +91,7 @@ def handle(name, cfg, cloud, log, args): except Exception: tries = 10 util.logexc(log, "Configuration entry 'tries' is not an integer, " - "using %s instead", tries) + "using %s instead", tries) if post_list == "all": post_list = POST_LIST_ALL @@ -112,7 +112,7 @@ def handle(name, cfg, cloud, log, args): all_keys[n] = util.load_file(path) except Exception: util.logexc(log, "%s: failed to open, can not phone home that " - "data!", path) + "data!", path) submit_keys = {} for k in post_list: @@ -120,8 +120,8 @@ def handle(name, cfg, cloud, log, args): submit_keys[k] = all_keys[k] else: submit_keys[k] = None - log.warn(("Requested key %s from 'post'" - " configuration list not available"), k) + log.warning(("Requested key %s from 'post'" + " configuration list not available"), k) # Get them read to be posted real_submit_keys = {} diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 50b37470..43a479cf 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -103,24 +103,23 @@ def check_condition(cond, log=None): return False else: if log: - log.warn(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning(pre + "unexpected exit %s. " % ret + + "do not apply change.") return False except Exception as e: if log: - log.warn(pre + "Unexpected error: %s" % e) + log.warning(pre + "Unexpected error: %s" % e) return False def handle(_name, cfg, _cloud, log, _args): - try: (args, timeout, condition) = load_power_state(cfg) if args is None: log.debug("no power_state provided. doing nothing") return except Exception as e: - log.warn("%s Not performing power state change!" % str(e)) + log.warning("%s Not performing power state change!" 
% str(e)) return if condition is False: @@ -131,7 +130,7 @@ def handle(_name, cfg, _cloud, log, _args): cmdline = givecmdline(mypid) if not cmdline: - log.warn("power_state: failed to get cmdline of current process") + log.warning("power_state: failed to get cmdline of current process") return devnull_fp = open(os.devnull, "w") @@ -214,7 +213,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): def fatal(msg): if log: - log.warn(msg) + log.warning(msg) doexit(EXIT_FAIL) known_errnos = (errno.ENOENT, errno.ESRCH) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 4190a20b..e26712e0 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -98,8 +98,8 @@ def _autostart_puppet(log): elif os.path.exists('/sbin/chkconfig'): util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) else: - log.warn(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning(("Sorry we do not know how to enable" + " puppet services on this system")) def handle(name, cfg, cloud, log, _args): @@ -121,8 +121,8 @@ def handle(name, cfg, cloud, log, _args): p_constants = PuppetConstants(conf_file, ssl_dir, log) if not install and version: - log.warn(("Puppet install set false but version supplied," - " doing nothing.")) + log.warning(("Puppet install set false but version supplied," + " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), version if version else 'latest') @@ -141,7 +141,7 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 - puppet_config.readfp( # pylint: disable=W1505 + puppet_config.readfp( # pylint: disable=W1505 StringIO(cleaned_contents), filename=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index afd2e060..01dfc125 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -8,7 +8,6 @@ """Resizefs: cloud-config module which resizes the filesystem""" - import errno import getopt import os @@ -183,7 +182,7 @@ def maybe_get_writable_device_path(devpath, info, log): not container): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: - log.warn("Unable to find device '/dev/root'") + log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) @@ -212,8 +211,8 @@ def maybe_get_writable_device_path(devpath, info, log): log.debug("Device '%s' did not exist in container. " "cannot resize: %s", devpath, info) elif exc.errno == errno.ENOENT: - log.warn("Device '%s' did not exist. cannot resize: %s", - devpath, info) + log.warning("Device '%s' did not exist. cannot resize: %s", + devpath, info) else: raise exc return None @@ -223,8 +222,8 @@ def maybe_get_writable_device_path(devpath, info, log): log.debug("device '%s' not a block device in container." " cannot resize: %s" % (devpath, info)) else: - log.warn("device '%s' not a block device. cannot resize: %s" % - (devpath, info)) + log.warning("device '%s' not a block device. 
cannot resize: %s" % + (devpath, info)) return None return devpath # The writable block devpath @@ -243,7 +242,7 @@ def handle(name, cfg, _cloud, log, args): resize_what = "/" result = util.get_mount_info(resize_what, log) if not result: - log.warn("Could not determine filesystem type of %s", resize_what) + log.warning("Could not determine filesystem type of %s", resize_what) return (devpth, fs_type, mount_point) = result @@ -280,8 +279,8 @@ def handle(name, cfg, _cloud, log, args): break if not resizer: - log.warn("Not resizing unknown filesystem type %s for %s", - fs_type, resize_what) + log.warning("Not resizing unknown filesystem type %s for %s", + fs_type, resize_what) return resize_cmd = resizer(resize_what, devpth) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 9812562a..69f4768a 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -102,11 +102,11 @@ def handle(name, cfg, cloud, log, _args): return if "resolv_conf" not in cfg: - log.warn("manage_resolv_conf True but no parameters provided!") + log.warning("manage_resolv_conf True but no parameters provided!") template_fn = cloud.get_template_filename('resolv.conf') if not template_fn: - log.warn("No template found, not rendering /etc/resolv.conf") + log.warning("No template found, not rendering /etc/resolv.conf") return generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"]) diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 4e34c7e9..bd8ee89f 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -111,8 +111,8 @@ def handle(name, _cfg, cloud, log, _args): log.debug("%s urls were skipped or failed", skipped) if captured_excps: - log.warn("%s failed with exceptions, re-raising the last one", - len(captured_excps)) + log.warning("%s failed with exceptions, re-raising the last one", + len(captured_excps)) raise captured_excps[-1] # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 22b17532..ff211f65 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -432,7 +432,7 @@ def handle(name, cfg, cloud, log, _args): systemd=cloud.distro.uses_systemd()), except util.ProcessExecutionError as e: restarted = False - log.warn("Failed to reload syslog", e) + log.warning("Failed to reload syslog", e) if restarted: # This only needs to run if we *actually* restarted diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py index b03255c7..588e1b03 100644 --- a/cloudinit/config/cc_scripts_per_boot.py +++ b/cloudinit/config/cc_scripts_per_boot.py @@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py index baee5cc4..2bc8a6ef 100644 --- a/cloudinit/config/cc_scripts_per_instance.py +++ b/cloudinit/config/cc_scripts_per_instance.py @@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in 
%s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py index 4943e9aa..3f27ee34 100644 --- a/cloudinit/config/cc_scripts_per_once.py +++ b/cloudinit/config/cc_scripts_per_once.py @@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py index 6c66481e..d940dbd6 100644 --- a/cloudinit/config/cc_scripts_user.py +++ b/cloudinit/config/cc_scripts_user.py @@ -44,8 +44,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index 0292eafb..faac9242 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -48,8 +48,8 @@ def handle(name, cfg, cloud, log, _args): try: util.runparts(runparts_path, exe_prefix=prefix) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 65f6e777..a5d7c73f 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -131,7 +131,7 @@ def handle(name, cfg, cloud, log, _args): env['RANDOM_SEED_FILE'] = seed_path handle_random_seed_command(command=command, required=req, env=env) except ValueError as e: - log.warn("handling random command [%s] failed: %s", command, e) + log.warning("handling random command [%s] failed: %s", command, e) raise e # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index cf9b5ab5..1379428d 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -164,7 +164,7 @@ def handle(_name, cfg, cloud, log, args): if user: plist = ["%s:%s" % (user, password)] else: - log.warn("No default or defined user to change password for.") + log.warning("No default or defined user to change password for.") errors = [] if plist: diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index c96eede1..03fffb96 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -62,8 +62,8 @@ def handle(name, cfg, cloud, log, _args): if util.translate_bool(manage_hosts, addons=['template']): (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) if not hostname: - log.warn(("Option 'manage_etc_hosts' was set," - " but no hostname was found")) + log.warning(("Option 'manage_etc_hosts' was set," + " but no hostname was found")) return # Render from a template file @@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, _args): elif manage_hosts == "localhost": (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) if not hostname: - log.warn(("Option 'manage_etc_hosts' was set," - " but no hostname was 
found")) + log.warning(("Option 'manage_etc_hosts' was set," + " but no hostname was found")) return log.debug("Managing localhost in /etc/hosts") diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 6a42f499..3b354a7d 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -113,16 +113,16 @@ def handle(name, cfg, _cloud, log, _args): missing_required = 0 for req_field in ['baseurl']: if req_field not in repo_config: - log.warn(("Repository %s does not contain a %s" - " configuration 'required' entry"), - repo_id, req_field) + log.warning(("Repository %s does not contain a %s" + " configuration 'required' entry"), + repo_id, req_field) missing_required += 1 if not missing_required: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth else: - log.warn("Repository %s is missing %s required fields, skipping!", - repo_id, missing_required) + log.warning("Repository %s is missing %s required fields, " + "skipping!", repo_id, missing_required) for (c_repo_id, path) in repo_locations.items(): repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 3c726422..0d8d17b9 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -90,7 +90,7 @@ class TestCheckCondition(t_help.TestCase): mocklog = mock.Mock() self.assertEqual( psc.check_condition(self.cmd_with_exit(2), mocklog), False) - self.assertEqual(mocklog.warn.call_count, 1) + self.assertEqual(mocklog.warning.call_count, 1) def check_lps_ret(psc_return, mode=None): -- cgit v1.2.3 From ecb501b84338f078be18c38c68c3ce87fed3584b Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Thu, 17 Oct 2019 15:18:44 +0000 Subject: guestcust_util: handle special characters in config file Handle the special characters when reading VM Tools configure file. For example, the key and value may contain _, - and . etc. 
--- cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 2 +- tests/unittests/test_vmware/test_guestcust_util.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index eb78172e..3d369d04 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -138,7 +138,7 @@ def get_tools_config(section, key, defaultVal): try: (outText, _) = util.subp(cmd) - m = re.match(r'([a-zA-Z0-9 ]+)=(.*)', outText) + m = re.match(r'([^=]+)=(.*)', outText) if m: retValue = m.group(2).strip() logger.debug("Get tools config: [%s] %s = %s", diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py index b8fa9942..b175a998 100644 --- a/tests/unittests/test_vmware/test_guestcust_util.py +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -62,4 +62,11 @@ class TestGuestCustUtil(CiTestCase): get_tools_config('section', 'key', 'defaultVal'), 'Bar=Wark') + # value contains specific characters + with mock.patch.object(util, 'subp', + return_value=('[a] b.c_d=e-f', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'e-f') + # vi: ts=4 expandtab -- cgit v1.2.3 From 02c8214eac857e29b40ecc65992c1da6983083e1 Mon Sep 17 00:00:00 2001 From: Darren Birkett Date: Mon, 21 Oct 2019 16:17:03 +0000 Subject: net: enable infiniband support in eni and sysconfig renderers Commit e7b0e5f72 added support for configuring infiniband devices by adding a new infiniband 'type'. This commit updates eni and sysconfig renderers to consume this new type and configure infiniband devices correctly. 
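The eni change below is largely a re-ranking of interface types; in
isolation, the ordering it imposes can be sketched as follows
(illustrative only, interface names made up):

order = {'loopback': 0, 'physical': 1, 'infiniband': 2,
         'bond': 3, 'bridge': 4, 'vlan': 5}
ifaces = [{'name': 'br0', 'type': 'bridge'},
          {'name': 'ib0', 'type': 'infiniband'},
          {'name': 'eth0', 'type': 'physical'}]
print([i['name'] for i in sorted(ifaces, key=lambda i: order[i['type']])])
# ['eth0', 'ib0', 'br0']: infiniband renders after physical NICs and
# before bonds, bridges and vlans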
LP: #1847114 --- cloudinit/net/eni.py | 9 +++++---- cloudinit/net/sysconfig.py | 3 ++- tests/unittests/test_net.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 35 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 530922b5..a9a80c95 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -94,7 +94,7 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu): ] renames = {'mac_address': 'hwaddress'} - if iface['type'] not in ['bond', 'bridge', 'vlan']: + if iface['type'] not in ['bond', 'bridge', 'infiniband', 'vlan']: ignore_map.append('mac_address') for key, value in iface.items(): @@ -472,9 +472,10 @@ class Renderer(renderer.Renderer): order = { 'loopback': 0, 'physical': 1, - 'bond': 2, - 'bridge': 3, - 'vlan': 4, + 'infiniband': 2, + 'bond': 3, + 'bridge': 4, + 'vlan': 5, } sections = [] diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 4e656768..e3815968 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -330,7 +330,8 @@ class Renderer(renderer.Renderer): old_value = iface.get(old_key) if old_value is not None: # only set HWADDR on physical interfaces - if old_key == 'mac_address' and iface['type'] != 'physical': + if (old_key == 'mac_address' and + iface['type'] not in ['physical', 'infiniband']): continue iface_cfg[new_key] = old_value diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f5a9cae6..d2201998 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1176,6 +1176,12 @@ iface eth4 inet manual # control-manual eth5 iface eth5 inet dhcp +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + auto bond0 iface bond0 inet6 dhcp bond-mode active-backup @@ -1457,7 +1463,19 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true ONBOOT=no STARTMODE=manual TYPE=Ethernet - USERCTL=no""") + USERCTL=no"""), + 'ifcfg-ib0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=InfiniBand + USERCTL=no"""), }, 'yaml': textwrap.dedent(""" version: 1 @@ -1532,6 +1550,15 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true vlan_id: 200 subnets: - type: dhcp4 + # An infiniband + - type: infiniband + name: ib0 + mac_address: >- + a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + subnets: + - type: static + address: 192.168.200.7/24 + mtu: 9000 # A bridge. - type: bridge name: br0 -- cgit v1.2.3 From 5bec6b0e2a2ce5fd03bb04f441536fc130e67997 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 24 Oct 2019 20:02:15 +0000 Subject: Fix usages of yaml, and move yaml_dump to safeyaml.dumps. Here we replace uses of the pyyaml module directly with functions provided by cloudinit.safeyaml. 
Also, change/move cloudinit.util.yaml_dumps to cloudinit.safeyaml.dumps LP: #1849640 --- cloudinit/cmd/devel/net_convert.py | 13 +++++-------- cloudinit/cmd/tests/test_main.py | 7 ++++--- cloudinit/config/cc_debug.py | 3 ++- cloudinit/config/cc_salt_minion.py | 6 +++--- cloudinit/config/cc_snappy.py | 3 ++- cloudinit/handlers/cloud_config.py | 3 ++- cloudinit/net/netplan.py | 15 ++++++++------- cloudinit/net/network_state.py | 5 +++-- cloudinit/safeyaml.py | 15 +++++++++++++++ cloudinit/util.py | 16 +--------------- tests/unittests/test_data.py | 3 ++- tests/unittests/test_runs/test_merge_run.py | 3 ++- tests/unittests/test_runs/test_simple_run.py | 7 ++++--- 13 files changed, 53 insertions(+), 46 deletions(-) (limited to 'tests') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 1ad7e0bd..9b768304 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -5,13 +5,12 @@ import argparse import json import os import sys -import yaml from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf -from cloudinit import distros +from cloudinit import distros, safeyaml from cloudinit.net import eni, netplan, network_state, sysconfig from cloudinit import log @@ -78,13 +77,12 @@ def handle_args(name, args): if args.kind == "eni": pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": - pre_ns = yaml.load(net_data) + pre_ns = safeyaml.load(net_data) if 'network' in pre_ns: pre_ns = pre_ns.get('network') if args.debug: sys.stderr.write('\n'.join( - ["Input YAML", - yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) + ["Input YAML", safeyaml.dumps(pre_ns), ""])) elif args.kind == 'network_data.json': pre_ns = openstack.convert_net_json( json.loads(net_data), known_macs=known_macs) @@ -100,9 +98,8 @@ def handle_args(name, args): "input data") if args.debug: - sys.stderr.write('\n'.join([ - "", "Internal State", - yaml.dump(ns, default_flow_style=False, indent=4), ""])) + sys.stderr.write('\n'.join( + ["", "Internal State", safeyaml.dumps(ns), ""])) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index a1e534fb..57b8fdf5 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -6,8 +6,9 @@ import os from six import StringIO from cloudinit.cmd import main +from cloudinit import safeyaml from cloudinit.util import ( - ensure_dir, load_file, write_file, yaml_dumps) + ensure_dir, load_file, write_file) from cloudinit.tests.helpers import ( FilesystemMockingTestCase, wrap_and_call) @@ -39,7 +40,7 @@ class TestMain(FilesystemMockingTestCase): ], 'cloud_init_modules': ['write-files', 'runcmd'], } - cloud_cfg = yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) self.cloud_cfg_file = os.path.join( self.new_root, 'etc', 'cloud', 'cloud.cfg') @@ -113,7 +114,7 @@ class TestMain(FilesystemMockingTestCase): """When local-hostname metadata is present, call cc_set_hostname.""" self.cfg['datasource'] = { 'None': {'metadata': {'local-hostname': 'md-hostname'}}} - cloud_cfg = yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) write_file(self.cloud_cfg_file, cloud_cfg) cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, diff --git a/cloudinit/config/cc_debug.py 
b/cloudinit/config/cc_debug.py index 0a039eb3..610dbc8b 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -33,6 +33,7 @@ from six import StringIO from cloudinit import type_utils from cloudinit import util +from cloudinit import safeyaml SKIP_KEYS = frozenset(['log_cfgs']) @@ -49,7 +50,7 @@ def _make_header(text): def _dumps(obj): - text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False) + text = safeyaml.dumps(obj, explicit_start=False, explicit_end=False) return text.rstrip() diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index d6a21d72..cd9cb0b0 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -45,7 +45,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``. import os -from cloudinit import util +from cloudinit import safeyaml, util # Note: see https://docs.saltstack.com/en/latest/topics/installation/ # Note: see https://docs.saltstack.com/en/latest/ref/configuration/ @@ -97,13 +97,13 @@ def handle(name, cfg, cloud, log, _args): if 'conf' in s_cfg: # Add all sections from the conf object to minion config file minion_config = os.path.join(const.conf_dir, 'minion') - minion_data = util.yaml_dumps(s_cfg.get('conf')) + minion_data = safeyaml.dumps(s_cfg.get('conf')) util.write_file(minion_config, minion_data) if 'grains' in s_cfg: # add grains to /etc/salt/grains grains_config = os.path.join(const.conf_dir, 'grains') - grains_data = util.yaml_dumps(s_cfg.get('grains')) + grains_data = safeyaml.dumps(s_cfg.get('grains')) util.write_file(grains_config, grains_data) # ... copy the key pair if specified diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index 15bee2d3..b94cd04e 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -68,6 +68,7 @@ is ``auto``. Options are: from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import temp_utils +from cloudinit import safeyaml from cloudinit import util import glob @@ -188,7 +189,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): # Note, however, we do not touch config files on disk. 
nested_cfg = {'config': {shortname: config}} (fd, cfg_tmpf) = temp_utils.mkstemp() - os.write(fd, util.yaml_dumps(nested_cfg).encode()) + os.write(fd, safeyaml.dumps(nested_cfg).encode()) os.close(fd) cfgfile = cfg_tmpf diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 99bf0e61..2a307364 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -14,6 +14,7 @@ from cloudinit import handlers from cloudinit import log as logging from cloudinit import mergers from cloudinit import util +from cloudinit import safeyaml from cloudinit.settings import (PER_ALWAYS) @@ -75,7 +76,7 @@ class CloudConfigPartHandler(handlers.Handler): '', ] lines.extend(file_lines) - lines.append(util.yaml_dumps(self.cloud_buf)) + lines.append(safeyaml.dumps(self.cloud_buf)) else: lines = [] util.write_file(self.cloud_fn, "\n".join(lines), 0o600) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index e54a34e5..54be1221 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -8,6 +8,7 @@ from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 from cloudinit import log as logging from cloudinit import util +from cloudinit import safeyaml from cloudinit.net import SYS_CLASS_NET, get_devicelist KNOWN_SNAPD_CONFIG = b"""\ @@ -235,9 +236,9 @@ class Renderer(renderer.Renderer): # if content already in netplan format, pass it back if network_state.version == 2: LOG.debug('V2 to V2 passthrough') - return util.yaml_dumps({'network': network_state.config}, - explicit_start=False, - explicit_end=False) + return safeyaml.dumps({'network': network_state.config}, + explicit_start=False, + explicit_end=False) ethernets = {} wifis = {} @@ -359,10 +360,10 @@ class Renderer(renderer.Renderer): # workaround yaml dictionary key sorting when dumping def _render_section(name, section): if section: - dump = util.yaml_dumps({name: section}, - explicit_start=False, - explicit_end=False, - noalias=True) + dump = safeyaml.dumps({name: section}, + explicit_start=False, + explicit_end=False, + noalias=True) txt = util.indent(dump, ' ' * 4) return [txt] return [] diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index c0c415d0..b485f3d9 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -12,6 +12,7 @@ import struct import six +from cloudinit import safeyaml from cloudinit import util LOG = logging.getLogger(__name__) @@ -253,7 +254,7 @@ class NetworkStateInterpreter(object): 'config': self._config, 'network_state': self._network_state, } - return util.yaml_dumps(state) + return safeyaml.dumps(state) def load(self, state): if 'version' not in state: @@ -272,7 +273,7 @@ class NetworkStateInterpreter(object): setattr(self, key, state[key]) def dump_network_state(self): - return util.yaml_dumps(self._network_state) + return safeyaml.dumps(self._network_state) def as_dict(self): return {'version': self._version, 'config': self._config} diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 3bd5e03d..d6f5f95b 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -6,6 +6,8 @@ import yaml +YAMLError = yaml.YAMLError + class _CustomSafeLoader(yaml.SafeLoader): def construct_python_unicode(self, node): @@ -27,4 +29,17 @@ class NoAliasSafeDumper(yaml.dumper.SafeDumper): def load(blob): return(yaml.load(blob, Loader=_CustomSafeLoader)) + +def dumps(obj, explicit_start=True, explicit_end=True, noalias=False): + """Return data in nicely formatted yaml.""" + + return 
yaml.dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + Dumper=(NoAliasSafeDumper + if noalias else yaml.dumper.Dumper)) + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 0d338ca7..1f600df4 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -38,7 +38,6 @@ from base64 import b64decode, b64encode from six.moves.urllib import parse as urlparse import six -import yaml from cloudinit import importer from cloudinit import log as logging @@ -958,7 +957,7 @@ def load_yaml(blob, default=None, allowed=(dict,)): " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted - except (yaml.YAMLError, TypeError, ValueError) as e: + except (safeyaml.YAMLError, TypeError, ValueError) as e: msg = 'Failed loading yaml blob' mark = None if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): @@ -1629,19 +1628,6 @@ def json_dumps(data): raise -def yaml_dumps(obj, explicit_start=True, explicit_end=True, noalias=False): - """Return data in nicely formatted yaml.""" - - return yaml.dump(obj, - line_break="\n", - indent=4, - explicit_start=explicit_start, - explicit_end=explicit_end, - default_flow_style=False, - Dumper=(safeyaml.NoAliasSafeDumper - if noalias else yaml.dumper.Dumper)) - - def ensure_dir(path, mode=None): if not os.path.isdir(path): # Make the dir and adjust the mode diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 3efe7adf..22cf8f28 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -27,6 +27,7 @@ from cloudinit.settings import (PER_INSTANCE) from cloudinit import sources from cloudinit import stages from cloudinit import user_data as ud +from cloudinit import safeyaml from cloudinit import util from cloudinit.tests import helpers @@ -502,7 +503,7 @@ c: 4 data = [{'content': '#cloud-config\npassword: gocubs\n'}, {'content': '#cloud-config\nlocale: chicago\n'}, {'content': non_decodable}] - message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode() + message = b'#cloud-config-archive\n' + safeyaml.dumps(data).encode() self.reRoot() ci = stages.Init() diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index d1ac4942..ff27a280 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -7,6 +7,7 @@ import tempfile from cloudinit.tests import helpers from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml from cloudinit import stages from cloudinit import util @@ -26,7 +27,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): 'system_info': {'paths': {'run_dir': new_root}} } ud = helpers.readResource('user_data.1.txt') - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index d67c422c..cb3aae60 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -5,6 +5,7 @@ import os from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml from cloudinit import stages from cloudinit.tests import helpers from cloudinit import util @@ -34,7 +35,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): ], 'cloud_init_modules': ['write-files', 
'spacewalk', 'runcmd'], } - cloud_cfg = util.yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) @@ -130,7 +131,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): # re-write cloud.cfg with unverified_modules override cfg = copy.deepcopy(self.cfg) cfg['unverified_modules'] = ['spacewalk'] # Would have skipped - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) @@ -159,7 +160,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): cfg = copy.deepcopy(self.cfg) # Represent empty configuration in /etc/cloud/cloud.cfg cfg['cloud_init_modules'] = None - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) -- cgit v1.2.3 From f1c788e2bb7c86069d43a015267facfb8aefcdf0 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 24 Oct 2019 20:16:47 +0000 Subject: net/netplan: use ipv6-mtu key for specifying ipv6 mtu values netplan introduced an 'info' subcommand which emits yaml describing implemented features that indicate new or changed fields and values in the yaml that it accepts. Previously, cloud-init emitted the key 'mtu6' for ipv6 MTU values. This is not correct and netplan will fail to parse these values. Netplan as of 0.98 supports both the info subcommand and the ipv6-mtu key. This branch modifies the netplan renderer to collect the netplan info output into a 'features' property which is a list of available feature flags which the renderer can use to modify its output. If the command is not available, no feature flags are set and cloud-init will render IPv6 MTU values just as MTU for the subnet. --- cloudinit/cmd/devel/net_convert.py | 2 ++ cloudinit/net/netplan.py | 35 +++++++++++++++++++++++++++-------- tests/unittests/test_net.py | 27 +++++++++++++++++++++++++-- 3 files changed, 54 insertions(+), 10 deletions(-) (limited to 'tests') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 9b768304..2d27a76a 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -113,6 +113,8 @@ def handle_args(name, args): config['postcmds'] = False # trim leading slash config['netplan_path'] = config['netplan_path'][1:] + # enable some netplan features + config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] else: r_cls = sysconfig.Renderer config = distro.renderer_configs.get('sysconfig') diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 54be1221..749d46f8 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -35,7 +35,7 @@ def _get_params_dict_by_match(config, match): if key.startswith(match)) -def _extract_addresses(config, entry, ifname): +def _extract_addresses(config, entry, ifname, features=None): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent netplan yaml. 
@@ -67,7 +67,7 @@ def _extract_addresses(config, entry, ifname): 'match': {'macaddress': '52:54:00:12:34:00'}, 'mtu': 1501, 'address': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000"], - 'mtu6': 1480} + 'ipv6-mtu': 1480} """ @@ -80,6 +80,8 @@ def _extract_addresses(config, entry, ifname): else: return [obj, ] + if features is None: + features = [] addresses = [] routes = [] nameservers = [] @@ -109,8 +111,8 @@ def _extract_addresses(config, entry, ifname): searchdomains += _listify(subnet.get('dns_search', [])) if 'mtu' in subnet: mtukey = 'mtu' - if subnet_is_ipv6(subnet): - mtukey += '6' + if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features: + mtukey = 'ipv6-mtu' entry.update({mtukey: subnet.get('mtu')}) for route in subnet.get('routes', []): to_net = "%s/%s" % (route.get('network'), @@ -180,6 +182,7 @@ class Renderer(renderer.Renderer): """Renders network information in a /etc/netplan/network.yaml format.""" NETPLAN_GENERATE = ['netplan', 'generate'] + NETPLAN_INFO = ['netplan', 'info'] def __init__(self, config=None): if not config: @@ -189,6 +192,22 @@ class Renderer(renderer.Renderer): self.netplan_header = config.get('netplan_header', None) self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) + self._features = config.get('features', None) + + @property + def features(self): + if self._features is None: + try: + info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True) + info = util.load_yaml(info_blob) + self._features = info['netplan.io']['features'] + except util.ProcessExecutionError: + # if the info subcommand is not present then we don't have any + # new features + pass + except (TypeError, KeyError) as e: + LOG.debug('Failed to list features from netplan info: %s', e) + return self._features def render_network_state(self, network_state, templates=None, target=None): # check network state for version @@ -272,7 +291,7 @@ class Renderer(renderer.Renderer): else: del eth['match'] del eth['set-name'] - _extract_addresses(ifcfg, eth, ifname) + _extract_addresses(ifcfg, eth, ifname, self.features) ethernets.update({ifname: eth}) elif if_type == 'bond': @@ -297,7 +316,7 @@ class Renderer(renderer.Renderer): slave_interfaces = ifcfg.get('bond-slaves') if slave_interfaces == 'none': _extract_bond_slaves_by_name(interfaces, bond, ifname) - _extract_addresses(ifcfg, bond, ifname) + _extract_addresses(ifcfg, bond, ifname, self.features) bonds.update({ifname: bond}) elif if_type == 'bridge': @@ -332,7 +351,7 @@ class Renderer(renderer.Renderer): bridge.update({'parameters': br_config}) if ifcfg.get('mac_address'): bridge['macaddress'] = ifcfg.get('mac_address').lower() - _extract_addresses(ifcfg, bridge, ifname) + _extract_addresses(ifcfg, bridge, ifname, self.features) bridges.update({ifname: bridge}) elif if_type == 'vlan': @@ -344,7 +363,7 @@ class Renderer(renderer.Renderer): macaddr = ifcfg.get('mac_address', None) if macaddr is not None: vlan['macaddress'] = macaddr.lower() - _extract_addresses(ifcfg, vlan, ifname) + _extract_addresses(ifcfg, vlan, ifname, self.features) vlans.update({ifname: vlan}) # inject global nameserver values under each all interface which diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index d2201998..21604b12 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -996,8 +996,8 @@ NETWORK_CONFIGS = { addresses: - 192.168.14.2/24 - 2001:1::1/64 + ipv6-mtu: 1500 mtu: 9000 - mtu6: 1500 """).rstrip(' '), 'yaml': textwrap.dedent("""\ version: 1 @@ -3585,7 
+3585,9 @@ class TestNetplanPostcommands(CiTestCase): @mock.patch.object(netplan.Renderer, '_netplan_generate') @mock.patch.object(netplan.Renderer, '_net_setup_link') - def test_netplan_render_calls_postcmds(self, mock_netplan_generate, + @mock.patch('cloudinit.util.subp') + def test_netplan_render_calls_postcmds(self, mock_subp, + mock_netplan_generate, mock_net_setup_link): tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, @@ -3597,6 +3599,7 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) + mock_subp.side_effect = iter([util.ProcessExecutionError]) renderer.render_network_state(ns, target=render_dir) mock_netplan_generate.assert_called_with(run=True) @@ -3619,7 +3622,13 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) + mock_subp.side_effect = iter([ + util.ProcessExecutionError, + ('', ''), + ('', ''), + ]) expected = [ + mock.call(['netplan', 'info'], capture=True), mock.call(['netplan', 'generate'], capture=True), mock.call(['udevadm', 'test-builtin', 'net_setup_link', '/sys/class/net/lo'], capture=True), @@ -3875,6 +3884,20 @@ class TestReadInitramfsConfig(CiTestCase): class TestNetplanRoundTrip(CiTestCase): + + NETPLAN_INFO_OUT = textwrap.dedent(""" + netplan.io: + features: + - dhcp-use-domains + - ipv6-mtu + website: https://netplan.io/ + """) + + def setUp(self): + super(TestNetplanRoundTrip, self).setUp() + self.add_patch('cloudinit.net.netplan.util.subp', 'm_subp') + self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '') + def _render_and_read(self, network_config=None, state=None, netplan_path=None, target=None): if target is None: -- cgit v1.2.3 From e1b4b8c903fed3b69e57ec08c17ce94097d55901 Mon Sep 17 00:00:00 2001 From: Sam Eiderman Date: Tue, 29 Oct 2019 23:00:36 +0000 Subject: azure: Do not lock user on instance id change After initial boot ovf-env.xml is copied to agent dir (/var/lib/waagent/) with REDACTED password. On subsequent boots DataSourceAzure loads with a configuration where the user specified in /var/lib/waagent/ovf-env.xml is locked. If instance id changes, cc_users_groups action will lock the user. Fix this behavior by not locking the user if its password is REDACTED. 
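To make the new behavior concrete, here is a minimal, self-contained sketch of the patched default-user logic (names mirror the diff below; ``encrypt_pass`` is stubbed here so the snippet runs standalone)::

    DEF_PASSWD_REDACTION = 'REDACTED'

    def build_default_user(username, password, encrypt_pass=lambda p: p):
        # Mirrors the patched read_azure_ovf(): any supplied password,
        # including the redacted placeholder, keeps the account unlocked;
        # only a non-redacted password is hashed and stored.
        defuser = {}
        if username:
            defuser['name'] = username
        if password:
            defuser['lock_passwd'] = False
            if DEF_PASSWD_REDACTION != password:
                defuser['passwd'] = encrypt_pass(password)
        return defuser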
LP: #1849677 --- cloudinit/sources/DataSourceAzure.py | 5 +++-- tests/unittests/test_datasource/test_azure.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4984fa84..cdf49d36 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1193,9 +1193,10 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password and DEF_PASSWD_REDACTION != password: - defuser['passwd'] = encrypt_pass(password) + if password: defuser['lock_passwd'] = False + if DEF_PASSWD_REDACTION != password: + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3547dd94..80c6f019 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -769,6 +769,22 @@ scbus-1 on xpt0 bus 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + def test_user_not_locked_if_password_redacted(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': dsaz.DEF_PASSWD_REDACTION} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user should have the updated username and should not be locked. + self.assertEqual(defuser['name'], odata['UserName']) + self.assertIn('lock_passwd', defuser) + self.assertFalse(defuser['lock_passwd']) + def test_userdata_plain(self): mydata = "FOOBAR" odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} -- cgit v1.2.3 From d3e71b5e843edf73eb7da511a032d987e314bd69 Mon Sep 17 00:00:00 2001 From: Matthias Baur Date: Thu, 31 Oct 2019 15:01:10 +0000 Subject: cc_puppet: Implement csr_attributes.yaml support This change adds two new parameters: * csr_attributes * csr_attributes_path Those parameters allow configuring the content of the csr_attributes.yaml file. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html --- cloudinit/config/cc_puppet.py | 34 ++++++++++++++++++---- .../unittests/test_handler/test_handler_puppet.py | 34 ++++++++++++++++++++++ 2 files changed, 63 insertions(+), 5 deletions(-) (limited to 'tests')
The configuration is specified as a dictionary containing high-level ``<config>
`` @@ -40,6 +41,10 @@ If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but instead will be used as the puppermaster certificate. It should be specified in pem format as a multi-line string (using the ``|`` yaml notation). +Additionally it's possible to create a csr_attributes.yaml for +CSR attributes and certificate extension requests. +See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html + **Internal name:** ``cc_puppet`` **Module frequency:** per instance @@ -53,6 +58,7 @@ in pem format as a multi-line string (using the ``|`` yaml notation). version: conf_file: '/etc/puppet/puppet.conf' ssl_dir: '/var/lib/puppet/ssl' + csr_attributes_path: '/etc/puppet/csr_attributes.yaml' package_name: 'puppet' conf: agent: @@ -62,28 +68,39 @@ in pem format as a multi-line string (using the ``|`` yaml notation). -------BEGIN CERTIFICATE------- -------END CERTIFICATE------- + csr_attributes: + custom_attributes: + 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 + extension_requests: + pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E + pp_image_name: my_ami_image + pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 """ from six import StringIO import os import socket +import yaml from cloudinit import helpers from cloudinit import util PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' PUPPET_SSL_DIR = '/var/lib/puppet/ssl' +PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml' PUPPET_PACKAGE_NAME = 'puppet' class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, log): + def __init__(self, puppet_conf_file, puppet_ssl_dir, + csr_attributes_path, log): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem") + self.csr_attributes_path = csr_attributes_path def _autostart_puppet(log): @@ -118,8 +135,10 @@ def handle(name, cfg, cloud, log, _args): conf_file = util.get_cfg_option_str( puppet_cfg, 'conf_file', PUPPET_CONF_PATH) ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) + csr_attributes_path = util.get_cfg_option_str( + puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH) - p_constants = PuppetConstants(conf_file, ssl_dir, log) + p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) if not install and version: log.warning(("Puppet install set false but version supplied," " doing nothing.")) @@ -176,6 +195,11 @@ def handle(name, cfg, cloud, log, _args): % (p_constants.conf_path)) util.write_file(p_constants.conf_path, puppet_config.stringify()) + if 'csr_attributes' in puppet_cfg: + util.write_file(p_constants.csr_attributes_path, + yaml.dump(puppet_cfg['csr_attributes'], + default_flow_style=False)) + # Set it up so it autostarts _autostart_puppet(log) diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index 0b6e3b58..1494177d 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -6,6 +6,7 @@ from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import CiTestCase, mock import logging +import textwrap LOG = logging.getLogger(__name__) @@ -64,6 +65,7 @@ class TestPuppetHandle(CiTestCase): super(TestPuppetHandle, self).setUp() self.new_root = self.tmp_dir() self.conf = self.tmp_path('puppet.conf') + self.csr_attributes_path = self.tmp_path('csr_attributes.yaml') def 
_get_cloud(self, distro): paths = helpers.Paths({'templates_dir': self.new_root}) @@ -140,3 +142,35 @@ class TestPuppetHandle(CiTestCase): content = util.load_file(self.conf) expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n' self.assertEqual(expected, content) + + @mock.patch('cloudinit.config.cc_puppet.util.subp') + def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto): + """When csr_attributes is provided + creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = { + 'puppet': { + 'csr_attributes': { + 'custom_attributes': { + '1.2.840.113549.1.9.7': '342thbjkt82094y0ut' + 'hhor289jnqthpc2290'}, + 'extension_requests': { + 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E', + 'pp_image_name': 'my_ami_image', + 'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290'} + }}} + csr_attributes = 'cloudinit.config.cc_puppet.' \ + 'PUPPET_CSR_ATTRIBUTES_PATH' + with mock.patch(csr_attributes, self.csr_attributes_path): + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + content = util.load_file(self.csr_attributes_path) + expected = textwrap.dedent("""\ + custom_attributes: + 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 + extension_requests: + pp_image_name: my_ami_image + pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 + pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E + """) + self.assertEqual(expected, content) -- cgit v1.2.3 From a61ee02a50eb21954c114e01d2d042916bb2dc14 Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Thu, 31 Oct 2019 15:15:51 +0000 Subject: OVF: disable custom script execution by default For security reasons, custom script execution is now disabled by default. If a custom script is provided, customization stops unless the script is explicitly enabled in the tools config.
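A rough sketch of the new gate, assuming (as the diff below shows) that ``get_tools_config(group, key, default)`` queries the VMware tools configuration; the real constants are ``CONFGROUPNAME_GUESTCUSTOMIZATION`` and ``GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS``::

    def custom_scripts_enabled(get_tools_config, group, key):
        # The default flips from "true" to "false", and the comparison
        # tightens from `!= "false"` to `== "true"`, so a missing or
        # unrecognized value now leaves custom scripts disabled.
        return get_tools_config(group, key, "false").lower() == "true"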
--- cloudinit/sources/DataSourceOVF.py | 4 ++-- tests/unittests/test_datasource/test_ovf.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index e7794aab..896841e3 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -155,8 +155,8 @@ class DataSourceOVF(sources.DataSource): custScriptConfig = get_tools_config( CONFGROUPNAME_GUESTCUSTOMIZATION, GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, - "true") - if custScriptConfig.lower() == "false": + "false") + if custScriptConfig.lower() != "true": # Update the customization status if there is a # custom script is disabled if special_customization and customscript: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index a615470a..a19c35c8 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -204,7 +204,7 @@ class TestDatasourceOVF(CiTestCase): customscript = self.tmp_path('test-script', self.tdir) util.write_file(customscript, "This is the post cust script") - with mock.patch(MPATH + 'get_tools_config', return_value='false'): + with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): with mock.patch(MPATH + 'set_customization_status', return_value=('msg', b'')): with self.assertRaises(RuntimeError) as context: -- cgit v1.2.3 From fcc92ad15199318abfad067c63f5ab941addc720 Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Thu, 31 Oct 2019 16:15:27 +0000 Subject: net: fix subnet_is_ipv6() for stateless|stateful Function return false for ipv6_dhcpv6-stateless|stateful, the eni renderer does not add '6' to 'inet' which is incorrect. The subnet_is_ipv6() function is updated to also return true if startswith('ipv6'). LP: #1848690 --- cloudinit/net/network_state.py | 4 ++-- tests/unittests/test_net.py | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index b485f3d9..ba85c69e 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -919,8 +919,8 @@ def is_ipv6_addr(address): def subnet_is_ipv6(subnet): """Common helper for checking network_state subnets for ipv6.""" - # 'static6' or 'dhcp6' - if subnet['type'].endswith('6'): + # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful' or 'ipv6_dhcpv6-stateless' + if subnet['type'].endswith('6') or subnet['type'].startswith('ipv6'): # This is a request for DHCPv6. 
return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 21604b12..6f83ad73 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4098,6 +4098,22 @@ class TestEniRoundTrip(CiTestCase): entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) + def testsimple_render_dhcpv6_stateless(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_stateful(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + def testsimple_render_manual(self): """Test rendering of 'manual' for 'type' and 'control'. -- cgit v1.2.3 From 45ea695f9b4fce180c662ab4211575d64912634e Mon Sep 17 00:00:00 2001 From: Pavel Zakharov Date: Thu, 31 Oct 2019 16:26:54 +0000 Subject: Add config for ssh-key import and consuming user-data This patch enables control over SSH public-key import and over consuming supplied user-data; both restrictions are off by default: allow_userdata: false ssh: allow_public_ssh_keys: false This feature enables closed appliances, which are not designed for user interaction, to prevent customers from unintentionally breaking them. The downstream change for this is here: https://github.com/delphix/cloud-init/pull/4 --- cloudinit/config/cc_ssh.py | 15 +++++++++++-- cloudinit/config/tests/test_ssh.py | 46 ++++++++++++++++++++++++++++++-------- cloudinit/stages.py | 6 ++++- doc/rtd/topics/format.rst | 8 +++++++ tests/unittests/test_data.py | 40 +++++++++++++++++++++++++++++++++ 5 files changed, 103 insertions(+), 12 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index fdd8f4d3..050285a8 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -56,9 +56,13 @@ root login is disabled, and root login opts are set to:: no-port-forwarding,no-agent-forwarding,no-X11-forwarding Authorized keys for the default user/first user defined in ``users`` can be -specified using `ssh_authorized_keys``. Keys should be specified as a list of +specified using ``ssh_authorized_keys``. Keys should be specified as a list of public keys. +Importing ssh public keys for the default user (defined in ``users``) is +enabled by default. This feature may be disabled by setting +``allow_public_ssh_keys: false``. + .. note:: see the ``cc_set_passwords`` module documentation to enable/disable ssh password authentication @@ -91,6 +95,7 @@ public keys. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+ allow_public_ssh_keys: ssh_publish_hostkeys: enabled: (Defaults to true) blacklist: (Defaults to [dsa]) @@ -207,7 +212,13 @@ def handle(_name, cfg, cloud, log, _args): disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS) - keys = cloud.get_public_ssh_keys() or [] + keys = [] + if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True): + keys = cloud.get_public_ssh_keys() or [] + else: + log.debug('Skipping import of public ssh keys per ' + 'config setting: allow_public_ssh_keys=False') + if "ssh_authorized_keys" in cfg: cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index e7789842..0c554414 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -5,6 +5,9 @@ import os.path from cloudinit.config import cc_ssh from cloudinit import ssh_util from cloudinit.tests.helpers import CiTestCase, mock +import logging + +LOG = logging.getLogger(__name__) MODPATH = "cloudinit.config.cc_ssh." @@ -87,7 +90,7 @@ class TestHandleSsh(CiTestCase): cc_ssh.PUBLISH_HOST_KEYS = False cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") options = options.replace("$DISABLE_USER", "root") m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') @@ -100,6 +103,31 @@ class TestHandleSsh(CiTestCase): self.assertEqual([mock.call(set(keys), "root", options=options)], m_setup_keys.call_args_list) + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test allow_public_ssh_keys=False ignores ssh public keys from + platform.
+ """ + cfg = {"allow_public_ssh_keys": False} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(), user), + mock.call(set(), "root", options=options)], + m_setup_keys.call_args_list) + @mock.patch(MODPATH + "glob.glob") @mock.patch(MODPATH + "ug_util.normalize_users_groups") @mock.patch(MODPATH + "os.path.exists") @@ -115,7 +143,7 @@ class TestHandleSsh(CiTestCase): m_nug.return_value = ({user: {"default": user}}, {}) cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) options = options.replace("$DISABLE_USER", "root") @@ -140,7 +168,7 @@ class TestHandleSsh(CiTestCase): m_nug.return_value = ({user: {"default": user}}, {}) cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) options = options.replace("$DISABLE_USER", "root") @@ -165,7 +193,7 @@ class TestHandleSsh(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cloud.get_public_ssh_keys = mock.Mock(return_value=keys) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], @@ -196,7 +224,7 @@ class TestHandleSsh(CiTestCase): cfg = {} expected_call = [self.test_hostkeys[key_type] for key_type in ['ecdsa', 'ed25519', 'rsa']] - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(expected_call)], cloud.datasource.publish_host_keys.call_args_list) @@ -225,7 +253,7 @@ class TestHandleSsh(CiTestCase): cfg = {'ssh_publish_hostkeys': {'enabled': True}} expected_call = [self.test_hostkeys[key_type] for key_type in ['ecdsa', 'ed25519', 'rsa']] - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(expected_call)], cloud.datasource.publish_host_keys.call_args_list) @@ -252,7 +280,7 @@ class TestHandleSsh(CiTestCase): cloud.datasource.publish_host_keys = mock.Mock() cfg = {'ssh_publish_hostkeys': {'enabled': False}} - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) cloud.datasource.publish_host_keys.assert_not_called() @@ -282,7 +310,7 @@ class TestHandleSsh(CiTestCase): 'blacklist': ['dsa', 'rsa']}} expected_call = [self.test_hostkeys[key_type] for key_type in ['ecdsa', 'ed25519']] - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(expected_call)], cloud.datasource.publish_host_keys.call_args_list) @@ -312,6 +340,6 @@ class TestHandleSsh(CiTestCase): 'blacklist': []}} expected_call = [self.test_hostkeys[key_type] for key_type in ['dsa', 
'ecdsa', 'ed25519', 'rsa']] - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(expected_call)], cloud.datasource.publish_host_keys.call_args_list) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 77c21de0..71f3a49e 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -549,7 +549,11 @@ class Init(object): with events.ReportEventStack("consume-user-data", "reading and applying user-data", parent=self.reporter): - self._consume_userdata(frequency) + if util.get_cfg_option_bool(self.cfg, 'allow_userdata', True): + self._consume_userdata(frequency) + else: + LOG.debug('allow_userdata = False: discarding user-data') + with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 76050402..f9f4ba6c 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -196,6 +196,14 @@ Example Also this `blog`_ post offers another example for more advanced usage. +Disabling User-Data +=================== + +Cloud-init can be configured to ignore any user-data provided to the instance. +This allows custom images to prevent users from accidentally breaking closed +appliances. Setting ``allow_userdata: false`` in the configuration will prevent +cloud-init from processing user-data. + .. [#] See your cloud provider for applicable user-data size limitations... .. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html .. vi: textwidth=78 diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 22cf8f28..e55feb22 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -525,6 +525,46 @@ c: 4 self.assertEqual(cfg.get('password'), 'gocubs') self.assertEqual(cfg.get('locale'), 'chicago') + @mock.patch('cloudinit.util.read_conf_with_confd') + def test_dont_allow_user_data(self, mock_cfg): + mock_cfg.return_value = {"allow_userdata": False} + + # test that user-data is ignored but vendor-data is kept + user_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + vendor_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "quxA" }, + { "op": "add", "path": "/bar", "value": "quxB" }, + { "op": "add", "path": "/foo", "value": "quxC" } +] +''' + self.reRoot() + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertIn('vendor_data', cfg) + self.assertEqual('quxA', cfg['baz']) + self.assertEqual('quxB', cfg['bar']) + self.assertEqual('quxC', cfg['foo']) + class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): -- cgit v1.2.3 From e81389592a67bb54b889512928dcdf65f87ad436 Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Thu, 31 Oct 2019 19:45:29 +0000 Subject: DataSourceSmartOS: reconfigure network on each boot In typical cases, SmartOS does not use DHCP for network configuration. As such, if the network configuration changes, that change is reflected in metadata and will be picked up during the next boot.
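As a small illustration of the opt-in (grounded in the diff and test below; the stock DataSource base class seeds ``update_events['network']`` with ``BOOT_NEW_INSTANCE``)::

    from cloudinit.event import EventType

    # What DataSourceSmartOS.__init__ now advertises for 'network':
    update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
    update_events['network'].add(EventType.BOOT)
    assert update_events['network'] == set(
        [EventType.BOOT_NEW_INSTANCE, EventType.BOOT])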
LP: #1765801 Joyent: OS-6902 reconfigure network on each boot --- cloudinit/sources/DataSourceSmartOS.py | 8 +++++++- tests/unittests/test_datasource/test_smartos.py | 9 ++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 32b57cdd..cf676504 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc. # # Author: Ben Howard # @@ -34,6 +34,7 @@ from cloudinit import log as logging from cloudinit import serial from cloudinit import sources from cloudinit import util +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = {} self.network_data = None self._network_config = None + self.update_events['network'].add(EventType.BOOT) self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) @@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource): @property def network_config(self): + # sources.clear_cached_data() may set _network_config to '_unset'. + if self._network_config == sources.UNSET: + self._network_config = None + if self._network_config is None: if self.network_data is not None: self._network_config = ( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 42ac6971..d5b1c29c 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc. 
# # Author: Ben Howard # @@ -31,6 +31,7 @@ from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, identify_file) +from cloudinit.event import EventType import six @@ -653,6 +654,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) + def test_reconfig_network_on_boot(self): + # Test to ensure that network is configured from metadata on each boot + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]), + dsrc.update_events['network']) + class TestIdentifyFile(CiTestCase): """Test the 'identify_file' utility.""" -- cgit v1.2.3 From 15fa154602f281c9239084d7d20a0999c6b09970 Mon Sep 17 00:00:00 2001 From: David Kindred Date: Mon, 4 Nov 2019 22:00:19 +0000 Subject: configdrive: fix subplatform config-drive for /config-drive source When ConfigDrive discovers the source path /config-drive, subplatform is now reports 'config-drive' LP: #1849731 --- cloudinit/sources/DataSourceConfigDrive.py | 6 +++--- .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c3627152..f77923c2 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -163,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def _get_subplatform(self): """Return the subplatform metadata source details.""" - if self.seed_dir in self.source: - subplatform_type = 'seed-dir' - elif self.source.startswith('/dev'): + if self.source.startswith('/dev'): subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.source) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 8c788c1c..cfb3b0a7 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -220,13 +220,15 @@ CFG_DRIVE_FILES_V2 = { 'openstack/2015-10-15/user_data': USER_DATA, 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} +M_PATH = "cloudinit.sources.DataSourceConfigDrive." 
+ class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() self.add_patch( - "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() @@ -468,7 +470,7 @@ class TestConfigDriveDataSource(CiTestCase): util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) @@ -478,6 +480,19 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('openstack', myds.platform) self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + def test_subplatform_config_drive_when_starts_with_dev(self): + """subplatform reports config-drive when source starts with /dev/.""" + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: + with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False): + with mock.patch(M_PATH + 'util.mount_cb'): + with mock.patch(M_PATH + 'on_first_boot'): + m_find_devs.return_value = ['/dev/anything'] + self.assertEqual(True, cfg_ds.get_data()) + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + class TestNetJson(CiTestCase): def setUp(self): @@ -485,13 +500,13 @@ class TestNetJson(CiTestCase): self.tmp = self.tmp_dir() self.maxDiff = None - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) -- cgit v1.2.3 From 02f07b666adc62d70c4f1a98c2ae80cb6629fa9a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 4 Nov 2019 22:11:37 +0000 Subject: azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6 Network v2 configuration for Azure will set both dhcp4 and dhcp6 to False by default. When IPv6 privateIpAddresses are present for an interface in Azure's Instance Metadata Service (IMDS), set dhcp6: True and provide a route-metric value that will match the corresponding dhcp4 route-metric. The route-metric value will increase by 100 for each additional interface present to ensure the primary interface has a route to IMDS. Also fix dhcp route-metric rendering for eni and sysconfig distros. 
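For a dual-stack third NIC, the v2 config now emitted looks like the following sketch (values copied from the new unit tests below)::

    expected_eth2 = {
        'set-name': 'eth2',
        'match': {'macaddress': '33:0d:3a:04:75:98'},
        'dhcp4': True,
        'dhcp4-overrides': {'route-metric': 300},  # third NIC: (2 + 1) * 100
        'dhcp6': True,
        # matches the dhcp4 metric so v4/v6 default routes agree
        'dhcp6-overrides': {'route-metric': 300},
    }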
LP: #1850308 --- cloudinit/net/network_state.py | 17 ++++- cloudinit/net/sysconfig.py | 6 +- cloudinit/sources/DataSourceAzure.py | 10 ++- tests/unittests/test_datasource/test_azure.py | 101 ++++++++++++++++++++++++++ tests/unittests/test_net.py | 54 ++++++++++++++ 5 files changed, 178 insertions(+), 10 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index ba85c69e..20b7716b 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -22,8 +22,9 @@ NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ - 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces', - 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' + 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', + 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', + 'renderer', 'set-name', 'wakeonlan' ] NET_CONFIG_TO_V2 = { @@ -747,12 +748,20 @@ class NetworkStateInterpreter(object): def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" + def _add_dhcp_overrides(overrides, subnet): + if 'route-metric' in overrides: + subnet['metric'] = overrides['route-metric'] + subnets = [] if cfg.get('dhcp4'): - subnets.append({'type': 'dhcp4'}) + subnet = {'type': 'dhcp4'} + _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet) + subnets.append(subnet) if cfg.get('dhcp6'): + subnet = {'type': 'dhcp6'} self.use_ipv6 = True - subnets.append({'type': 'dhcp6'}) + _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet) + subnets.append(subnet) gateway4 = None gateway6 = None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 6717d924..fe0c67ca 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -395,6 +395,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') + # metric may apply to both dhcp and static config + if 'metric' in subnet: + iface_cfg['METRIC'] = subnet['metric'] if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: if has_default_route and iface_cfg['BOOTPROTO'] != 'none': iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False @@ -426,9 +429,6 @@ class Renderer(renderer.Renderer): else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - if 'dns_search' in subnet: iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index cdf49d36..44cca210 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1322,7 +1322,8 @@ def parse_network_config(imds_metadata): network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} + dev_config = {'dhcp4': False, 'dhcp6': False} + dhcp_override = {'route-metric': (idx + 1) * 100} for addr4 in intf['ipv4']['ipAddress']: privateIpv4 = addr4['privateIpAddress'] if privateIpv4: @@ -1340,12 +1341,15 @@ def parse_network_config(imds_metadata): # non-primary interfaces should have a higher # route-metric (cost) so default routes prefer # primary nic due to lower route-metric value - dev_config['dhcp4-overrides'] = { - 'route-metric': (idx + 1) * 100} + dev_config['dhcp4-overrides'] = dhcp_override for addr6 in intf['ipv6']['ipAddress']: privateIpv6 = 
addr6['privateIpAddress'] if privateIpv6: dev_config['dhcp6'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp6-overrides'] = dhcp_override break if dev_config: mac = ':'.join(re.findall(r'..', intf['macAddress'])) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 80c6f019..d92d7b2f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -153,6 +153,102 @@ SECONDARY_INTERFACE = { MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + + def test_single_ipv4_nic_configuration(self): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + def test_increases_route_metric_for_non_primary_nics(self): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + 
self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_secondary_ips_will_be_static_addrs(self): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + class TestGetMetadataFromIMDS(HttprettyTestCase): with_logs = True @@ -641,6 +737,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}}, 'version': 2} @@ -658,14 +755,17 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}}, 'eth1': {'set-name': 'eth1', 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}}, 'eth2': {'set-name': 'eth2', 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} @@ -999,6 +1099,7 @@ scbus-1 on xpt0 bus 0 'ethernets': { 'eth0': {'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 6f83ad73..35ce55d2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3101,6 +3101,36 @@ USERCTL=no self._compare_files_to_expected( expected, self._render_and_read(network_config=v2data)) + def test_from_v2_route_metric(self): + """verify route-metric gets rendered on nic when source is netplan.""" + overrides = {'route-metric': 100} + v2base = { + 'version': 2, + 'ethernets': { + 'eno1': {'dhcp4': True, + 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=eno1 + HWADDR=07-1c-c6-75-a4-be + METRIC=100 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + for dhcp_ver in ('dhcp4', 'dhcp6'): + v2data = copy.deepcopy(v2base) + if dhcp_ver == 'dhcp6': + expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n" + v2data['ethernets']['eno1'].update( + {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides}) + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -3466,6 +3496,30 @@ iface eth0 inet dhcp self.assertEqual( expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + def test_v2_route_metric_to_eni(self): + """Network v2 route-metric overrides are preserved in eni output""" + tmp_dir = self.tmp_dir() + renderer = eni.Renderer() + expected_tmpl = textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto 
eth0 + iface eth0 inet{suffix} dhcp + metric 100 + """) + for dhcp_ver in ('dhcp4', 'dhcp6'): + suffix = '6' if dhcp_ver == 'dhcp6' else '' + dhcp_cfg = { + dhcp_ver: True, + '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} + v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + ns = network_state.parse_net_config_data(v2_input) + renderer.render_network_state(ns, target=tmp_dir) + self.assertEqual( + expected_tmpl.format(suffix=suffix), + dir2dict(tmp_dir)['/etc/network/interfaces']) + class TestNetplanNetRendering(CiTestCase): -- cgit v1.2.3 From 92cc71dec8da73bf43d345418b73db1a32a6b8b9 Mon Sep 17 00:00:00 2001 From: momousta Date: Fri, 8 Nov 2019 15:02:07 -0600 Subject: reporting: Using a uuid to enforce uniqueness on the KVP keys. The KVPs currently being emitted to the .kvp_pool file can have duplicate keys, which is wrong since these keys should be unique. This situation can occur if, for example, an Azure function that reports telemetry through KVPs is called twice or more. Any KVP consumer can get confused by the duplicate keys, and race conditions can and have occurred. --- cloudinit/reporting/handlers.py | 8 +++++--- tests/unittests/test_reporting_hyperv.py | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 10165aec..6605e795 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc +import uuid import fcntl import json import six @@ -201,10 +202,11 @@ class HyperVKvpReportingHandler(ReportingHandler): def _event_key(self, event): """ the event key format is: - CLOUD_INIT|<incarnation number>|<event_type>|<event_name> + CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
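A hedged sketch of the resulting key construction (field names follow the docstring above; the standalone function and its parameter names are illustrative, not the actual method signature)::

    import uuid

    def event_key(incarnation_no, event_type, event_name):
        # Suffixing a fresh uuid4 makes every reported KVP key unique,
        # even when the same event is emitted more than once.
        return 'CLOUD_INIT|{0}|{1}|{2}|{3}'.format(
            incarnation_no, event_type, event_name, uuid.uuid4())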