From c6dff581a9c253170d5e3f12fb83d16a8dec8257 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 12 Apr 2018 15:32:25 -0400
Subject: Implement ntp client spec with auto support for distro selection

Add a base NTP client configuration dictionary and allow distro-specific
changes to be merged. Add a select client function which implements logic
to prefer installed clients over clients which need to be installed.
Also allow distributions to override the cloud-init defaults.

LP: #1749722
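The selection order described above reduces to a short search. The sketch
below is an illustration only, not the code this patch adds: pick_client is
a hypothetical name, shutil.which stands in for cloudinit's util.which, and
the real select_ntp_client() in cloudinit/config/cc_ntp.py additionally
merges per-distro configuration deltas:

    import shutil

    def pick_client(user_client, preferred, configs):
        # An explicit, non-'auto' client from user-data or system config wins.
        if user_client and user_client != 'auto':
            return configs[user_client]
        # Otherwise prefer the first client whose binary is already installed.
        for name in preferred:
            if shutil.which(configs[name]['check_exe']):
                return configs[name]
        # Nothing installed: fall back to the most preferred client, which
        # the handler will then install.
        return configs[preferred[0]]

This installed-first preference is what the snappy test below relies on: an
already-present systemd-timesyncd wins the search before ntp is considered.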
---
 tests/unittests/test_distros/test_netconfig.py     |   6 +
 .../test_distros/test_user_data_normalize.py       |   6 +
 tests/unittests/test_handler/test_handler_ntp.py   | 881 ++++++++++++++-------
 3 files changed, 590 insertions(+), 303 deletions(-)

(limited to 'tests/unittests')

diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 1c2e45fe..7765e408 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -189,6 +189,12 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
 status: active
 """
 
+    def setUp(self):
+        super(TestNetCfgDistro, self).setUp()
+        self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+        self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+        self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
+
     def _get_distro(self, dname, renderers=None):
         cls = distros.fetch(dname)
         cfg = settings.CFG_BUILTIN

diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index 0fa9cdb5..fa4b6cfe 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -22,6 +22,12 @@ bcfg = {
 
 class TestUGNormalize(TestCase):
 
+    def setUp(self):
+        super(TestUGNormalize, self).setUp()
+        self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+        self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+        self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
+
     def _make_distro(self, dtype, def_user=None):
         cfg = dict(settings.CFG_BUILTIN)
         cfg['system_info']['distro'] = dtype

diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 695897c0..1b3ca570 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -4,20 +4,21 @@
 from cloudinit.config import cc_ntp
 from cloudinit.sources import DataSourceNone
 from cloudinit import (distros, helpers, cloud, util)
 from cloudinit.tests.helpers import (
-    FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+    CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
 
+import copy
 import os
 from os.path import dirname
 import shutil
 
-NTP_TEMPLATE = b"""\
+NTP_TEMPLATE = """\
 ## template: jinja
 servers {{servers}}
 pools {{pools}}
 """
 
-TIMESYNCD_TEMPLATE = b"""\
+TIMESYNCD_TEMPLATE = """\
 ## template:jinja
 [Time]
 {% if servers or pools -%}
@@ -32,56 +33,88 @@ class TestNtp(FilesystemMockingTestCase):
 
     def setUp(self):
         super(TestNtp, self).setUp()
-        self.subp = util.subp
         self.new_root = self.tmp_dir()
+        self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+        self.m_snappy.return_value = False
+        self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+        self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
 
-    def _get_cloud(self, distro):
-        self.patchUtils(self.new_root)
+    def _get_cloud(self, distro, sys_cfg=None):
+        self.new_root = self.reRoot(root=self.new_root)
         paths = helpers.Paths({'templates_dir': self.new_root})
         cls = distros.fetch(distro)
-        mydist = cls(distro, {}, paths)
-        myds = DataSourceNone.DataSourceNone({}, mydist, paths)
-        return cloud.Cloud(myds, paths, {}, mydist, None)
+        if not sys_cfg:
+            sys_cfg = {}
+        mydist = cls(distro, sys_cfg, paths)
+        myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths)
+        return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
+
+    def _get_template_path(self, template_name, distro, basepath=None):
+        # ntp.conf.{distro} -> ntp.conf.debian.tmpl
+        template_fn = '{0}.tmpl'.format(
+            template_name.replace('{distro}', distro))
+        if not basepath:
+            basepath = self.new_root
+        path = os.path.join(basepath, template_fn)
+        return path
+
+    def _generate_template(self, template=None):
+        if not template:
+            template = NTP_TEMPLATE
+        confpath = os.path.join(self.new_root, 'client.conf')
+        template_fn = os.path.join(self.new_root, 'client.conf.tmpl')
+        util.write_file(template_fn, content=template)
+        return (confpath, template_fn)
+
+    def _mock_ntp_client_config(self, client=None, distro=None):
+        if not client:
+            client = 'ntp'
+        if not distro:
+            distro = 'ubuntu'
+        dcfg = cc_ntp.distro_ntp_client_configs(distro)
+        if client == 'systemd-timesyncd':
+            template = TIMESYNCD_TEMPLATE
+        else:
+            template = NTP_TEMPLATE
+        (confpath, template_fn) = self._generate_template(template=template)
+        ntpconfig = copy.deepcopy(dcfg[client])
+        ntpconfig['confpath'] = confpath
+        ntpconfig['template_name'] = os.path.basename(confpath)
+        return ntpconfig
 
     @mock.patch("cloudinit.config.cc_ntp.util")
     def test_ntp_install(self, mock_util):
-        """ntp_install installs via install_func when check_exe is absent."""
+        """ntp_install_client runs install_func when check_exe is absent."""
         mock_util.which.return_value = None  # check_exe not found.
         install_func = mock.MagicMock()
-        cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx')
-
+        cc_ntp.install_ntp_client(install_func,
+                                  packages=['ntpx'], check_exe='ntpdx')
         mock_util.which.assert_called_with('ntpdx')
         install_func.assert_called_once_with(['ntpx'])
 
     @mock.patch("cloudinit.config.cc_ntp.util")
     def test_ntp_install_not_needed(self, mock_util):
-        """ntp_install doesn't attempt install when check_exe is found."""
-        mock_util.which.return_value = ["/usr/sbin/ntpd"]  # check_exe found.
+        """ntp_install_client doesn't install when check_exe is found."""
+        client = 'chrony'
+        mock_util.which.return_value = [client]  # check_exe found.
         install_func = mock.MagicMock()
-        cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd')
+        cc_ntp.install_ntp_client(install_func, packages=[client],
+                                  check_exe=client)
         install_func.assert_not_called()
 
     @mock.patch("cloudinit.config.cc_ntp.util")
     def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util):
-        """ntp_install calls install_func with empty list"""
+        """ntp_install_client runs install_func with empty list"""
         mock_util.which.return_value = None  # check_exe not found
         install_func = mock.MagicMock()
-        cc_ntp.install_ntp(install_func, packages=[], check_exe='timesyncd')
+        cc_ntp.install_ntp_client(install_func, packages=[],
+                                  check_exe='timesyncd')
         install_func.assert_called_once_with([])
 
-    def test_ntp_rename_ntp_conf(self):
-        """When NTP_CONF exists, rename_ntp moves it."""
-        ntpconf = self.tmp_path("ntp.conf", self.new_root)
-        util.write_file(ntpconf, "")
-        with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
-            cc_ntp.rename_ntp_conf()
-        self.assertFalse(os.path.exists(ntpconf))
-        self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
-
     @mock.patch("cloudinit.config.cc_ntp.util")
     def test_reload_ntp_defaults(self, mock_util):
         """Test service is restarted/reloaded (defaults)"""
-        service = 'ntp'
+        service = 'ntp_service_name'
         cmd = ['service', service, 'restart']
         cc_ntp.reload_ntp(service)
         mock_util.subp.assert_called_with(cmd, capture=True)
@@ -89,193 +122,171 @@ class TestNtp(FilesystemMockingTestCase):
     @mock.patch("cloudinit.config.cc_ntp.util")
     def test_reload_ntp_systemd(self, mock_util):
         """Test service is restarted/reloaded (systemd)"""
-        service = 'ntp'
-        cmd = ['systemctl', 'reload-or-restart', service]
+        service = 'ntp_service_name'
         cc_ntp.reload_ntp(service, systemd=True)
-        mock_util.subp.assert_called_with(cmd, capture=True)
-
-    @mock.patch("cloudinit.config.cc_ntp.util")
-    def test_reload_ntp_systemd_timesycnd(self, mock_util):
-        """Test service is restarted/reloaded (systemd/timesyncd)"""
-        service = 'systemd-timesycnd'
         cmd = ['systemctl', 'reload-or-restart', service]
-        cc_ntp.reload_ntp(service, systemd=True)
         mock_util.subp.assert_called_with(cmd, capture=True)
 
+    def test_ntp_rename_ntp_conf(self):
+        """When NTP_CONF exists, rename_ntp moves it."""
+        ntpconf = self.tmp_path("ntp.conf", self.new_root)
+        util.write_file(ntpconf, "")
+        cc_ntp.rename_ntp_conf(confpath=ntpconf)
+        self.assertFalse(os.path.exists(ntpconf))
+        self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
+
     def test_ntp_rename_ntp_conf_skip_missing(self):
         """When NTP_CONF doesn't exist rename_ntp doesn't create a file."""
         ntpconf = self.tmp_path("ntp.conf", self.new_root)
         self.assertFalse(os.path.exists(ntpconf))
-        with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
-            cc_ntp.rename_ntp_conf()
+        cc_ntp.rename_ntp_conf(confpath=ntpconf)
        self.assertFalse(os.path.exists("{0}.dist".format(ntpconf)))
         self.assertFalse(os.path.exists(ntpconf))
 
-    def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self):
-        """write_ntp_config_template reads content from ntp.conf.tmpl.
-
-        It reads ntp.conf.tmpl if present and renders the value from servers
-        key. When no pools key is defined, template is rendered using an empty
-        list for pools.
- """ - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): + """write_ntp_config_template reads from $client.conf.distro.tmpl""" + servers = [] + pools = ['10.0.0.1', '10.0.0.2'] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers ['192.168.2.1', '192.168.2.2']\npools []\n", - content.decode()) + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", content.decode()) - def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): - """write_ntp_config_template reads content from ntp.conf.distro.tmpl. + def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): + """write_ntp_config_template defaults pools servers upon empty config. - It reads ntp.conf..tmpl before attempting ntp.conf.tmpl. It - renders the value from the keys servers and pools. When no - servers value is present, template is rendered using an empty list. + When both pools and servers are empty, default NR_POOL_SERVERS get + configured. """ distro = 'ubuntu' - cfg = { - 'pools': ['10.0.0.1', '10.0.0.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl which isn't read - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(b'NOT READ: ntp.conf..tmpl is primary') - # Create ntp.conf.tmpl. - with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + pools = cc_ntp.generate_server_names(distro) + servers = [] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template(distro, + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + "servers []\npools {0}\n".format(pools), content.decode()) - def test_write_ntp_config_template_defaults_pools_when_empty_lists(self): - """write_ntp_config_template defaults pools servers upon empty config. + def test_defaults_pools_empty_lists_sles(self): + """write_ntp_config_template defaults opensuse pools upon empty config. When both pools and servers are empty, default NR_POOL_SERVERS get configured. 
""" - distro = 'ubuntu' - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] + distro = 'sles' + default_pools = cc_ntp.generate_server_names(distro) + (confpath, template_fn) = self._generate_template() + + cc_ntp.write_ntp_config_template(distro, + servers=[], pools=[], + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents + for pool in default_pools: + self.assertIn('opensuse', pool) self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content.decode()) + "servers []\npools {0}\n".format(default_pools), content.decode()) self.assertIn( "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), self.logs.getvalue()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_mocked_template(self, m_ntp_install): - """Test ntp handler renders ubuntu ntp.conf template.""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - m_ntp_install.return_value = True - - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - with mock.patch.object(util, 'which', return_value=None): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + ntp_conf).contents - self.assertEqual( - 'servers {0}\npools {1}\n'.format(servers, pools), - content.decode()) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_ntp_handler_mocked_template_snappy(self, m_util): - """Test ntp handler renders timesycnd.conf template on snappy.""" + def test_timesyncd_template(self): + """Test timesycnd template is correct""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - m_util.system_is_snappy.return_value = True - - # Create timesyncd.conf.tmpl - tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root) - template = '{0}.tmpl'.format(tsyncd_conf) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + tsyncd_conf).contents + (confpath, template_fn) = self._generate_template( + template=TIMESYNCD_TEMPLATE) + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), content.decode()) - def test_ntp_handler_real_distro_templates(self): - """Test ntp handler 
-        """Test ntp handler renders the shipped distro ntp.conf templates."""
+    def test_distro_ntp_client_configs(self):
+        """Test we have updated ntp client configs on different distros"""
+        delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG)
+        base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG)
+        # confirm no-delta distros match the base config
+        for distro in cc_ntp.distros:
+            if distro not in delta:
+                result = cc_ntp.distro_ntp_client_configs(distro)
+                self.assertEqual(base, result)
+        # for distros with delta, ensure the merged config values match
+        # what is set in the delta
+        for distro in delta.keys():
+            result = cc_ntp.distro_ntp_client_configs(distro)
+            for client in delta[distro].keys():
+                for key in delta[distro][client].keys():
+                    self.assertEqual(delta[distro][client][key],
+                                     result[client][key])
+
+    def test_ntp_handler_real_distro_ntp_templates(self):
+        """Test ntp handler renders the shipped distro ntp client templates."""
         pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
         servers = ['192.168.23.3', '192.168.23.4']
-        cfg = {
-            'ntp': {
-                'pools': pools,
-                'servers': servers
-            }
-        }
-        ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
-        for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'):
-            mycloud = self._get_cloud(distro)
-            root_dir = dirname(dirname(os.path.realpath(util.__file__)))
-            tmpl_file = os.path.join(
-                '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro))
-            # Create a copy in our tmp_dir
-            shutil.copy(
-                tmpl_file,
-                os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro))
-            with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
-                with mock.patch.object(util, 'which', return_value=[True]):
-                    cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-
-            content = util.read_file_or_url('file://' + ntp_conf).contents
-            expected_servers = '\n'.join([
-                'server {0} iburst'.format(server) for server in servers])
-            self.assertIn(
-                expected_servers, content.decode(),
-                'failed to render ntp.conf for distro:{0}'.format(distro))
-            expected_pools = '\n'.join([
-                'pool {0} iburst'.format(pool) for pool in pools])
-            self.assertIn(
-                expected_pools, content.decode(),
-                'failed to render ntp.conf for distro:{0}'.format(distro))
+        for client in ['ntp', 'systemd-timesyncd', 'chrony']:
+            for distro in cc_ntp.distros:
+                distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
+                ntpclient = distro_cfg[client]
+                confpath = (
+                    os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
+                template = ntpclient.get('template_name')
+                # find sourcetree template file
+                root_dir = (
+                    dirname(dirname(os.path.realpath(util.__file__))) +
+                    '/templates')
+                source_fn = self._get_template_path(template, distro,
+                                                    basepath=root_dir)
+                template_fn = self._get_template_path(template, distro)
+                # don't fail if cloud-init doesn't have a template for
+                # a distro,client pair
+                if not os.path.exists(source_fn):
+                    continue
+                # Create a copy in our tmp_dir
+                shutil.copy(source_fn, template_fn)
+                cc_ntp.write_ntp_config_template(distro, servers=servers,
+                                                 pools=pools, path=confpath,
+                                                 template_fn=template_fn)
+                content = util.read_file_or_url('file://' + confpath).contents
+                if client in ['ntp', 'chrony']:
+                    expected_servers = '\n'.join([
+                        'server {0} iburst'.format(srv) for srv in servers])
+                    print('distro=%s client=%s' % (distro, client))
+                    self.assertIn(expected_servers, content.decode(),
+                                  ('failed to render {0} conf'
+                                   ' for distro:{1}'.format(client, distro)))
+                    expected_pools = '\n'.join([
+                        'pool {0} iburst'.format(pool) for pool in pools])
+                    self.assertIn(expected_pools,
+                                  content.decode(),
+                                  ('failed to render {0} conf'
+                                   ' for distro:{1}'.format(client, distro)))
+                elif client == 'systemd-timesyncd':
+                    expected_content = (
+                        "# cloud-init generated file\n" +
+                        "# See timesyncd.conf(5) for details.\n\n" +
+                        "[Time]\nNTP=%s %s \n" % (" ".join(servers),
+                                                  " ".join(pools)))
+                    self.assertEqual(expected_content, content.decode())
 
     def test_no_ntpcfg_does_nothing(self):
         """When no ntp section is defined handler logs a warning and noops."""
@@ -285,95 +296,99 @@ class TestNtp(FilesystemMockingTestCase):
             'not present or disabled by cfg\n',
             self.logs.getvalue())
 
-    def test_ntp_handler_schema_validation_allows_empty_ntp_config(self):
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_allows_empty_ntp_config(self,
+                                                                   m_select):
         """Ntp schema validation allows for an empty ntp: configuration."""
         valid_empty_configs = [{'ntp': {}}, {'ntp': None}]
-        distro = 'ubuntu'
-        cc = self._get_cloud(distro)
-        ntp_conf = os.path.join(self.new_root, 'ntp.conf')
-        with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
-            stream.write(NTP_TEMPLATE)
         for valid_empty_config in valid_empty_configs:
-            with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
-                cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, [])
-            with open(ntp_conf) as stream:
-                content = stream.read()
-            default_pools = [
-                "{0}.{1}.pool.ntp.org".format(x, distro)
-                for x in range(0, cc_ntp.NR_POOL_SERVERS)]
-            self.assertEqual(
-                "servers []\npools {0}\n".format(default_pools),
-                content)
-            self.assertNotIn('Invalid config:', self.logs.getvalue())
+            for distro in cc_ntp.distros:
+                mycloud = self._get_cloud(distro)
+                ntpconfig = self._mock_ntp_client_config(distro=distro)
+                confpath = ntpconfig['confpath']
+                m_select.return_value = ntpconfig
+                cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
+                content = util.read_file_or_url('file://' + confpath).contents
+                pools = cc_ntp.generate_server_names(mycloud.distro.name)
+                self.assertEqual(
+                    "servers []\npools {0}\n".format(pools), content.decode())
+                self.assertNotIn('Invalid config:', self.logs.getvalue())
 
     @skipUnlessJsonSchema()
-    def test_ntp_handler_schema_validation_warns_non_string_item_type(self):
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
+                                                                      m_sel):
         """Ntp schema validation warns of non-strings in pools or servers.
 
         Schema validation is not strict, so ntp config is still be rendered.
""" invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" - "ntp.servers.1: None is not of type 'string'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers ['valid', None]\npools [123]\n", content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_sel.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" + "ntp.servers.1: None is not of type 'string'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers ['valid', None]\npools [123]\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_non_array_type(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_non_array_type(self, + m_select): """Ntp schema validation warns of non-array pools or servers types. Schema validation is not strict, so ntp config is still be rendered. """ invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: 123 is not of type 'array'\n" - "ntp.servers: 'non-array' is not of type 'array'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers non-array\npools 123\n", content) + + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: 123 is not of type 'array'\n" + "ntp.servers: 'non-array' is not of type 'array'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers non-array\npools 123\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_invalid_key_present(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_invalid_key_present(self, + m_select): """Ntp schema validation warns of invalid keys present in ntp config. Schema validation is not strict, so ntp config is still be rendered. 
""" invalid_config = { 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp: Additional properties are not allowed " - "('invalidkey' was unexpected)", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers []\npools ['0.mycompany.pool.ntp.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp: Additional properties are not allowed " + "('invalidkey' was unexpected)", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['0.mycompany.pool.ntp.org']\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_duplicates(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select): """Ntp schema validation warns of duplicates in servers or pools. Schema validation is not strict, so ntp config is still be rendered. @@ -381,74 +396,334 @@ class TestNtp(FilesystemMockingTestCase): invalid_config = { 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], 'servers': ['10.0.0.1', '10.0.0.1']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has " - "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has " - "non-unique elements", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" + " has non-unique elements\nntp.servers: " + "['10.0.0.1', '10.0.0.1'] has non-unique elements", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers ['10.0.0.1', '10.0.0.1']\n" + "pools ['0.mypool.org', '0.mypool.org']\n", content.decode()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_timesyncd(self, m_ntp_install): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_timesyncd(self, m_select): """Test ntp handler configures timesyncd""" - m_ntp_install.return_value = False - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'], - 'pools': ['0.mypool.org'], - } - mycloud = self._get_cloud(distro) - tsyncd_conf = 
self.tmp_path("timesyncd.conf", self.new_root) - # Create timesyncd.conf.tmpl - template = '{0}.tmpl'.format(tsyncd_conf) - print(template) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf, - template='timesyncd.conf') - - content = util.read_file_or_url('file://' + tsyncd_conf).contents - self.assertEqual( - "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - content.decode()) + servers = ['192.168.2.1', '192.168.2.2'] + pools = ['0.mypool.org'] + cfg = {'ntp': {'servers': servers, 'pools': pools}} + client = 'systemd-timesyncd' + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro, + client=client) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", + content.decode()) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_enabled_false(self, m_select): + """Test ntp handler does not run if enabled: false """ + cfg = {'ntp': {'enabled': False}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + self.assertEqual(0, m_select.call_count) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.distros.Distro.uses_systemd") + def test_ntp_the_whole_package(self, m_sysd, m_select): + """Test enabled config renders template, and restarts service """ + cfg = {'ntp': {'enabled': True}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + service_name = ntpconfig['service_name'] + m_select.return_value = ntpconfig + pools = cc_ntp.generate_server_names(mycloud.distro.name) + # force uses systemd path + m_sysd.return_value = True + with mock.patch('cloudinit.config.cc_ntp.util') as m_util: + # allow use of util.mergemanydict + m_util.mergemanydict.side_effect = util.mergemanydict + # default client is present + m_util.which.return_value = True + # use the config 'enabled' value + m_util.is_false.return_value = util.is_false( + cfg['ntp']['enabled']) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_util.subp.assert_called_with( + ['systemctl', 'reload-or-restart', + service_name], capture=True) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools {0}\n".format(pools), + content.decode()) + + def test_opensuse_picks_chrony(self): + """Test opensuse picks chrony or ntp on certain distro versions""" + # < 15.0 => ntp + self.m_sysinfo.return_value = {'dist': + ('openSUSE', '13.2', 'Harlequin')} + mycloud = self._get_cloud('opensuse') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('ntp', expected_client) + + # >= 15.0 and not openSUSE => chrony + self.m_sysinfo.return_value = {'dist': + ('SLES', '15.0', + 'SUSE Linux Enterprise Server 15')} + mycloud = self._get_cloud('sles') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('chrony', expected_client) + + # >= 15.0 and openSUSE and ver != 42 => chrony + self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed', + '20180326', + 'timbleweed')} + 
+        mycloud = self._get_cloud('opensuse')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('chrony', expected_client)
+
+    def test_ubuntu_xenial_picks_ntp(self):
+        """Test Ubuntu picks ntp on xenial release"""
+
+        self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
+        mycloud = self._get_cloud('ubuntu')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('ntp', expected_client)
 
-    def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self):
-        """write_ntp_config_template defaults pools servers upon empty config.
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_snappy_system_picks_timesyncd(self, m_which):
+        """Test snappy systems prefer installed clients"""
 
-        When both pools and servers are empty, default NR_POOL_SERVERS get
-        configured.
-        """
-        distro = 'sles'
-        mycloud = self._get_cloud(distro)
-        ntp_conf = self.tmp_path('ntp.conf', self.new_root)  # Doesn't exist
-        # Create ntp.conf.tmpl
-        with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
-            stream.write(NTP_TEMPLATE)
-        with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
-            cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
-        content = util.read_file_or_url('file://' + ntp_conf).contents
-        default_pools = [
-            "{0}.opensuse.pool.ntp.org".format(x)
-            for x in range(0, cc_ntp.NR_POOL_SERVERS)]
-        self.assertEqual(
-            "servers []\npools {0}\n".format(default_pools),
-            content.decode())
-        self.assertIn(
-            "Adding distro default ntp pool servers: {0}".format(
-                ",".join(default_pools)),
-            self.logs.getvalue())
+        # we are on ubuntu-core here
+        self.m_snappy.return_value = True
+        # ubuntu core systems will have timesyncd installed
+        m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
+                                    None, None, None])
+        distro = 'ubuntu'
+        mycloud = self._get_cloud(distro)
+        distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+        expected_client = 'systemd-timesyncd'
+        expected_cfg = distro_configs[expected_client]
+        expected_calls = []
+        # we only get to timesyncd
+        for client in mycloud.distro.preferred_ntp_clients[0:2]:
+            cfg = distro_configs[client]
+            expected_calls.append(mock.call(cfg['check_exe']))
+        result = cc_ntp.select_ntp_client(None, mycloud.distro)
+        m_which.assert_has_calls(expected_calls)
+        self.assertEqual(sorted(expected_cfg), sorted(cfg))
+        self.assertEqual(sorted(expected_cfg), sorted(result))
+
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_ntp_distro_searches_all_preferred_clients(self, m_which):
+        """Test select_ntp_client searches all distro preferred clients"""
+        # nothing is installed
+        m_which.return_value = None
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+            expected_client = mycloud.distro.preferred_ntp_clients[0]
+            expected_cfg = distro_configs[expected_client]
+            expected_calls = []
+            for client in mycloud.distro.preferred_ntp_clients:
+                cfg = distro_configs[client]
+                expected_calls.append(mock.call(cfg['check_exe']))
+            cc_ntp.select_ntp_client({}, mycloud.distro)
+            m_which.assert_has_calls(expected_calls)
+            self.assertEqual(sorted(expected_cfg), sorted(cfg))
+
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
+        """Test user_cfg.ntp_client='auto' defaults to distro search"""
+        # nothing is installed
+        m_which.return_value = None
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+            expected_client = mycloud.distro.preferred_ntp_clients[0]
+            expected_cfg = distro_configs[expected_client]
+            expected_calls = []
+            for client in mycloud.distro.preferred_ntp_clients:
+                cfg = distro_configs[client]
+                expected_calls.append(mock.call(cfg['check_exe']))
+            cc_ntp.select_ntp_client('auto', mycloud.distro)
+            m_which.assert_has_calls(expected_calls)
+            self.assertEqual(sorted(expected_cfg), sorted(cfg))
+
+    @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
+    @mock.patch('cloudinit.cloud.Cloud.get_template_filename')
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_ntp_custom_client_overrides_installed_clients(self, m_which,
+                                                           m_tmpfn, m_write):
+        """Test user client is installed despite other clients present"""
+        client = 'ntpdate'
+        cfg = {'ntp': {'ntp_client': client}}
+        for distro in cc_ntp.distros:
+            # client is not installed
+            m_which.side_effect = iter([None])
+            mycloud = self._get_cloud(distro)
+            with mock.patch.object(mycloud.distro,
+                                   'install_packages') as m_install:
+                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+            m_install.assert_called_with([client])
+            m_which.assert_called_with(client)
+
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
+        """Test distro system_config overrides builtin preferred ntp clients"""
+        system_client = 'chrony'
+        sys_cfg = {'ntp_client': system_client}
+        # no clients installed
+        m_which.return_value = None
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
+            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+            expected_cfg = distro_configs[system_client]
+            result = cc_ntp.select_ntp_client(None, mycloud.distro)
+            self.assertEqual(sorted(expected_cfg), sorted(result))
+            m_which.assert_has_calls([])
+
+    @mock.patch('cloudinit.config.cc_ntp.util.which')
+    def test_ntp_user_config_overrides_system_cfg(self, m_which):
+        """Test user-data overrides system_config ntp_client"""
+        system_client = 'chrony'
+        sys_cfg = {'ntp_client': system_client}
+        user_client = 'systemd-timesyncd'
+        # no clients installed
+        m_which.return_value = None
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
+            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+            expected_cfg = distro_configs[user_client]
+            result = cc_ntp.select_ntp_client(user_client, mycloud.distro)
+            self.assertEqual(sorted(expected_cfg), sorted(result))
+            m_which.assert_has_calls([])
+
+    @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
+    @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
+    def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
+        custom = r'\n#MyCustomTemplate'
+        user_template = NTP_TEMPLATE + custom
+        confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
+        cfg = {
+            'ntp': {
+                'pools': ['mypool.org'],
+                'ntp_client': 'myntpd',
+                'config': {
+                    'check_exe': 'myntpd',
+                    'confpath': confpath,
+                    'packages': ['myntp'],
+                    'service_name': 'myntp',
+                    'template': user_template,
+                }
+            }
+        }
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+            with mock.patch(mock_path, self.new_root):
+                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+            content = util.read_file_or_url('file://' + confpath).contents
+            self.assertEqual(
+                "servers []\npools ['mypool.org']\n%s" % custom,
+                content.decode())
+
+    @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
+    @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
+    @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_user_provided_config_template_only(self, m_select, m_install,
+                                                    m_reload, m_schema):
+        """Test custom template for default client"""
+        custom = r'\n#MyCustomTemplate'
+        user_template = NTP_TEMPLATE + custom
+        client = 'chrony'
+        cfg = {
+            'pools': ['mypool.org'],
+            'ntp_client': client,
+            'config': {
+                'template': user_template,
+            }
+        }
+        expected_merged_cfg = {
+            'check_exe': 'chronyd',
+            'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
+            'template_name': 'client.conf', 'template': user_template,
+            'service_name': 'chrony', 'packages': ['chrony']}
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            ntpconfig = self._mock_ntp_client_config(client=client,
+                                                     distro=distro)
+            confpath = ntpconfig['confpath']
+            m_select.return_value = ntpconfig
+            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+            with mock.patch(mock_path, self.new_root):
+                cc_ntp.handle('notimportant',
+                              {'ntp': cfg}, mycloud, None, None)
+            content = util.read_file_or_url('file://' + confpath).contents
+            self.assertEqual(
+                "servers []\npools ['mypool.org']\n%s" % custom,
+                content.decode())
+        m_schema.assert_called_with(expected_merged_cfg)
+
+
+class TestSupplementalSchemaValidation(CiTestCase):
+
+    def test_error_on_missing_keys(self):
+        """ValueError raised reporting any missing required ntp:config keys"""
+        cfg = {}
+        match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
+                 ' keys: check_exe, confpath, packages, service_name')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_requiring_either_template_or_template_name(self):
+        """ValueError raised if both template and template_name are None."""
+        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+               'template': None, 'template_name': None, 'packages': []}
+        match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
+                 ' or ntp:config:template_name values are required')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_on_non_list_values(self):
+        """ValueError raised when packages is not of type list."""
+        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+               'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
+        match = (r'Invalid ntp configuration:\\nExpected a list of required'
+                 ' package names for ntp:config:packages. Found \(NOPE\)')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_on_non_string_values(self):
+        """ValueError raised for any values expected as string type."""
+        cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
+               'template': 4, 'template_name': 5, 'packages': []}
+        errors = [
+            'Expected a config file path ntp:config:confpath. Found (1)',
+            'Expected a string type for ntp:config:check_exe. Found (2)',
+            'Expected a string type for ntp:config:service_name. Found (3)',
+            'Expected a string type for ntp:config:template. Found (4)',
+            'Expected a string type for ntp:config:template_name. Found (5)']
+        with self.assertRaises(ValueError) as context_mgr:
+            cc_ntp.supplemental_schema_validation(cfg)
+        error_msg = str(context_mgr.exception)
+        for error in errors:
+            self.assertIn(error, error_msg)
 
 # vi: ts=4 expandtab
--
cgit v1.2.3

From 4b86ab9a25b512420ecfe98953a3f3a6e4b4bba1 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 12 Apr 2018 15:51:07 -0600
Subject: renderer: support unicode in render_from_file.

If a file passed to render_from_file had non-ascii text, then jinja in
python2 would decode it as ascii, which would cause a UnicodeDecodeError.
This issue can be re-created in python2 with just:

   'can\xe2\x80\x99t'.decode()

The solution here is to explicitly pass in a unicode-supporting type
(py3 str, py2 unicode). Those are six.text_type. Then jinja does not try
to decode.

The reason we hit this is that load_file calls decode_binary.
decode_binary believes it has no work to do if it got a six.string_types
instance:

   isinstance('can\xe2\x80\x99t', six.string_types) == True

So it returns the original string, which will blow up for jinja. Our fix
here, then, is to load the file in binary mode and explicitly decode it
to utf-8. Then in python2 we'll have a unicode type and in python3 we'll
have a string type.
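The failure and the fix can be seen in a bare python2 session (an
illustrative transcript, not part of the patch):

    $ python2
    >>> 'can\xe2\x80\x99t'.decode()           # implicit ascii decode
    Traceback (most recent call last):
      ...
    UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 3:
    ordinal not in range(128)
    >>> 'can\xe2\x80\x99t'.decode('utf-8')    # explicit utf-8, as the fix does
    u'can\u2019t'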
---
 cloudinit/templater.py                           | 10 +++++-
 templates/chrony.conf.debian.tmpl                |  2 +-
 templates/chrony.conf.ubuntu.tmpl                |  2 +-
 tests/unittests/test_handler/test_handler_ntp.py |  4 +--
 tests/unittests/test_templating.py               | 41 +++++++++++++++++++++++-
 5 files changed, 53 insertions(+), 6 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index b3ea64e4..9a087e1c 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -121,7 +121,11 @@ def detect_template(text):
 def render_from_file(fn, params):
     if not params:
         params = {}
-    template_type, renderer, content = detect_template(util.load_file(fn))
+    # jinja in python2 uses unicode internally.  All py2 str will be decoded.
+    # If it is given a str that has non-ascii then it will raise a
+    # UnicodeDecodeError.  So we explicitly convert to unicode type here.
+    template_type, renderer, content = detect_template(
+        util.load_file(fn, decode=False).decode('utf-8'))
     LOG.debug("Rendering content of '%s' using renderer %s",
               fn, template_type)
     return renderer(content, params)
@@ -132,11 +136,15 @@ def render_to_file(fn, outfn, params, mode=0o644):
 
 
 def render_string_to_file(content, outfn, params, mode=0o644):
+    """Render string (or py2 unicode) to file.
+    Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
     contents = render_string(content, params)
     util.write_file(outfn, contents, mode=mode)
 
 
 def render_string(content, params):
+    """Render string (or py2 unicode).
+    Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
     if not params:
         params = {}
     template_type, renderer, content = detect_template(content)

diff --git a/templates/chrony.conf.debian.tmpl b/templates/chrony.conf.debian.tmpl
index e72bfee1..661bf04e 100644
--- a/templates/chrony.conf.debian.tmpl
+++ b/templates/chrony.conf.debian.tmpl
@@ -30,7 +30,7 @@ logdir /var/log/chrony
 maxupdateskew 100.0
 
 # This directive enables kernel synchronisation (every 11 minutes) of the
-# real-time clock. Note that it can't be used along with the 'rtcfile' directive.
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
 rtcsync
 
 # Step the system clock instead of slewing it if the adjustment is larger than

diff --git a/templates/chrony.conf.ubuntu.tmpl b/templates/chrony.conf.ubuntu.tmpl
index da7f16a4..50a6f518 100644
--- a/templates/chrony.conf.ubuntu.tmpl
+++ b/templates/chrony.conf.ubuntu.tmpl
@@ -34,7 +34,7 @@ logdir /var/log/chrony
 maxupdateskew 100.0
 
 # This directive enables kernel synchronisation (every 11 minutes) of the
-# real-time clock. Note that it can't be used along with the 'rtcfile' directive.
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
 rtcsync
 
 # Step the system clock instead of slewing it if the adjustment is larger than

diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 1b3ca570..02676aa6 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -272,12 +272,12 @@ class TestNtp(FilesystemMockingTestCase):
                     expected_servers = '\n'.join([
                         'server {0} iburst'.format(srv) for srv in servers])
                     print('distro=%s client=%s' % (distro, client))
-                    self.assertIn(expected_servers, content.decode(),
+                    self.assertIn(expected_servers, content.decode('utf-8'),
                                   ('failed to render {0} conf'
                                    ' for distro:{1}'.format(client, distro)))
                     expected_pools = '\n'.join([
                         'pool {0} iburst'.format(pool) for pool in pools])
-                    self.assertIn(expected_pools, content.decode(),
+                    self.assertIn(expected_pools, content.decode('utf-8'),
                                   ('failed to render {0} conf'
                                    ' for distro:{1}'.format(client, distro)))
                 elif client == 'systemd-timesyncd':

diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 53154d33..1080e135 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -10,6 +10,7 @@ from cloudinit.tests import helpers as test_helpers
 import textwrap
 
 from cloudinit import templater
+from cloudinit.util import load_file, write_file
 
 try:
     import Cheetah
@@ -19,7 +20,17 @@ except ImportError:
     HAS_CHEETAH = False
 
 
-class TestTemplates(test_helpers.TestCase):
+class TestTemplates(test_helpers.CiTestCase):
+
+    jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n'
+    jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8')
+
+    @staticmethod
+    def add_header(renderer, data):
+        """Return text (py2 unicode/py3 str) with template header."""
+        if isinstance(data, bytes):
+            data = data.decode('utf-8')
+        return "## template: %s\n" % renderer + data
+
     def test_render_basic(self):
         in_data = textwrap.dedent("""
             ${b}
@@ -106,4 +117,32 @@ $a,$b'''
                                             'codename': codename})
         self.assertEqual(ex_data, out_data)
 
+    def test_jinja_nonascii_render_to_string(self):
+        """Test jinja render_to_string with non-ascii content."""
+        self.assertEqual(
+            templater.render_string(
+                self.add_header("jinja", self.jinja_utf8), {"name": "bob"}),
+            self.jinja_utf8_rbob)
+
+    def test_jinja_nonascii_render_to_file(self):
+        """Test jinja render_to_file of a filename with non-ascii content."""
+        tmpl_fn = self.tmp_path("j-render-to-file.template")
+        out_fn = self.tmp_path("j-render-to-file.out")
+        write_file(filename=tmpl_fn, omode="wb",
+                   content=self.add_header(
+                       "jinja", self.jinja_utf8).encode('utf-8'))
+        templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"})
+        result = load_file(out_fn, decode=False).decode('utf-8')
+        self.assertEqual(result, self.jinja_utf8_rbob)
+
+    def test_jinja_nonascii_render_from_file(self):
+        """Test jinja render_from_file with non-ascii content."""
+        tmpl_fn = self.tmp_path("j-render-from-file.template")
self.tmp_path("j-render-from-file.template") + write_file(tmpl_fn, omode="wb", + content=self.add_header( + "jinja", self.jinja_utf8).encode('utf-8')) + result = templater.render_from_file(tmpl_fn, {"name": "bob"}) + self.assertEqual(result, self.jinja_utf8_rbob) + + # vi: ts=4 expandtab -- cgit v1.2.3 From acca826adf39ddfedde78cfbfc47e81a06c6f42a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 18 Apr 2018 03:35:41 -0600 Subject: pycodestyle: Fix invalid escape sequences in string literals. Python has deprecated these invalid string literals now https://bugs.python.org/issue27364 and pycodestyle is identifying them with a W605 warning. https://github.com/PyCQA/pycodestyle/pull/676 So basically, any use of \ not followed by one of [\'"abfnrtv] or \ooo (octal) \xhh (hex) or a newline is invalid. This is most comomnly seen for us in regex. To solve, you either: a.) use a raw string r'...' b.) correctly escape the \ that was not intended to be interpreted. --- cloudinit/analyze/__main__.py | 2 +- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/config/cc_rsyslog.py | 4 ++-- cloudinit/distros/freebsd.py | 6 +++--- cloudinit/util.py | 6 +++--- tests/cloud_tests/testcases/base.py | 2 +- tests/unittests/test_util.py | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 3ba5903f..f8613656 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -69,7 +69,7 @@ def analyze_blame(name, args): """ (infh, outfh) = configure_io(args) blame_format = ' %ds (%n)' - r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE) + r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) for idx, record in enumerate(show.show_events(_get_events(infh), blame_format)): srecs = sorted(filter(r.match, record), reverse=True) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 5b9cbca0..afaca464 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
---
 cloudinit/analyze/__main__.py             | 2 +-
 cloudinit/config/cc_apt_configure.py      | 2 +-
 cloudinit/config/cc_power_state_change.py | 2 +-
 cloudinit/config/cc_rsyslog.py            | 4 ++--
 cloudinit/distros/freebsd.py              | 6 +++---
 cloudinit/util.py                         | 6 +++---
 tests/cloud_tests/testcases/base.py       | 2 +-
 tests/unittests/test_util.py              | 2 +-
 8 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 3ba5903f..f8613656 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -69,7 +69,7 @@ def analyze_blame(name, args):
     """
     (infh, outfh) = configure_io(args)
     blame_format = '     %ds (%n)'
-    r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE)
+    r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
     for idx, record in enumerate(show.show_events(_get_events(infh),
                                                   blame_format)):
         srecs = sorted(filter(r.match, record), reverse=True)

diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 5b9cbca0..afaca464 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for
 
 All source entries in ``apt-sources`` that match regex in
 ``add_apt_repo_match`` will be added to the system using
 ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\w-]+:\w``
+to ``^[\\w-]+:\\w``
 
 **Add source list entries:**

diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 4da3a588..50b37470 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -74,7 +74,7 @@ def givecmdline(pid):
         if util.is_FreeBSD():
             (output, _err) = util.subp(['procstat', '-c', str(pid)])
             line = output.splitlines()[1]
-            m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+            m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
             return m.group(2)
         else:
             return util.load_file("/proc/%s/cmdline" % pid)

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index af08788c..27d2366c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__)
 COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
 HOST_PORT_RE = re.compile(
     r'^(?P<proto>[@]{0,2})'
-    '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
-    '([:](?P<port>[0-9]+))?$')
+    r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+    r'([:](?P<port>[0-9]+))?$')
 
 
 def reload_syslog(command=DEF_RELOAD, systemd=False):

diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 754d3df6..099fac5c 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -110,7 +110,7 @@ class Distro(distros.Distro):
         if dev.startswith('lo'):
             return dev
 
-        n = re.search('\d+$', dev)
+        n = re.search(r'\d+$', dev)
         index = n.group(0)
 
         (out, err) = util.subp(['ifconfig', '-a'])
@@ -118,7 +118,7 @@ class Distro(distros.Distro):
                           if len(x.split()) > 0]
         bsddev = 'NOT_FOUND'
         for line in ifconfigoutput:
-            m = re.match('^\w+', line)
+            m = re.match(r'^\w+', line)
             if m:
                 if m.group(0).startswith('lo'):
                     continue
@@ -128,7 +128,7 @@ class Distro(distros.Distro):
                 break
 
         # Replace the index with the one we're after.
-        bsddev = re.sub('\d+$', index, bsddev)
+        bsddev = re.sub(r'\d+$', index, bsddev)
         LOG.debug("Using network interface %s", bsddev)
         return bsddev

diff --git a/cloudinit/util.py b/cloudinit/util.py
index acdc0d85..1717b529 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1446,7 +1446,7 @@ def get_config_logfiles(cfg):
     for fmt in get_output_cfg(cfg, None):
         if not fmt:
             continue
-        match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+        match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt)
         if not match:
             continue
         target = match.group('target')
@@ -2275,8 +2275,8 @@ def parse_mount(path):
     # the regex is a bit complex. to better understand this regex see:
     # https://regex101.com/r/2F6c1k/1
     # https://regex101.com/r/T2en7a/1
-    regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \
-        '(?=(?:type)[\s]+([\S]+)|\(([^,]*))'
+    regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
+             r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
     for line in mount_locs:
         m = re.search(regex, line)
         if not m:

diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 7598d469..4fda8f91 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -235,7 +235,7 @@ class CloudTestCase(unittest.TestCase):
             'found unexpected kvm availability-zone %s' %
             v1_data['availability-zone'])
         self.assertIsNotNone(
-            re.match('[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
+            re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
                      v1_data['instance-id']),
             'kvm instance-id is not a UUID: %s' % v1_data['instance-id'])
         self.assertIn('ubuntu', v1_data['local-hostname'])

diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 50101906..7cbd5538 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -800,7 +800,7 @@ class TestSubp(helpers.CiTestCase):
         os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
 
         self.assertRaisesRegex(util.ProcessExecutionError,
-                               'Missing #! in script\?',
+                               r'Missing #! in script\?',
                                util.subp, (noshebang,))
 
     def test_returns_none_if_no_capture(self):
--
cgit v1.2.3

From 4c573d0e0173d2b1e99a383c54a0a6c957aa1cbb Mon Sep 17 00:00:00 2001
From: Mike Gerdts
Date: Wed, 18 Apr 2018 13:55:17 -0400
Subject: DataSourceSmartOS: fix hang when metadata service is down

If the metadata service in the host is down while a guest that uses
DataSourceSmartOS is booting, the request from the guest falls into the
bit bucket. When the metadata service is eventually started, the guest
has no awareness of this and does not resend the request. This results
in cloud-init hanging forever, with a guest reboot as the only recovery
option.

This fix updates the metadata protocol to implement the initialization
phase, just as is implemented by mdata-get and related utilities. The
initialization phase includes draining all pending data from the serial
port, writing an empty command and getting an expected error message in
reply. If the initialization phase times out, it is retried every five
seconds. Each timeout results in a warning message:

  "Timeout while initializing metadata client. Is the host metadata
  service running?"

By default, warning messages are logged to the console, thus the reason
for a hung boot is readily apparent.

LP: #1667735
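The initialization phase amounts to the small handshake sketched here.
negotiate() is a hypothetical stand-alone rendering of what the patch's
JoyentMetadataClient._negotiate (in the diff below) does:

    def negotiate(fp):
        # With any stale bytes already drained from the serial device,
        # ask the host to speak V2 and require the expected reply.
        fp.write(b'NEGOTIATE V2\n')
        fp.flush()
        reply = fp.readline().rstrip()
        if reply != b'V2_OK':
            raise IOError('invalid response %r to "NEGOTIATE V2"' % reply)

A per-byte read timeout turns a dead metadata service into a
JoyentMetadataTimeoutException rather than a blocked read, so the caller
can retry the handshake every five seconds instead of hanging forever.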
# # Author: Ben Howard # @@ -21,6 +22,7 @@ import base64 import binascii +import errno import json import os import random @@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource): self.md_client) return False + # Open once for many requests, rather than once for each request + self.md_client.open_transport() + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): smartos_noun, strip = attribute md[ci_noun] = self.md_client.get(smartos_noun, strip=strip) @@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource): for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items(): md[ci_noun] = self.md_client.get_json(smartos_noun) + self.md_client.close_transport() + # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then # executed. It may be of any format that would be considered @@ -316,6 +323,10 @@ class JoyentMetadataFetchException(Exception): pass +class JoyentMetadataTimeoutException(JoyentMetadataFetchException): + pass + + class JoyentMetadataClient(object): """ A client implementing v2 of the Joyent Metadata Protocol Specification. @@ -360,6 +371,47 @@ class JoyentMetadataClient(object): LOG.debug('Value "%s" found.', value) return value + def _readline(self): + """ + Reads a line a byte at a time until \n is encountered. Returns an + ascii string with the trailing newline removed. + + If a timeout (per-byte) is set and it expires, a + JoyentMetadataFetchException will be thrown. + """ + response = [] + + def as_ascii(): + return b''.join(response).decode('ascii') + + msg = "Partial response: '%s'" + while True: + try: + byte = self.fp.read(1) + if len(byte) == 0: + raise JoyentMetadataTimeoutException(msg % as_ascii()) + if byte == b'\n': + return as_ascii() + response.append(byte) + except OSError as exc: + if exc.errno == errno.EAGAIN: + raise JoyentMetadataTimeoutException(msg % as_ascii()) + raise + + def _write(self, msg): + self.fp.write(msg.encode('ascii')) + self.fp.flush() + + def _negotiate(self): + LOG.debug('Negotiating protocol V2') + self._write('NEGOTIATE V2\n') + response = self._readline() + LOG.debug('read "%s"', response) + if response != 'V2_OK': + raise JoyentMetadataFetchException( + 'Invalid response "%s" to "NEGOTIATE V2"' % response) + LOG.debug('Negotiation complete') + def request(self, rtype, param=None): request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) message_body = ' '.join((request_id, rtype,)) @@ -374,18 +426,11 @@ class JoyentMetadataClient(object): self.open_transport() need_close = True - self.fp.write(msg.encode('ascii')) - self.fp.flush() - - response = bytearray() - response.extend(self.fp.read(1)) - while response[-1:] != b'\n': - response.extend(self.fp.read(1)) - + self._write(msg) + response = self._readline() if need_close: self.close_transport() - response = response.rstrip().decode('ascii') LOG.debug('Read "%s" from metadata transport.', response) if 'SUCCESS' not in response: @@ -450,6 +495,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.socketpath) self.fp = sock.makefile('rwb') + self._negotiate() def exists(self): return os.path.exists(self.socketpath) @@ -459,8 +505,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): class JoyentMetadataSerialClient(JoyentMetadataClient): - def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): - super(JoyentMetadataSerialClient, self).__init__(smartos_type) + def __init__(self, device, timeout=10, 
smartos_type=SMARTOS_ENV_KVM, + fp=None): + super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp) self.device = device self.timeout = timeout @@ -468,10 +515,50 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): return os.path.exists(self.device) def open_transport(self): - ser = serial.Serial(self.device, timeout=self.timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % self.device) - self.fp = ser + if self.fp is None: + ser = serial.Serial(self.device, timeout=self.timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.device) + self.fp = ser + self._flush() + self._negotiate() + + def _flush(self): + LOG.debug('Flushing input') + # Read any pending data + timeout = self.fp.timeout + self.fp.timeout = 0.1 + while True: + try: + self._readline() + except JoyentMetadataTimeoutException: + break + LOG.debug('Input empty') + + # Send a newline and expect "invalid command". Keep trying until + # successful. Retry rather frequently so that the "Is the host + # metadata service running" appears on the console soon after someone + # attaches in an effort to debug. + if timeout > 5: + self.fp.timeout = 5 + else: + self.fp.timeout = timeout + while True: + LOG.debug('Writing newline, expecting "invalid command"') + self._write('\n') + try: + response = self._readline() + if response == 'invalid command': + break + if response == 'FAILURE': + LOG.debug('Got "FAILURE". Retrying.') + continue + LOG.warning('Unexpected response "%s" during flush', response) + except JoyentMetadataTimeoutException: + LOG.warning('Timeout while initializing metadata client. ' + + 'Is the host metadata service running?') + LOG.debug('Got "invalid command". Flush complete.') + self.fp.timeout = timeout def __repr__(self): return "%s(device=%s, timeout=%s)" % ( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 88bae5f9..2bea7a17 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1,4 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. +# Copyright (c) 2018, Joyent, Inc. # # Author: Ben Howard # @@ -324,6 +325,7 @@ class PsuedoJoyentClient(object): if data is None: data = MOCK_RETURNS.copy() self.data = data + self._is_open = False return def get(self, key, default=None, strip=False): @@ -344,6 +346,14 @@ class PsuedoJoyentClient(object): def exists(self): return True + def open_transport(self): + assert(not self._is_open) + self._is_open = True + + def close_transport(self): + assert(self._is_open) + self._is_open = False + class TestSmartOSDataSource(FilesystemMockingTestCase): def setUp(self): @@ -592,8 +602,46 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): mydscfg['disk_aliases']['FOO']) +class ShortReader(object): + """Implements a 'read' interface for bytes provided. + much like io.BytesIO but the 'endbyte' acts as if EOF. 
+ When it is reached, a short read will be returned.""" + def __init__(self, initial_bytes, endbyte=b'\0'): + self.data = initial_bytes + self.index = 0 + self.len = len(self.data) + self.endbyte = endbyte + + @property + def emptied(self): + return self.index >= self.len + + def read(self, size=-1): + """Read size bytes but not past a null.""" + if size == 0 or self.index >= self.len: + return b'' + + rsize = size + if size < 0 or size + self.index > self.len: + rsize = self.len - self.index + + next_null = self.data.find(self.endbyte, self.index, rsize) + if next_null >= 0: + rsize = next_null - self.index + 1 + i = self.index + self.index += rsize + ret = self.data[i:i + rsize] + if len(ret) and ret[-1:] == self.endbyte: + ret = ret[:-1] + return ret + + class TestJoyentMetadataClient(FilesystemMockingTestCase): + invalid = b'invalid command\n' + failure = b'FAILURE\n' + v2_ok = b'V2_OK\n' + def setUp(self): super(TestJoyentMetadataClient, self).setUp() @@ -636,6 +684,11 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): return DataSourceSmartOS.JoyentMetadataClient( fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + def _get_serial_client(self): + self.serial.timeout = 1 + return DataSourceSmartOS.JoyentMetadataSerialClient(None, + fp=self.serial) + def assertEndsWith(self, haystack, prefix): self.assertTrue(haystack.endswith(prefix), "{0} does not end with '{1}'".format( @@ -646,12 +699,14 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): "{0} does not start with '{1}'".format( repr(haystack), prefix)) + def assertNoMoreSideEffects(self, obj): + self.assertRaises(StopIteration, obj) + def test_get_metadata_writes_a_single_line(self): client = self._get_client() client.get('some_key') self.assertEqual(1, self.serial.write.call_count) written_line = self.serial.write.call_args[0][0] - print(type(written_line)) self.assertEndsWith(written_line.decode('ascii'), b'\n'.decode('ascii')) self.assertEqual(1, written_line.count(b'\n')) @@ -737,6 +792,52 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): client._checksum = lambda _: self.response_parts['crc'] self.assertIsNone(client.get('some_key')) + def test_negotiate(self): + client = self._get_client() + reader = ShortReader(self.v2_ok) + client.fp.read.side_effect = reader.read + client._negotiate() + self.assertTrue(reader.emptied) + + def test_negotiate_short_response(self): + client = self._get_client() + # chopped '\n' from v2_ok. 
+ reader = ShortReader(self.v2_ok[:-1] + b'\0') + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, + client._negotiate) + self.assertTrue(reader.emptied) + + def test_negotiate_bad_response(self): + client = self._get_client() + reader = ShortReader(b'garbage\n' + self.v2_ok) + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client._negotiate) + self.assertEqual(self.v2_ok, client.fp.read()) + + def test_serial_open_transport(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_failure(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage' + b'\0' + self.failure + + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_many_timeouts(self): + client = self._get_serial_client() + reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + class TestNetworkConversion(TestCase): def test_convert_simple(self): -- cgit v1.2.3 From 6d48d265a0548a2dc23e587f2a335d4e38e8db90 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 18 Apr 2018 15:22:42 -0600 Subject: net: Depend on iproute2's ip instead of net-tools ifconfig or route MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The net-tools package is deprecated and will eventually be dropped. Use "ip route", "link" or "address" instead of "ifconfig" or "route" calls. Cloud-init can now run in an environment that no longer has net-tools. This affects the network and route printing emitted to cloud-config-output.log as well as the cc_disable_ec2_metadata module. 
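The probe-and-fallback pattern this change applies can be summarized by the following minimal sketch (the helper name here is illustrative; util.which and util.subp are existing cloud-init helpers):

    from cloudinit import util

    def _collect_addr_output():
        # Prefer iproute2's "ip" when it is installed; fall back to
        # net-tools' "ifconfig" only when it is not.
        if util.which('ip'):
            (out, _err) = util.subp(['ip', 'addr', 'show'])
        elif util.which('ifconfig'):
            (out, _err) = util.subp(['ifconfig', '-a'], rcs=[0, 1])
        else:
            out = ''  # neither tool present; a warning is logged instead
        return out

Route collection follows the same precedence, trying "ip route list" before "netstat".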
Additional changes:  - separate readResource and resourceLocation into standalone test    functions  - Fix ipv4 address rows to report scopes represented by ip addr show  - Formatted route/address output now handles multiple ipv4 and ipv6    addresses on a single interface Co-authored-by: James Hogarth Co-authored-by: Robert Schweikert --- cloudinit/config/cc_disable_ec2_metadata.py | 14 +- .../config/tests/test_disable_ec2_metadata.py | 50 +++ cloudinit/net/network_state.py | 11 +- cloudinit/netinfo.py | 345 ++++++++++++++++----- cloudinit/tests/helpers.py | 40 +-- cloudinit/tests/test_netinfo.py | 186 ++++++----- tests/data/netinfo/netdev-formatted-output | 10 + tests/data/netinfo/new-ifconfig-output | 18 ++ tests/data/netinfo/old-ifconfig-output | 18 ++ tests/data/netinfo/route-formatted-output | 22 ++ tests/data/netinfo/sample-ipaddrshow-output | 13 + tests/data/netinfo/sample-iproute-output-v4 | 3 + tests/data/netinfo/sample-iproute-output-v6 | 11 + tests/data/netinfo/sample-route-output-v4 | 5 + tests/data/netinfo/sample-route-output-v6 | 13 + tests/unittests/test_filters/test_launch_index.py | 10 +- tests/unittests/test_merging.py | 2 +- tests/unittests/test_runs/test_merge_run.py | 2 +- tests/unittests/test_util.py | 14 +- 19 files changed, 583 insertions(+), 204 deletions(-) create mode 100644 cloudinit/config/tests/test_disable_ec2_metadata.py create mode 100644 tests/data/netinfo/netdev-formatted-output create mode 100644 tests/data/netinfo/new-ifconfig-output create mode 100644 tests/data/netinfo/old-ifconfig-output create mode 100644 tests/data/netinfo/route-formatted-output create mode 100644 tests/data/netinfo/sample-ipaddrshow-output create mode 100644 tests/data/netinfo/sample-iproute-output-v4 create mode 100644 tests/data/netinfo/sample-iproute-output-v6 create mode 100644 tests/data/netinfo/sample-route-output-v4 create mode 100644 tests/data/netinfo/sample-route-output-v6 (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index c56319b5..885b3138 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: - util.subp(REJECT_CMD, capture=False) + reject_cmd = None + if util.which('ip'): + reject_cmd = REJECT_CMD_IP + elif util.which('ifconfig'): + reject_cmd = REJECT_CMD_IF + else: + log.error(('Neither "route" nor "ip" command found, unable to ' + 'manipulate routing table')) + return + util.subp(reject_cmd, capture=False) else: log.debug(("Skipping module named %s," " disabling the ec2 route not enabled"), name) diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py new file mode 100644 index 00000000..67646b03 --- /dev/null +++ b/cloudinit/config/tests/test_disable_ec2_metadata.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests cc_disable_ec2_metadata handler""" + +import cloudinit.config.cc_disable_ec2_metadata as ec2_meta + +from cloudinit.tests.helpers import CiTestCase, mock + +import logging + +LOG = logging.getLogger(__name__) + +DISABLE_CFG = {'disable_ec2_metadata': 'true'} + + +class TestEC2MetadataRoute(CiTestCase): + + with_logs = True + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ifconfig(self, m_subp, m_which): + """Set the route if ifconfig command is available""" + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['route', 'add', '-host', '169.254.169.254', 'reject'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ip(self, m_subp, m_which): + """Set the route if ip command is available""" + m_which.side_effect = lambda x: x if x == 'ip' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_no_tool(self, m_subp, m_which): + """Log error when neither route nor ip commands are available""" + m_which.return_value = None # Find neither ifconfig nor ip + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + self.assertEqual( + [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 6d63e5c5..72c803eb 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -7,6 +7,8 @@ import copy import functools import logging +import socket +import struct import six @@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix): This is the inverse of ipv4_mask_to_net_prefix. 24 -> "255.255.255.0" Also supports input as a string.""" - - mask = [0, 0, 0, 0] - for i in list(range(0, int(prefix))): - idx = int(i / 8) - mask[idx] = mask[idx] + (1 << (7 - i % 8)) - return ".".join([str(x) for x in mask]) + mask = socket.inet_ntoa( + struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff))) + return mask def ipv4_mask_to_net_prefix(mask): diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 993b26cf..f0906160 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -8,9 +8,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from copy import copy, deepcopy import re from cloudinit import log as logging +from cloudinit.net.network_state import net_prefix_to_ipv4_mask from cloudinit import util from cloudinit.simpletable import SimpleTable @@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable LOG = logging.getLogger() -def netdev_info(empty=""): - fields = ("hwaddr", "addr", "bcast", "mask") - (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) +DEFAULT_NETDEV_INFO = { + "ipv4": [], + "ipv6": [], + "hwaddr": "", + "up": False +} + + +def _netdev_info_iproute(ipaddr_out): + """ + Get network device dicts from ip route and ip link info. + + @param ipaddr_out: Output string from 'ip addr show' command. 
+ + @returns: A dict of device info keyed by network device name containing + device configuration values. + @raise: TypeError if ipaddr_out isn't a string. + """ devs = {} - for line in str(ifcfg_out).splitlines(): + dev_name = None + for num, line in enumerate(ipaddr_out.splitlines()): + m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line) + if m: + dev_name = m.group('dev').lower().split('@')[0] + flags = m.group('flags').split(',') + devs[dev_name] = { + 'ipv4': [], 'ipv6': [], 'hwaddr': '', + 'up': bool('UP' in flags and 'LOWER_UP' in flags), + } + elif 'inet6' in line: + m = re.match( + r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + devs[dev_name]['ipv6'].append(m.groupdict()) + elif 'inet' in line: + m = re.match( + r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s' + r'(?P<scope>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + match = m.groupdict() + cidr4 = match.pop('cidr4') + addr, _, prefix = cidr4.partition('/') + if not prefix: + prefix = '32' + devs[dev_name]['ipv4'].append({ + 'ip': addr, + 'bcast': match['bcast'] if match['bcast'] else '', + 'mask': net_prefix_to_ipv4_mask(prefix), + 'scope': match['scope']}) + elif 'link' in line: + m = re.match( + r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + if m.group('link_type') == 'ether': + devs[dev_name]['hwaddr'] = m.group('hwaddr') + else: + devs[dev_name]['hwaddr'] = '' + else: + continue + return devs + + +def _netdev_info_ifconfig(ifconfig_data): + # fields that need to be returned in devs for each dev + devs = {} + for line in ifconfig_data.splitlines(): if len(line) == 0: continue if line[0] not in ("\t", " "): curdev = line.split()[0] - devs[curdev] = {"up": False} - for field in fields: - devs[curdev][field] = "" + # current ifconfig pops a ':' on the end of the device + if curdev.endswith(':'): + curdev = curdev[:-1] + if curdev not in devs: + devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO) toks = line.lower().strip().split() if toks[0] == "up": devs[curdev]['up'] = True @@ -39,41 +113,50 @@ def netdev_info(empty=""): if re.search(r"flags=\d+", toks[i + 1]) + if res: + devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + return devs + + +def netdev_info(empty=""): + devs = {} + if util.which('ip'): + # Try iproute first of all + (ipaddr_out, _err) = util.subp(["ip", "addr", "show"]) + devs = _netdev_info_iproute(ipaddr_out) + elif util.which('ifconfig'): + # Fall back to net-tools if iproute2 is not present + (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) + devs = _netdev_info_ifconfig(ifcfg_out) + else: + LOG.warning( + "Could not print networks: missing 'ip' and 'ifconfig' commands") if empty != "": for (_devname, dev) in devs.items(): @@ -84,14 +167,94 @@ def netdev_info(empty=""): return devs -def route_info(): - (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1]) +def _netdev_route_info_iproute(iproute_data): + """ + Get network route dicts from ip route info. + + @param iproute_data: Output string from ip route command. + + @returns: A dict containing ipv4 and ipv6 route entries as lists. Each + item in the list is a route dictionary representing destination, + gateway, flags, genmask and interface information. 
+ """ + + routes = {} + routes['ipv4'] = [] + routes['ipv6'] = [] + entries = iproute_data.splitlines() + default_route_entry = { + 'destination': '', 'flags': '', 'gateway': '', 'genmask': '', + 'iface': '', 'metric': ''} + for line in entries: + entry = copy(default_route_entry) + if not line: + continue + toks = line.split() + flags = ['U'] + if toks[0] == "default": + entry['destination'] = "0.0.0.0" + entry['genmask'] = "0.0.0.0" + else: + if '/' in toks[0]: + (addr, cidr) = toks[0].split("/") + else: + addr = toks[0] + cidr = '32' + flags.append("H") + entry['genmask'] = net_prefix_to_ipv4_mask(cidr) + entry['destination'] = addr + entry['genmask'] = net_prefix_to_ipv4_mask(cidr) + entry['gateway'] = "0.0.0.0" + for i in range(len(toks)): + if toks[i] == "via": + entry['gateway'] = toks[i + 1] + flags.insert(1, "G") + if toks[i] == "dev": + entry["iface"] = toks[i + 1] + if toks[i] == "metric": + entry['metric'] = toks[i + 1] + entry['flags'] = ''.join(flags) + routes['ipv4'].append(entry) + try: + (iproute_data6, _err6) = util.subp( + ["ip", "--oneline", "-6", "route", "list", "table", "all"], + rcs=[0, 1]) + except util.ProcessExecutionError: + pass + else: + entries6 = iproute_data6.splitlines() + for line in entries6: + entry = {} + if not line: + continue + toks = line.split() + if toks[0] == "default": + entry['destination'] = "::/0" + entry['flags'] = "UG" + else: + entry['destination'] = toks[0] + entry['gateway'] = "::" + entry['flags'] = "U" + for i in range(len(toks)): + if toks[i] == "via": + entry['gateway'] = toks[i + 1] + entry['flags'] = "UG" + if toks[i] == "dev": + entry["iface"] = toks[i + 1] + if toks[i] == "metric": + entry['metric'] = toks[i + 1] + if toks[i] == "expires": + entry['flags'] = entry['flags'] + 'e' + routes['ipv6'].append(entry) + return routes + +def _netdev_route_info_netstat(route_data): routes = {} routes['ipv4'] = [] routes['ipv6'] = [] - entries = route_out.splitlines()[1:] + entries = route_data.splitlines() for line in entries: if not line: continue @@ -101,8 +264,8 @@ def route_info(): # default 10.65.0.1 UGS 0 34920 vtnet0 # # Linux netstat shows 2 more: - # Destination Gateway Genmask Flags MSS Window irtt Iface - # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 + # Destination Gateway Genmask Flags Metric Ref Use Iface + # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 if (len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing"): @@ -125,31 +288,57 @@ def route_info(): routes['ipv4'].append(entry) try: - (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"], - rcs=[0, 1]) + (route_data6, _err6) = util.subp( + ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]) except util.ProcessExecutionError: pass else: - entries6 = route_out6.splitlines()[1:] + entries6 = route_data6.splitlines() for line in entries6: if not line: continue toks = line.split() - if (len(toks) < 6 or toks[0] == "Kernel" or + if (len(toks) < 7 or toks[0] == "Kernel" or + toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Proto" or toks[0] == "Active"): continue entry = { - 'proto': toks[0], - 'recv-q': toks[1], - 'send-q': toks[2], - 'local address': toks[3], - 'foreign address': toks[4], - 'state': toks[5], + 'destination': toks[0], + 'gateway': toks[1], + 'flags': toks[2], + 'metric': toks[3], + 'ref': toks[4], + 'use': toks[5], + 'iface': toks[6], } + # skip lo interface on ipv6 + if entry['iface'] == "lo": + continue + # strip /128 from address if it's included 
+ if entry['destination'].endswith('/128'): + entry['destination'] = re.sub( + r'\/128$', '', entry['destination']) routes['ipv6'].append(entry) return routes +def route_info(): + routes = {} + if util.which('ip'): + # Try iproute first of all + (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"]) + routes = _netdev_route_info_iproute(iproute_out) + elif util.which('netstat'): + # Fall back to net-tools if iproute2 is not present + (route_out, _err) = util.subp( + ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]) + routes = _netdev_route_info_netstat(route_out) + else: + LOG.warning( + "Could not print routes: missing 'ip' and 'netstat' commands") + return routes + + def getgateway(): try: routes = route_info() @@ -166,21 +355,30 @@ def netdev_pformat(): lines = [] try: netdev = netdev_info(empty=".") - except Exception: - lines.append(util.center("Net device info failed", '!', 80)) + except Exception as e: + lines.append( + util.center( + "Net device info failed ({error})".format(error=str(e)), + '!', 80)) else: + if not netdev: + return '\n' fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] tbl = SimpleTable(fields) - for (dev, d) in sorted(netdev.items()): - tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) - if d.get('addr6'): - tbl.add_row([dev, d["up"], - d["addr6"], ".", d.get("scope6"), d["hwaddr"]]) + for (dev, data) in sorted(netdev.items()): + for addr in data.get('ipv4'): + tbl.add_row( + [dev, data["up"], addr["ip"], addr["mask"], + addr.get('scope', '.'), data["hwaddr"]]) + for addr in data.get('ipv6'): + tbl.add_row( + [dev, data["up"], addr["ip"], ".", addr["scope6"], + data["hwaddr"]]) netdev_s = tbl.get_string() max_len = len(max(netdev_s.splitlines(), key=len)) header = util.center("Net device info", "+", max_len) lines.extend([header, netdev_s]) - return "\n".join(lines) + return "\n".join(lines) + "\n" def route_pformat(): @@ -188,7 +386,10 @@ def route_pformat(): try: routes = route_info() except Exception as e: - lines.append(util.center('Route info failed', '!', 80)) + lines.append( + util.center( + 'Route info failed ({error})'.format(error=str(e)), + '!', 80)) util.logexc(LOG, "Route info failed: %s" % e) else: if routes.get('ipv4'): @@ -205,20 +406,20 @@ def route_pformat(): header = util.center("Route IPv4 info", "+", max_len) lines.extend([header, route_s]) if routes.get('ipv6'): - fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', - 'Local Address', 'Foreign Address', 'State'] + fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface', + 'Flags'] tbl_v6 = SimpleTable(fields_v6) for (n, r) in enumerate(routes.get('ipv6')): route_id = str(n) - tbl_v6.add_row([route_id, r['proto'], - r['recv-q'], r['send-q'], - r['local address'], r['foreign address'], - r['state']]) + if r['iface'] == 'lo': + continue + tbl_v6.add_row([route_id, r['destination'], + r['gateway'], r['iface'], r['flags']]) route_s = tbl_v6.get_string() max_len = len(max(route_s.splitlines(), key=len)) header = util.center("Route IPv6 info", "+", max_len) lines.extend([header, route_s]) - return "\n".join(lines) + return "\n".join(lines) + "\n" def debug_info(prefix='ci-info: '): diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 999b1d7c..82fd347b 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -190,35 +190,11 @@ class ResourceUsingTestCase(CiTestCase): super(ResourceUsingTestCase, self).setUp() self.resource_path = None - def resourceLocation(self, subname=None): - if self.resource_path 
is None: - paths = [ - os.path.join('tests', 'data'), - os.path.join('data'), - os.path.join(os.pardir, 'tests', 'data'), - os.path.join(os.pardir, 'data'), - ] - for p in paths: - if os.path.isdir(p): - self.resource_path = p - break - self.assertTrue((self.resource_path and - os.path.isdir(self.resource_path)), - msg="Unable to locate test resource data path!") - if not subname: - return self.resource_path - return os.path.join(self.resource_path, subname) - - def readResource(self, name): - where = self.resourceLocation(name) - with open(where, 'r') as fh: - return fh.read() - def getCloudPaths(self, ds=None): tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) cp = ch.Paths({'cloud_dir': tmpdir, - 'templates_dir': self.resourceLocation()}, + 'templates_dir': resourceLocation()}, ds=ds) return cp @@ -234,7 +210,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): ResourceUsingTestCase.tearDown(self) def replicateTestRoot(self, example_root, target_root): - real_root = self.resourceLocation() + real_root = resourceLocation() real_root = os.path.join(real_root, 'roots', example_root) for (dir_path, _dirnames, filenames) in os.walk(real_root): real_path = dir_path @@ -399,6 +375,18 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs): p.stop() +def resourceLocation(subname=None): + path = os.path.join('tests', 'data') + if not subname: + return path + return os.path.join(path, subname) + + +def readResource(name, mode='r'): + with open(resourceLocation(name), mode) as fh: + return fh.read() + + try: skipIf = unittest.skipIf except AttributeError: diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index 7dea2e41..2537c1c2 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -2,105 +2,121 @@ """Tests netinfo module functions and classes.""" +from copy import copy + from cloudinit.netinfo import netdev_pformat, route_pformat -from cloudinit.tests.helpers import CiTestCase, mock +from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output -SAMPLE_IFCONFIG_OUT = """\ -enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91 - inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0 - inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link - UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 - RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37 - TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0 - collisions:0 txqueuelen:1000 - RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB) - Interrupt:20 Memory:e1200000-e1220000 - -lo Link encap:Local Loopback - inet addr:127.0.0.1 Mask:255.0.0.0 - inet6 addr: ::1/128 Scope:Host - UP LOOPBACK RUNNING MTU:65536 Metric:1 - RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0 - TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0 - collisions:0 txqueuelen:1 -""" - -SAMPLE_ROUTE_OUT = '\n'.join([ - '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0' - ' enp0s25', - '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0' - ' wlp3s0', - '192.168.2.0 0.0.0.0 255.255.255.0 U 0 0 0' - ' enp0s25']) - - -NETDEV_FORMATTED_OUT = '\n'.join([ - '+++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++' - '++++++++++++++++++++', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+', - '| Device | Up | Address | Mask | Scope |' - ' Hw-Address |', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+', - '| enp0s25 | True | 
192.168.2.18 | 255.255.255.0 | . |' - ' 50:7b:9d:2c:af:91 |', - '| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link |' - ' 50:7b:9d:2c:af:91 |', - '| lo | True | 127.0.0.1 | 255.0.0.0 | . |' - ' . |', - '| lo | True | ::1/128 | . | host |' - ' . |', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+']) - -ROUTE_FORMATTED_OUT = '\n'.join([ - '+++++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++' - '+++', - '+-------+-------------+-------------+---------------+-----------+-----' - '--+', - '| Route | Destination | Gateway | Genmask | Interface | Flags' - ' |', - '+-------+-------------+-------------+---------------+-----------+' - '-------+', - '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 |' - ' UG |', - '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 |' - ' U |', - '+-------+-------------+-------------+---------------+-----------+' - '-------+', - '++++++++++++++++++++++++++++++++++++++++Route IPv6 info++++++++++' - '++++++++++++++++++++++++++++++', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+', - '| Route | Proto | Recv-Q | Send-Q | Local Address |' - ' Foreign Address | State |', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+', - '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | UG |' - ' 0 | 0 |', - '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | U |' - ' 0 | 0 |', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+']) +SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") +SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") +SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") +SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") +SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") +SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") +NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") +ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") class TestNetInfo(CiTestCase): maxDiff = None + with_logs = True + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_old_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering old nettools info.""" + m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') - def test_netdev_pformat(self, m_subp): - """netdev_pformat properly rendering network device information.""" - m_subp.return_value = (SAMPLE_IFCONFIG_OUT, '') + def test_netdev_new_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_iproute_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering ip route info.""" + m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') + 
m_which.side_effect = lambda x: x if x == 'ip' else None + content = netdev_pformat() + new_output = copy(NETDEV_FORMATTED_OUT) + # ip route show describes global scopes on ipv4 addresses + # whereas ifconfig does not. Add proper global/host scope to output. + new_output = new_output.replace('| . | 50:7b', '| global | 50:7b') + new_output = new_output.replace( + '255.0.0.0 | . |', '255.0.0.0 | host |') + self.assertEqual(new_output, content) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_warn_on_missing_commands(self, m_subp, m_which): + """netdev_pformat warns when missing both 'ip' and 'ifconfig'.""" + m_which.return_value = None # Neither ip nor ifconfig found + content = netdev_pformat() + self.assertEqual('\n', content) + self.assertEqual( + "WARNING: Could not print networks: missing 'ip' and 'ifconfig'" + " commands\n", + self.logs.getvalue()) + m_subp.assert_not_called() + + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') - def test_route_pformat(self, m_subp): - """netdev_pformat properly rendering network device information.""" - m_subp.return_value = (SAMPLE_ROUTE_OUT, '') + def test_route_nettools_pformat(self, m_subp, m_which): + """route_pformat properly rendering nettools route info.""" + + def subp_netstat_route_selector(*args, **kwargs): + if args[0] == ['netstat', '--route', '--numeric', '--extend']: + return (SAMPLE_ROUTE_OUT_V4, '') + if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']: + return (SAMPLE_ROUTE_OUT_V6, '') + raise Exception('Unexpected subp call %s' % args[0]) + + m_subp.side_effect = subp_netstat_route_selector + m_which.side_effect = lambda x: x if x == 'netstat' else None content = route_pformat() self.assertEqual(ROUTE_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_route_iproute_pformat(self, m_subp, m_which): + """route_pformat properly rendering ip route info.""" + + def subp_iproute_selector(*args, **kwargs): + if ['ip', '-o', 'route', 'list'] == args[0]: + return (SAMPLE_IPROUTE_OUT_V4, '') + v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all'] + if v6cmd == args[0]: + return (SAMPLE_IPROUTE_OUT_V6, '') + raise Exception('Unexpected subp call %s' % args[0]) + + m_subp.side_effect = subp_iproute_selector + m_which.side_effect = lambda x: x if x == 'ip' else None + content = route_pformat() + self.assertEqual(ROUTE_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_route_warn_on_missing_commands(self, m_subp, m_which): + """route_pformat warns when missing both 'ip' and 'netstat'.""" + m_which.return_value = None # Neither ip nor netstat found + content = route_pformat() + self.assertEqual('\n', content) + self.assertEqual( + "WARNING: Could not print routes: missing 'ip' and 'netstat'" + " commands\n", + self.logs.getvalue()) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/tests/data/netinfo/netdev-formatted-output b/tests/data/netinfo/netdev-formatted-output new file mode 100644 index 00000000..283ab4a4 --- /dev/null +++ b/tests/data/netinfo/netdev-formatted-output @@ -0,0 +1,10 @@ ++++++++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++++++++++++++ ++---------+------+------------------------------+---------------+--------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | 
++---------+------+------------------------------+---------------+--------+-------------------+ +| enp0s25 | True | 192.168.2.18 | 255.255.255.0 | . | 50:7b:9d:2c:af:91 | +| enp0s25 | True | fe80::7777:2222:1111:eeee/64 | . | global | 50:7b:9d:2c:af:91 | +| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link | 50:7b:9d:2c:af:91 | +| lo | True | 127.0.0.1 | 255.0.0.0 | . | . | +| lo | True | ::1/128 | . | host | . | ++---------+------+------------------------------+---------------+--------+-------------------+ diff --git a/tests/data/netinfo/new-ifconfig-output b/tests/data/netinfo/new-ifconfig-output new file mode 100644 index 00000000..83d4ad16 --- /dev/null +++ b/tests/data/netinfo/new-ifconfig-output @@ -0,0 +1,18 @@ +enp0s25: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500 + inet 192.168.2.18 netmask 255.255.255.0 broadcast 192.168.2.255 + inet6 fe80::7777:2222:1111:eeee prefixlen 64 scopeid 0x30<global> + inet6 fe80::8107:2b92:867e:f8a6 prefixlen 64 scopeid 0x20<link> + ether 50:7b:9d:2c:af:91 txqueuelen 1000 (Ethernet) + RX packets 3017 bytes 10601563 (10.1 MiB) + RX errors 0 dropped 39 overruns 0 frame 0 + TX packets 2627 bytes 196976 (192.3 KiB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 + inet 127.0.0.1 netmask 255.0.0.0 + inet6 ::1 prefixlen 128 scopeid 0x10<host> + loop txqueuelen 1 (Local Loopback) + RX packets 0 bytes 0 (0.0 B) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 0 bytes 0 (0.0 B) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 diff --git a/tests/data/netinfo/old-ifconfig-output b/tests/data/netinfo/old-ifconfig-output new file mode 100644 index 00000000..e01f763e --- /dev/null +++ b/tests/data/netinfo/old-ifconfig-output @@ -0,0 +1,18 @@ +enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91 + inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0 + inet6 addr: fe80::7777:2222:1111:eeee/64 Scope:Global + inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link + UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 + RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37 + TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB) + Interrupt:20 Memory:e1200000-e1220000 + +lo Link encap:Local Loopback + inet addr:127.0.0.1 Mask:255.0.0.0 + inet6 addr: ::1/128 Scope:Host + UP LOOPBACK RUNNING MTU:65536 Metric:1 + RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0 + TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1 diff --git a/tests/data/netinfo/route-formatted-output b/tests/data/netinfo/route-formatted-output new file mode 100644 index 00000000..9d2c5dd3 --- /dev/null +++ b/tests/data/netinfo/route-formatted-output @@ -0,0 +1,22 @@ ++++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++ ++-------+-------------+-------------+---------------+-----------+-------+ +| Route | Destination | Gateway | Genmask | Interface | Flags | ++-------+-------------+-------------+---------------+-----------+-------+ +| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | enp0s25 | UG | +| 1 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 | UG | +| 2 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 | U | ++-------+-------------+-------------+---------------+-----------+-------+ ++++++++++++++++++++++++++++++++++++Route IPv6 info+++++++++++++++++++++++++++++++++++ ++-------+---------------------------+---------------------------+-----------+-------+ +| Route | Destination | Gateway | Interface | Flags | 
++-------+---------------------------+---------------------------+-----------+-------+ +| 0 | 2a00:abcd:82ae:cd33::657 | :: | enp0s25 | Ue | +| 1 | 2a00:abcd:82ae:cd33::/64 | :: | enp0s25 | U | +| 2 | 2a00:abcd:82ae:cd33::/56 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | +| 3 | fd81:123f:654::657 | :: | enp0s25 | U | +| 4 | fd81:123f:654::/64 | :: | enp0s25 | U | +| 5 | fd81:123f:654::/48 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | +| 6 | fe80::abcd:ef12:bc34:da21 | :: | enp0s25 | U | +| 7 | fe80::/64 | :: | enp0s25 | U | +| 8 | ::/0 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | ++-------+---------------------------+---------------------------+-----------+-------+ diff --git a/tests/data/netinfo/sample-ipaddrshow-output b/tests/data/netinfo/sample-ipaddrshow-output new file mode 100644 index 00000000..b2fa2672 --- /dev/null +++ b/tests/data/netinfo/sample-ipaddrshow-output @@ -0,0 +1,13 @@ +1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever + inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever +2: enp0s25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000 + link/ether 50:7b:9d:2c:af:91 brd ff:ff:ff:ff:ff:ff + inet 192.168.2.18/24 brd 192.168.2.255 scope global dynamic enp0s25 + valid_lft 84174sec preferred_lft 84174sec + inet6 fe80::7777:2222:1111:eeee/64 scope global + valid_lft forever preferred_lft forever + inet6 fe80::8107:2b92:867e:f8a6/64 scope link + valid_lft forever preferred_lft forever + diff --git a/tests/data/netinfo/sample-iproute-output-v4 b/tests/data/netinfo/sample-iproute-output-v4 new file mode 100644 index 00000000..904cb034 --- /dev/null +++ b/tests/data/netinfo/sample-iproute-output-v4 @@ -0,0 +1,3 @@ +default via 192.168.2.1 dev enp0s25 proto static metric 100 +default via 192.168.2.1 dev wlp3s0 proto static metric 150 +192.168.2.0/24 dev enp0s25 proto kernel scope link src 192.168.2.18 metric 100 diff --git a/tests/data/netinfo/sample-iproute-output-v6 b/tests/data/netinfo/sample-iproute-output-v6 new file mode 100644 index 00000000..12bb1c12 --- /dev/null +++ b/tests/data/netinfo/sample-iproute-output-v6 @@ -0,0 +1,11 @@ +2a00:abcd:82ae:cd33::657 dev enp0s25 proto kernel metric 256 expires 2334sec pref medium +2a00:abcd:82ae:cd33::/64 dev enp0s25 proto ra metric 100 pref medium +2a00:abcd:82ae:cd33::/56 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium +fd81:123f:654::657 dev enp0s25 proto kernel metric 256 pref medium +fd81:123f:654::/64 dev enp0s25 proto ra metric 100 pref medium +fd81:123f:654::/48 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium +fe80::abcd:ef12:bc34:da21 dev enp0s25 proto static metric 100 pref medium +fe80::/64 dev enp0s25 proto kernel metric 256 pref medium +default via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto static metric 100 pref medium +local ::1 dev lo table local proto none metric 0 pref medium +local 2600:1f16:b80:ad00:90a:c915:bca6:5ff2 dev lo table local proto none metric 0 pref medium diff --git a/tests/data/netinfo/sample-route-output-v4 b/tests/data/netinfo/sample-route-output-v4 new file mode 100644 index 00000000..ecc31d96 --- /dev/null +++ b/tests/data/netinfo/sample-route-output-v4 @@ -0,0 +1,5 @@ +Kernel IP routing table +Destination Gateway Genmask Flags Metric Ref Use Iface +0.0.0.0 192.168.2.1 0.0.0.0 UG 100 0 0 enp0s25 +0.0.0.0 192.168.2.1 0.0.0.0 UG 150 0 0 wlp3s0 +192.168.2.0 
0.0.0.0 255.255.255.0 U 100 0 0 enp0s25 diff --git a/tests/data/netinfo/sample-route-output-v6 b/tests/data/netinfo/sample-route-output-v6 new file mode 100644 index 00000000..4712b73c --- /dev/null +++ b/tests/data/netinfo/sample-route-output-v6 @@ -0,0 +1,13 @@ +Kernel IPv6 routing table +Destination Next Hop Flag Met Re Use If +2a00:abcd:82ae:cd33::657/128 :: Ue 256 1 0 enp0s25 +2a00:abcd:82ae:cd33::/64 :: U 100 1 0 enp0s25 +2a00:abcd:82ae:cd33::/56 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +fd81:123f:654::657/128 :: U 256 1 0 enp0s25 +fd81:123f:654::/64 :: U 100 1 0 enp0s25 +fd81:123f:654::/48 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +fe80::abcd:ef12:bc34:da21/128 :: U 100 1 2 enp0s25 +fe80::/64 :: U 256 1 16880 enp0s25 +::/0 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +::/0 :: !n -1 1424956 lo +::1/128 :: Un 0 4 26289 lo diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 6364d38e..e1a5d2c8 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -55,7 +55,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): return True def testMultiEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_2.email') + test_data = helpers.readResource('filter_cloud_multipart_2.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -70,7 +70,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testHeaderEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_header.email') + test_data = helpers.readResource('filter_cloud_multipart_header.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -85,7 +85,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testConfigEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_1.email') + test_data = helpers.readResource('filter_cloud_multipart_1.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -99,7 +99,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testNoneIndex(self): - test_data = self.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource('filter_cloud_multipart.yaml') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) @@ -108,7 +108,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertTrue(self.equivalentMessage(message, filtered_message)) def testIndexes(self): - test_data = self.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource('filter_cloud_multipart.yaml') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index f51358da..3a5072c7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -100,7 +100,7 @@ def make_dict(max_depth, seed=None): class TestSimpleRun(helpers.ResourceUsingTestCase): def _load_merge_files(self): - merge_root = self.resourceLocation('merge_sources') + merge_root = 
helpers.resourceLocation('merge_sources') tests = [] source_ids = collections.defaultdict(list) expected_files = {} diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 5d3f1ca3..d1ac4942 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -25,7 +25,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): 'cloud_init_modules': ['write-files'], 'system_info': {'paths': {'run_dir': new_root}} } - ud = self.readResource('user_data.1.txt') + ud = helpers.readResource('user_data.1.txt') cloud_cfg = util.yaml_dumps(cfg) util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) util.write_file(os.path.join(new_root, 'etc', diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7cbd5538..e04ea031 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -325,7 +325,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_precise_ext4_root(self): - lines = self.readResource('mountinfo_precise_ext4.txt').splitlines() + lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines() expected = ('/dev/mapper/vg0-root', 'ext4', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -347,7 +347,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) def test_raring_btrfs_root(self): - lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines() + lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines() expected = ('/dev/vda1', 'btrfs', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -373,7 +373,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - self.readResource('zpool_status_simple.txt'), '' + helpers.readResource('zpool_status_simple.txt'), '' ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -406,7 +406,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - self.readResource('zpool_status_simple.txt'), 'error' + helpers.readResource('zpool_status_simple.txt'), 'error' ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -414,7 +414,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.util.subp') def test_parse_mount_with_ext(self, mount_out): - mount_out.return_value = (self.readResource('mount_parse_ext.txt'), '') + mount_out.return_value = ( + helpers.readResource('mount_parse_ext.txt'), '') # this one is valid and exists in mount_parse_ext.txt ret = util.parse_mount('/var') self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret) @@ -430,7 +431,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.util.subp') def test_parse_mount_with_zfs(self, mount_out): - mount_out.return_value = (self.readResource('mount_parse_zfs.txt'), '') + mount_out.return_value = ( + helpers.readResource('mount_parse_zfs.txt'), '') # this one is valid and exists in mount_parse_zfs.txt ret = util.parse_mount('/var') self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret) -- cgit v1.2.3 From 6811926fdb991ad02ad9c0134c1d4bbe82ef87e1 Mon Sep 17 00:00:00 2001 
From: Scott Moser Date: Wed, 18 Apr 2018 20:37:07 -0600 Subject: Schema: do not warn on duplicate items in commands. runcmd, bootcmd, snap/commands, ubuntu-advantage/commands would log warning (and fail if strict) on duplicate values in the commands. But those should be allowed. For example, it is perfectly valid to do: runcmd: ['sleep 1', 'sleep 1'] LP: #1764264 --- cloudinit/config/cc_bootcmd.py | 1 - cloudinit/config/cc_runcmd.py | 1 - cloudinit/config/cc_snap.py | 1 - cloudinit/config/cc_ubuntu_advantage.py | 1 - cloudinit/config/tests/test_snap.py | 36 ++++++++++++++++++++++ .../unittests/test_handler/test_handler_bootcmd.py | 24 ++++++++++++++- .../unittests/test_handler/test_handler_runcmd.py | 25 +++++++++++++-- 7 files changed, 82 insertions(+), 7 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 233da1ef..db64f0a6 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -63,7 +63,6 @@ schema = { 'additionalProperties': False, 'minItems': 1, 'required': [], - 'uniqueItems': True } } } diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 539cbd5d..b6f6c807 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -66,7 +66,6 @@ schema = { 'additionalProperties': False, 'minItems': 1, 'required': [], - 'uniqueItems': True } } } diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 34a53fd4..a7a03214 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -110,7 +110,6 @@ schema = { 'additionalItems': False, # Reject non-string & non-list 'minItems': 1, 'minProperties': 1, - 'uniqueItems': True }, 'squashfuse_in_container': { 'type': 'boolean' diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 16b1868b..29d18c96 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -87,7 +87,6 @@ schema = { 'additionalItems': False, # Reject non-string & non-list 'minItems': 1, 'minProperties': 1, - 'uniqueItems': True } }, 'additionalProperties': False, # Reject keys not in schema diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index c5b4a9de..492d2d46 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -340,6 +340,42 @@ class TestSchema(CiTestCase): {'snap': {'assertions': {'01': 'also valid'}}}, schema) self.assertEqual('', self.logs.getvalue()) + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + byebye = ["echo", "bye"] + try: + cfg = {'snap': {'commands': [byebye, byebye]}} + validate_cloudconfig_schema(cfg, schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("command entries can be duplicates.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + byebye = "echo bye" + try: + cfg = {'snap': {'commands': [byebye, byebye]}} + validate_cloudconfig_schema(cfg, schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("command entries can be duplicates.") + + def test_duplicates_are_fine_dict_array(self): + """Duplicated commands dict/array entries are allowed.""" + byebye = ["echo", "bye"] + try: + cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}} + validate_cloudconfig_schema(cfg, schema, strict=True) + except schema.SchemaValidationError as e: + 
self.fail("command entries can be duplicate.") + + def test_duplicates_are_fine_dict_string(self): + """Duplicated commands dict/string entries are allowed.""" + byebye = "echo bye" + try: + cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}} + validate_cloudconfig_schema(cfg, schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("command entries can be duplicate.") + class TestHandle(CiTestCase): diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index 29fc25e4..c3abedde 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_bootcmd +from cloudinit.config import cc_bootcmd, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema @@ -138,4 +138,26 @@ class TestBootcmd(CiTestCase): self.logs.getvalue()) +class TestSchema(CiTestCase): + """Directly test schema rather than through handle.""" + + def test_duplicates_are_fine_array_array(self): + """Duplicated array entries are allowed.""" + try: + byebye = ["echo", "bye"] + schema.validate_cloudconfig_schema( + {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries as list are allowed to be duplicates.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated array entries entries are allowed in values of array.""" + try: + byebye = "echo bye" + schema.validate_cloudconfig_schema( + {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries are allowed to be duplicates.") + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py index dbbb2717..ee1981d1 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/test_handler/test_handler_runcmd.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.config import cc_runcmd +from cloudinit.config import cc_runcmd, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, skipUnlessJsonSchema) import logging import os @@ -99,4 +99,25 @@ class TestRuncmd(FilesystemMockingTestCase): self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) +class TestSchema(CiTestCase): + """Directly test schema rather than through handle.""" + + def test_duplicates_are_fine_array(self): + """Duplicated array entries are allowed.""" + try: + byebye = ["echo", "bye"] + schema.validate_cloudconfig_schema( + {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries as list are allowed to be duplicates.") + + def test_duplicates_are_fine_string(self): + """Duplicated string entries are allowed.""" + try: + byebye = "echo bye" + schema.validate_cloudconfig_schema( + {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries are allowed to be duplicates.") + # vi: ts=4 expandtab -- cgit v1.2.3 From 1081962eacf2814fea6f4fa3255c530de14e4a24 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 19 Apr 2018 21:30:08 -0600 Subject: pylint: pay attention to unused variable warnings. This enables warnings produced by pylint for unused variables (W0612), and fixes the existing errors. --- .pylintrc | 2 +- cloudinit/analyze/dump.py | 2 +- cloudinit/cmd/tests/test_main.py | 6 ++-- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_emit_upstart.py | 2 +- cloudinit/config/cc_resizefs.py | 8 ++---- cloudinit/config/cc_rh_subscription.py | 18 ++++++------ cloudinit/config/cc_snap.py | 4 +-- cloudinit/config/cc_snappy.py | 4 +-- cloudinit/config/cc_ubuntu_advantage.py | 4 +-- cloudinit/config/schema.py | 4 +-- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/ubuntu.py | 2 +- cloudinit/net/__init__.py | 2 +- cloudinit/net/cmdline.py | 2 +- cloudinit/net/dhcp.py | 2 +- cloudinit/net/sysconfig.py | 2 +- cloudinit/reporting/events.py | 2 +- cloudinit/sources/DataSourceAliYun.py | 2 +- cloudinit/sources/DataSourceAzure.py | 33 +++++++++------------- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/DataSourceOVF.py | 2 +- cloudinit/sources/DataSourceOpenStack.py | 4 +-- cloudinit/sources/helpers/digitalocean.py | 7 ++--- cloudinit/sources/helpers/openstack.py | 2 +- cloudinit/sources/helpers/vmware/imc/config_nic.py | 2 +- .../sources/helpers/vmware/imc/config_passwd.py | 4 +-- .../sources/helpers/vmware/imc/guestcust_util.py | 4 +-- cloudinit/sources/tests/test_init.py | 2 +- cloudinit/templater.py | 2 +- cloudinit/tests/helpers.py | 2 +- cloudinit/tests/test_util.py | 2 +- cloudinit/url_helper.py | 2 +- cloudinit/util.py | 2 +- tests/cloud_tests/bddeb.py | 2 +- tests/cloud_tests/collect.py | 3 +- tests/cloud_tests/platforms/instances.py | 2 +- tests/cloud_tests/platforms/lxd/instance.py | 10 +++---- tests/cloud_tests/setup_image.py | 11 ++++---- tests/cloud_tests/testcases/base.py | 2 +- .../testcases/examples/including_user_groups.py | 2 +- tests/cloud_tests/testcases/modules/user_groups.py | 2 +- tests/cloud_tests/util.py | 2 +- tests/unittests/test__init__.py | 2 +- tests/unittests/test_datasource/test_azure.py | 4 +-- tests/unittests/test_datasource/test_maas.py | 4 +-- 
tests/unittests/test_datasource/test_nocloud.py | 3 -- .../test_handler/test_handler_apt_source_v3.py | 2 +- tests/unittests/test_handler/test_handler_ntp.py | 2 +- tests/unittests/test_templating.py | 4 +-- tests/unittests/test_util.py | 6 ++-- 51 files changed, 95 insertions(+), 112 deletions(-) (limited to 'tests/unittests') diff --git a/.pylintrc b/.pylintrc index 0bdfa59d..3bfa0c81 100644 --- a/.pylintrc +++ b/.pylintrc @@ -28,7 +28,7 @@ jobs=4 # W0703(broad-except) # W1401(anomalous-backslash-in-string) -disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 +disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 [REPORTS] diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index b071aa19..1f3060d0 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -112,7 +112,7 @@ def parse_ci_logline(line): return None event_description = stage_to_description[event_name] else: - (pymodloglvl, event_type, event_name) = eventstr.split()[0:3] + (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3] event_description = eventstr.split(event_name)[1].strip() event = { diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index dbe421c0..e2c54ae8 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase): self.assertEqual(main.LOG, log) self.assertIsNone(args) - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index afaca464..e18944ec 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None): # get a complete list of packages listed in input pkgs_cfgd = set() - for key, content in selsets.items(): + for _key, content in selsets.items(): for line in content.splitlines(): if line.startswith("#"): continue diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 69dc2d5e..eb9fbe66 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -43,7 +43,7 @@ def is_upstart_system(): del myenv['UPSTART_SESSION'] check_cmd = ['initctl', 'version'] try: - (out, err) = util.subp(check_cmd, env=myenv) + (out, _err) = util.subp(check_cmd, env=myenv) return 'upstart' in out except util.ProcessExecutionError as e: LOG.debug("'%s' returned '%s', not using upstart", diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 
013e69b5..82f29e10 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth): def _get_dumpfs_output(mount_point): - dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) - return dumpfs_res + return util.subp(['dumpfs', '-m', mount_point])[0] def _get_gpart_output(part): - gpart_res, err = util.subp(['gpart', 'show', part]) - return gpart_res + return util.subp(['gpart', 'show', part])[0] def _can_skip_resize_ufs(mount_point, devpth): @@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth): if not line.startswith('#'): newfs_cmd = shlex.split(line) opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' - optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": cur_fs_sz = int(a) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 530808ce..1c679430 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -209,8 +209,7 @@ class SubscriptionManager(object): cmd.append("--serverurl={0}".format(self.server_hostname)) try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -233,8 +232,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -257,7 +255,7 @@ class SubscriptionManager(object): .format(self.servicelevel)] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): @@ -275,7 +273,7 @@ class SubscriptionManager(object): def _set_auto_attach(self): cmd = ['attach', '--auto'] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False @@ -294,12 +292,12 @@ class SubscriptionManager(object): # Get all available pools cmd = ['list', '--available', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] available = (results.rstrip()).split("\n") # Get all consumed pools cmd = ['list', '--consumed', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] consumed = (results.rstrip()).split("\n") return available, consumed @@ -311,14 +309,14 @@ class SubscriptionManager(object): ''' cmd = ['repos', '--list-enabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] active_repos = [] for repo in return_out.split("\n"): if "Repo ID:" in repo: active_repos.append((repo.split(':')[1]).strip()) cmd = ['repos', '--list-disabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] inactive_repos = [] for repo in return_out.split("\n"): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index a7a03214..90724b81 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -203,12 +203,12 @@ def 
maybe_install_squashfuse(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['squashfuse']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install squashfuse") raise diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index bab80bbe..15bee2d3 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): def read_installed_packages(): ret = [] - for (name, date, version, dev) in read_pkg_data(): + for (name, _date, _version, dev) in read_pkg_data(): if dev: ret.append(NAMESPACE_DELIM.join([name, dev])) else: @@ -222,7 +222,7 @@ def read_installed_packages(): def read_pkg_data(): - out, err = util.subp([SNAPPY_CMD, "list"]) + out, _err = util.subp([SNAPPY_CMD, "list"]) pkg_data = [] for line in out.splitlines()[1:]: toks = line.split(sep=None, maxsplit=3) diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 29d18c96..5e082bd6 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -148,12 +148,12 @@ def maybe_install_ua_tools(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['ubuntu-advantage-tools']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install ubuntu-advantage-tools") raise diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index ca7d0d5b..76826e05 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -297,8 +297,8 @@ def get_schema(): configs_dir = os.path.dirname(os.path.abspath(__file__)) potential_handlers = find_modules(configs_dir) - for (fname, mod_name) in potential_handlers.items(): - mod_locs, looked_locs = importer.find_module( + for (_fname, mod_name) in potential_handlers.items(): + mod_locs, _looked_locs = importer.find_module( mod_name, ['cloudinit.config'], ['schema']) if mod_locs: mod = importer.import_module(mod_locs[0]) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 099fac5c..5b1718a4 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -113,7 +113,7 @@ class Distro(distros.Distro): n = re.search(r'\d+$', dev) index = n.group(0) - (out, err) = util.subp(['ifconfig', '-a']) + (out, _err) = util.subp(['ifconfig', '-a']) ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] bsddev = 'NOT_FOUND' diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index fdc1f622..68154104 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -25,7 +25,7 @@ class Distro(debian.Distro): def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" if not self._preferred_ntp_clients: - (name, version, codename) = util.system_info()['dist'] + (_name, _version, codename) = util.system_info()['dist'] # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. 
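# Names with a leading underscore match pylint's dummy-variable pattern, so the W0612 check enabled by this commit treats them as intentionally unused.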
if codename == "xenial" and not util.system_is_snappy(): self._preferred_ntp_clients = ['ntp'] diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f69c0ef2..80054546 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -295,7 +295,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): def _version_2(netcfg): renames = [] - for key, ent in netcfg.get('ethernets', {}).items(): + for ent in netcfg.get('ethernets', {}).values(): # only rename if configured to do so name = ent.get('set-name') if not name: diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 9e9fe0fe..f89a0f73 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): iface['mac_address'] = mac_addrs[name] # Handle both IPv4 and IPv6 values - for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')): + for pre in ('IPV4', 'IPV6'): # if no IPV4ADDR or IPV6ADDR, then go on. if pre + "ADDR" not in data: continue diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 087c0c03..12cf5097 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None): if leases_d is None: leases_d = NETWORKD_LEASES_DIR leases = networkd_load_leases(leases_d=leases_d) - for ifindex, data in sorted(leases.items()): + for _ifindex, data in sorted(leases.items()): if data.get(keyname): return data[keyname] return None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 39d89c46..7a7f5093 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -364,7 +364,7 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): - for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py index 4f62d2f9..e5dfab33 100644 --- a/cloudinit/reporting/events.py +++ b/cloudinit/reporting/events.py @@ -192,7 +192,7 @@ class ReportEventStack(object): def _childrens_finish_info(self): for cand_result in (status.FAIL, status.WARN): - for name, (value, msg) in self.children.items(): + for _name, (value, _msg) in self.children.items(): if value == cand_result: return (value, self.message) return (self.result, self.message) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 22279d09..858e0827 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -45,7 +45,7 @@ def _is_aliyun(): def parse_public_keys(public_keys): keys = [] - for key_id, key_body in public_keys.items(): + for _key_id, key_body in public_keys.items(): if isinstance(key_body, str): keys.append(key_body.strip()) elif isinstance(key_body, list): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0ee622e2..a71197a6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -107,31 +107,24 @@ def find_dev_from_busdev(camcontrol_out, busdev): return None -def get_dev_storvsc_sysctl(): +def execute_or_debug(cmd, fail_ret=None): try: - sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) + return util.subp(cmd)[0] except util.ProcessExecutionError: - 
LOG.debug("Fail to execute sysctl dev.storvsc") - sysctl_out = "" - return sysctl_out + LOG.debug("Failed to execute: %s", ' '.join(cmd)) + return fail_ret + + +def get_dev_storvsc_sysctl(): + return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="") def get_camcontrol_dev_bus(): - try: - camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist -b") - return None - return camcontrol_b_out + return execute_or_debug(['camcontrol', 'devlist', '-b']) def get_camcontrol_dev(): - try: - camcontrol_out, err = util.subp(['camcontrol', 'devlist']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist") - return None - return camcontrol_out + return execute_or_debug(['camcontrol', 'devlist']) def get_resource_disk_on_freebsd(port_id): @@ -474,7 +467,7 @@ class DataSourceAzure(sources.DataSource): before we go into our polling loop.""" try: get_metadata_from_fabric(None, lease['unknown-245']) - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) @@ -492,7 +485,7 @@ class DataSourceAzure(sources.DataSource): jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False - (md, self.userdata_raw, cfg, files) = ret + (_md, self.userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): @@ -528,7 +521,7 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." 
"connectivity issues.", exc_info=True) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6ac88635..aa56addb 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -204,7 +204,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, seed_url = seed_url[:-1] md = {} - for path, dictname, binary, optional in DS_FIELDS: + for path, _dictname, binary, optional in DS_FIELDS: if version is None: url = "%s/%s" % (seed_url, path) else: diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dc914a72..178ccb0f 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -556,7 +556,7 @@ def search_file(dirpath, filename): if not dirpath or not filename: return None - for root, dirs, files in os.walk(dirpath): + for root, _dirs, files in os.walk(dirpath): if filename in files: return os.path.join(root, filename) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index e55a7638..fb166ae1 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -86,7 +86,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout, retries) = self._get_url_settings() + (max_wait, timeout, _retries) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout) @@ -106,7 +106,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): except IOError: return False - (max_wait, timeout, retries) = self._get_url_settings() + (_max_wait, timeout, retries) = self._get_url_settings() try: results = util.log_time(LOG.debug, diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 693f8d5c..0e7cccac 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -41,10 +41,9 @@ def assign_ipv4_link_local(nic=None): "address") try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) - - (result, _err) = util.subp(ip_link_cmd) + util.subp(ip_link_cmd) LOG.debug("brought device '%s' up", nic) except Exception: util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." 
@@ -75,7 +74,7 @@ def del_ipv4_link_local(nic=None): ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("removed ip4LL addresses from %s", nic) except Exception as e: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 26f3168d..a4cf0667 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -638,7 +638,7 @@ def convert_net_json(network_json=None, known_macs=None): known_macs = net.get_interfaces_by_mac() # go through and fill out the link_id_info with names - for link_id, info in link_id_info.items(): + for _link_id, info in link_id_info.items(): if info.get('name'): continue if info.get('mac') in known_macs: diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2d8900e2..3ef8c624 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -73,7 +73,7 @@ class NicConfigurator(object): The mac address(es) are in the lower case """ cmd = ['ip', 'addr', 'show'] - (output, err) = util.subp(cmd) + output, _err = util.subp(cmd) sections = re.split(r'\n\d+: ', '\n' + output)[1:] macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py index 75cfbaaf..8c91fa41 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py +++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py @@ -56,10 +56,10 @@ class PasswordConfigurator(object): LOG.info('Expiring password.') for user in uidUserList: try: - out, err = util.subp(['passwd', '--expire', user]) + util.subp(['passwd', '--expire', user]) except util.ProcessExecutionError as e: if os.path.exists('/usr/bin/chage'): - out, e = util.subp(['chage', '-d', '0', user]) + util.subp(['chage', '-d', '0', user]) else: LOG.warning('Failed to expire password for %s with error: ' '%s', user, e) diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 44075255..a590f323 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -91,7 +91,7 @@ def enable_nics(nics): for attempt in range(0, enableNicsWaitRetries): logger.debug("Trying to connect interfaces, attempt %d", attempt) - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, nics) @@ -104,7 +104,7 @@ def enable_nics(nics): return for count in range(0, enableNicsWaitCount): - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, nics) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index e7fda22a..452e9219 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -278,7 +278,7 @@ class TestDataSource(CiTestCase): base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 # Import all DataSource subclasses so we can inspect them. 
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) - for loc, name in modules.items(): + for _loc, name in modules.items(): mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) if mod_locs: importer.import_module(mod_locs[0]) diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 9a087e1c..7e7acb86 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -147,7 +147,7 @@ def render_string(content, params): Warning: py2 str with non-ascii chars will cause UnicodeDecodeError.""" if not params: params = {} - template_type, renderer, content = detect_template(content) + _template_type, renderer, content = detect_template(content) return renderer(content, params) # vi: ts=4 expandtab diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 82fd347b..5aada6e7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -334,7 +334,7 @@ def dir2dict(startdir, prefix=None): flist = {} if prefix is None: prefix = startdir - for root, dirs, files in os.walk(startdir): + for root, _dirs, files in os.walk(startdir): for fname in files: fpath = os.path.join(root, fname) key = fpath[len(prefix):] diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 3f37dbb6..76eed076 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -135,7 +135,7 @@ class TestGetHostnameFqdn(CiTestCase): def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): """Calls to cloud.get_hostname pass the metadata_only parameter.""" mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn( + _hn, _fqdn = util.get_hostname_fqdn( cfg={}, cloud=mycloud, metadata_only=True) self.assertEqual( [{'fqdn': True, 'metadata_only': True}, diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 03a573af..1de07b1c 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -519,7 +519,7 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, resource_owner_secret=token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, timestamp=timestamp) - uri, signed_headers, body = client.sign(url) + _uri, signed_headers, _body = client.sign(url) return signed_headers # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 1717b529..310758dd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2214,7 +2214,7 @@ def parse_mtab(path): def find_freebsd_part(label_part): if label_part.startswith("/dev/label/"): target_label = label_part[5:] - (label_part, err) = subp(['glabel', 'status', '-s']) + (label_part, _err) = subp(['glabel', 'status', '-s']) for labels in label_part.split("\n"): items = labels.split() if len(items) > 0 and items[0].startswith(target_label): diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index b9cfcfa6..f04d0cd4 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -113,7 +113,7 @@ def bddeb(args): @return_value: fail count """ LOG.info('preparing to build cloud-init deb') - (res, failed) = run_stage('build deb', [partial(setup_build, args)]) + _res, failed = run_stage('build deb', [partial(setup_build, args)]) return failed # vi: ts=4 expandtab diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index d4f9135b..1ba72856 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -25,7 +25,8 @@ def collect_script(instance, base_dir, script, script_name): script.encode(), rcs=False, 
description='collect: {}'.format(script_name)) if err: - LOG.debug("collect script %s had stderr: %s", script_name, err) + LOG.debug("collect script %s exited '%s' and had stderr: %s", + script_name, err, exit) if not isinstance(out, bytes): raise util.PlatformError( "Collection of '%s' returned type %s, expected bytes: %s" % diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index 3bad021f..cc439d29 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -108,7 +108,7 @@ class Instance(TargetBase): return client except (ConnectionRefusedError, AuthenticationException, BadHostKeyException, ConnectionResetError, SSHException, - OSError) as e: + OSError): retries -= 1 time.sleep(10) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 0d957bca..1c17c781 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -152,9 +152,8 @@ class LXDInstance(Instance): return fp.read() try: - stdout, stderr = subp( - ['lxc', 'console', '--show-log', self.name], decode=False) - return stdout + return subp(['lxc', 'console', '--show-log', self.name], + decode=False)[0] except ProcessExecutionError as e: raise PlatformError( "console log", @@ -214,11 +213,10 @@ def _has_proper_console_support(): reason = "LXD Driver version not 3.x+ (%s)" % dver else: try: - stdout, stderr = subp(['lxc', 'console', '--help'], - decode=False) + stdout = subp(['lxc', 'console', '--help'], decode=False)[0] if not (b'console' in stdout and b'log' in stdout): reason = "no '--log' in lxc console --help" - except ProcessExecutionError as e: + except ProcessExecutionError: reason = "no 'console' command in lxc client" if reason: diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 6d242115..4e195709 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -25,10 +25,9 @@ def installed_package_version(image, package, ensure_installed=True): else: raise NotImplementedError - msg = 'query version for package: {}'.format(package) - (out, err, exit) = image.execute( - cmd, description=msg, rcs=(0,) if ensure_installed else range(0, 256)) - return out.strip() + return image.execute( + cmd, description='query version for package: {}'.format(package), + rcs=(0,) if ensure_installed else range(0, 256))[0].strip() def install_deb(args, image): @@ -54,7 +53,7 @@ def install_deb(args, image): remote_path], description=msg) # check installed deb version matches package fmt = ['-W', "--showformat=${Version}"] - (out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path]) + out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0] expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: @@ -85,7 +84,7 @@ def install_rpm(args, image): image.execute(['rpm', '-U', remote_path], description=msg) fmt = ['--queryformat', '"%{VERSION}"'] - (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) + (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 4fda8f91..0d1916b4 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py 
@@ -159,7 +159,7 @@ class CloudTestCase(unittest.TestCase): expected_net_keys = [ 'public-ipv4s', 'ipv4-associations', 'local-hostname', 'public-hostname'] - for mac, mac_data in macs.items(): + for mac_data in macs.values(): for key in expected_net_keys: self.assertIn(key, mac_data) self.assertIsNotNone( diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/examples/including_user_groups.py +++ b/tests/cloud_tests/testcases/examples/including_user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/modules/user_groups.py +++ b/tests/cloud_tests/testcases/modules/user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index 3dd4996d..06f7d865 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -358,7 +358,7 @@ class TargetBase(object): # when sh is invoked with '-c', then the first argument is "$0" # which is commonly understood as the "program name". 
# 'read_data' is the program name, and 'remote_path' is '$1' - stdout, stderr, rc = self._execute( + stdout, _stderr, rc = self._execute( ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path]) if rc != 0: raise RuntimeError("Failed to read file '%s'" % remote_path) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 25878d7a..f1ab02e9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -214,7 +214,7 @@ class TestCmdlineUrl(CiTestCase): def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") - lvl, msg = main.attempt_cmdline_url( + lvl, _msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) m_read.assert_not_called() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3e8b7913..88fe76c7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -214,7 +214,7 @@ scbus-1 on xpt0 bus 0 self.assertIn(tag, x) def tags_equal(x, y): - for x_tag, x_val in x.items(): + for x_val in x.values(): y_val = y.get(x_val.tag) self.assertEqual(x_val.text, y_val.text) @@ -1216,7 +1216,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): fake_resp.return_value = mock.MagicMock(status_code=200, text=content, content=content) dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - md, ud, cfg, d = dsa._reprovision() + md, _ud, cfg, _d = dsa._reprovision() self.assertEqual(md['local-hostname'], hostname) self.assertEqual(cfg['system_info']['default_user']['name'], username) self.assertEqual(fake_resp.call_args_list, diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 6e4031cf..c84d067e 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -53,7 +53,7 @@ class TestMAASDataSource(CiTestCase): my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) - ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) + ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) self.assertEqual(userdata, ud) for key in ('instance-id', 'local-hostname'): @@ -149,7 +149,7 @@ class TestMAASDataSource(CiTestCase): 'meta-data/local-hostname': 'test-hostname', 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), } - ud, md, vd = self.mock_read_maas_seed_url( + _ud, md, vd = self.mock_read_maas_seed_url( valid, "http://example.com/foo") self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 70d50de4..cdbd1e1a 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -51,9 +51,6 @@ class TestNoCloudDataSource(CiTestCase): class PsuedoException(Exception): pass - def my_find_devs_with(*args, **kwargs): - raise PsuedoException - self.mocks.enter_context( mock.patch.object(util, 'find_devs_with', side_effect=PsuedoException)) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7bb1b7c4..e486862d 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -528,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): expected = sorted([npre + suff for opre, npre, suff 
in files]) # create files - for (opre, npre, suff) in files: + for (opre, _npre, suff) in files: fpath = os.path.join(apt_lists_d, opre + suff) util.write_file(fpath, content=fpath) diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 02676aa6..17c53559 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -76,7 +76,7 @@ class TestNtp(FilesystemMockingTestCase): template = TIMESYNCD_TEMPLATE else: template = NTP_TEMPLATE - (confpath, template_fn) = self._generate_template(template=template) + (confpath, _template_fn) = self._generate_template(template=template) ntpconfig = copy.deepcopy(dcfg[client]) ntpconfig['confpath'] = confpath ntpconfig['template_name'] = os.path.basename(confpath) diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 1080e135..20c87efa 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -50,12 +50,12 @@ class TestTemplates(test_helpers.CiTestCase): def test_detection(self): blob = "## template:cheetah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual("", contents.strip()) blob = "blahblah $blah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, _contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual(blob, contents) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e04ea031..84941c7d 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -774,11 +774,11 @@ class TestSubp(helpers.CiTestCase): def test_subp_reads_env(self): with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): - out, err = util.subp(self.printenv + ['FOO'], capture=True) + out, _err = util.subp(self.printenv + ['FOO'], capture=True) self.assertEqual('FOO=BAR', out.splitlines()[0]) def test_subp_env_and_update_env(self): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, env={'FOO': 'BAR'}, update_env={'HOME': '/myhome', 'K2': 'V2'}) @@ -788,7 +788,7 @@ class TestSubp(helpers.CiTestCase): def test_subp_update_env(self): extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} with mock.patch.dict("os.environ", values=extra): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, update_env={'HOME': '/myhome', 'K2': 'V2'}) -- cgit v1.2.3 From 4952a8545b61ceb2fe26a933d2f64020d0281703 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Apr 2018 12:22:23 -0600 Subject: set_passwords: Add newline to end of sshd config, only restart if updated. This admittedly does a fairly extensive re-factor to simply add a newline to the end of sshd_config. It makes the ssh_config updating portion of set_passwords more testable and adds tests for that. The new function is in 'update_ssh_config_lines' which allows you to update a config with multiple changes even though only a single one is currently used. We also only restart the ssh daemon now if a change was made to the config file. Before it was always restarted if the user specified a value for ssh_pwauth other than 'unchanged'. Thanks to Lorens Kockum for initial diagnosis and patch. 
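For illustration, a sketch of the new helper's contract (the config path
here is made up for the example, not part of the change):

    from cloudinit import ssh_util, util

    mycfg = '/tmp/sshd_config_example'  # hypothetical path
    util.write_file(mycfg, 'MyKey ORIG_VAL')
    # A change rewrites the file, newline-terminated, and returns True.
    assert ssh_util.update_ssh_config({'MyKey': 'NEW_VAL'}, mycfg)
    assert util.load_file(mycfg).endswith('\n')
    # Re-applying the same value is a no-op and returns False, which is
    # how the caller knows it can skip restarting sshd.
    assert not ssh_util.update_ssh_config({'MyKey': 'NEW_VAL'}, mycfg)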
LP: #1677205 --- cloudinit/config/cc_set_passwords.py | 105 ++++++++++++--------------- cloudinit/config/tests/test_set_passwords.py | 71 ++++++++++++++++++ cloudinit/ssh_util.py | 70 ++++++++++++++++-- tests/unittests/test_sshutil.py | 97 ++++++++++++++++++++++++- 4 files changed, 273 insertions(+), 70 deletions(-) create mode 100644 cloudinit/config/tests/test_set_passwords.py (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index bb24d57f..5ef97376 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -68,16 +68,57 @@ import re import sys from cloudinit.distros import ug_util -from cloudinit import ssh_util +from cloudinit import log as logging +from cloudinit.ssh_util import update_ssh_config from cloudinit import util from string import ascii_letters, digits +LOG = logging.getLogger(__name__) + # We are removing certain 'painful' letters/numbers PW_SET = (''.join([x for x in ascii_letters + digits if x not in 'loLOI01'])) +def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): + """Apply sshd PasswordAuthentication changes. + + @param pw_auth: config setting from 'pw_auth'. + Best given as True, False, or "unchanged". + @param service_cmd: The service command list (['service']) + @param service_name: The name of the sshd service for the system. + + @return: None""" + cfg_name = "PasswordAuthentication" + if service_cmd is None: + service_cmd = ["service"] + + if util.is_true(pw_auth): + cfg_val = 'yes' + elif util.is_false(pw_auth): + cfg_val = 'no' + else: + bmsg = "Leaving ssh config '%s' unchanged." % cfg_name + if pw_auth is None or pw_auth.lower() == 'unchanged': + LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) + else: + LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) + return + + updated = update_ssh_config({cfg_name: cfg_val}) + if not updated: + LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + return + + if 'systemctl' in service_cmd: + cmd = list(service_cmd) + ["restart", service_name] + else: + cmd = list(service_cmd) + [service_name, "restart"] + util.subp(cmd) + LOG.debug("Restarted the ssh daemon.") + + def handle(_name, cfg, cloud, log, args): if len(args) != 0: # if run from command line, and give args, wipe the chpasswd['list'] @@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args): if expired_users: log.debug("Expired passwords for: %s users", expired_users) - change_pwauth = False - pw_auth = None - if 'ssh_pwauth' in cfg: - if util.is_true(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'yes' - elif util.is_false(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'no' - elif str(cfg['ssh_pwauth']).lower() == 'unchanged': - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not str(cfg['ssh_pwauth']).strip(): - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not cfg['ssh_pwauth']: - log.debug('Leaving auth line unchanged') - change_pwauth = False - else: - msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth'] - util.logexc(log, msg) - - if change_pwauth: - replaced_auth = False - - # See: man sshd_config - old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) - new_lines = [] - i = 0 - for (i, line) in enumerate(old_lines): - # Keywords are case-insensitive and arguments are case-sensitive - if line.key == 'passwordauthentication': - log.debug("Replacing auth line %s with %s", i + 1, pw_auth) - replaced_auth = True - 
line.value = pw_auth - new_lines.append(line) - - if not replaced_auth: - log.debug("Adding new auth line %s", i + 1) - replaced_auth = True - new_lines.append(ssh_util.SshdConfigLine('', - 'PasswordAuthentication', - pw_auth)) - - lines = [str(l) for l in new_lines] - util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), - copy_mode=True) - - try: - cmd = cloud.distro.init_cmd # Default service - cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh')) - cmd.append('restart') - if 'systemctl' in cmd: # Switch action ordering - cmd[1], cmd[2] = cmd[2], cmd[1] - cmd = filter(None, cmd) # Remove empty arguments - util.subp(cmd) - log.debug("Restarted the ssh daemon") - except Exception: - util.logexc(log, "Restarting of the ssh daemon failed") + handle_ssh_pwauth( + cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd, + service_name=cloud.distro.get_option('ssh_svcname', 'ssh')) if len(errors): log.debug("%s errors occured, re-raising the last one", len(errors)) diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py new file mode 100644 index 00000000..b051ec82 --- /dev/null +++ b/cloudinit/config/tests/test_set_passwords.py @@ -0,0 +1,71 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import mock + +from cloudinit.config import cc_set_passwords as setpass +from cloudinit.tests.helpers import CiTestCase +from cloudinit import util + +MODPATH = "cloudinit.config.cc_set_passwords." + + +class TestHandleSshPwauth(CiTestCase): + """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth.""" + + with_logs = True + + @mock.patch(MODPATH + "util.subp") + def test_unknown_value_logs_warning(self, m_subp): + setpass.handle_ssh_pwauth("floo") + self.assertIn("Unrecognized value: ssh_pwauth=floo", + self.logs.getvalue()) + m_subp.assert_not_called() + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): + """If systemctl in service cmd: systemctl restart name.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["systemctl"], service_name="myssh") + self.assertEqual(mock.call(["systemctl", "restart", "myssh"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): + """If systemctl in service cmd: systemctl restart name.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["service"], service_name="myssh") + self.assertEqual(mock.call(["service", "myssh", "restart"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=False) + @mock.patch(MODPATH + "util.subp") + def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): + """If config is not updated, then no system restart should be done.""" + setpass.handle_ssh_pwauth(True) + m_subp.assert_not_called() + self.assertIn("No need to restart ssh", self.logs.getvalue()) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): + """If 'unchanged', then no updates to config and no restart.""" + setpass.handle_ssh_pwauth( + "unchanged", service_cmd=["systemctl"], service_name="myssh") + m_update_ssh_config.assert_not_called() + m_subp.assert_not_called() + + @mock.patch(MODPATH + "util.subp") + def test_valid_change_values(self, m_subp): + 
"""If value is a valid changen value, then update should be called.""" + upname = MODPATH + "update_ssh_config" + optname = "PasswordAuthentication" + for value in util.FALSE_STRINGS + util.TRUE_STRINGS: + optval = "yes" if value in util.TRUE_STRINGS else "no" + with mock.patch(upname, return_value=False) as m_update: + setpass.handle_ssh_pwauth(value) + m_update.assert_called_with({optname: optval}) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 882517f5..73c31772 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -279,24 +279,28 @@ class SshdConfigLine(object): def parse_ssh_config(fname): + if not os.path.isfile(fname): + return [] + return parse_ssh_config_lines(util.load_file(fname).splitlines()) + + +def parse_ssh_config_lines(lines): # See: man sshd_config # The file contains keyword-argument pairs, one per line. # Lines starting with '#' and empty lines are interpreted as comments. # Note: key-words are case-insensitive and arguments are case-sensitive - lines = [] - if not os.path.isfile(fname): - return lines - for line in util.load_file(fname).splitlines(): + ret = [] + for line in lines: line = line.strip() if not line or line.startswith("#"): - lines.append(SshdConfigLine(line)) + ret.append(SshdConfigLine(line)) continue try: key, val = line.split(None, 1) except ValueError: key, val = line.split('=', 1) - lines.append(SshdConfigLine(line, key, val)) - return lines + ret.append(SshdConfigLine(line, key, val)) + return ret def parse_ssh_config_map(fname): @@ -310,4 +314,56 @@ def parse_ssh_config_map(fname): ret[line.key] = line.value return ret + +def update_ssh_config(updates, fname=DEF_SSHD_CFG): + """Read fname, and update if changes are necessary. + + @param updates: dictionary of desired values {Option: value} + @return: boolean indicating if an update was done.""" + lines = parse_ssh_config(fname) + changed = update_ssh_config_lines(lines=lines, updates=updates) + if changed: + util.write_file( + fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True) + return len(changed) != 0 + + +def update_ssh_config_lines(lines, updates): + """Update the ssh config lines per updates. + + @param lines: array of SshdConfigLine. This array is updated in place. 
+ @param updates: dictionary of desired values {Option: value} + @return: A list of keys in updates that were changed.""" + found = set() + changed = [] + + # Keywords are case-insensitive and arguments are case-sensitive + casemap = dict([(k.lower(), k) for k in updates.keys()]) + + for (i, line) in enumerate(lines, start=1): + if not line.key: + continue + if line.key in casemap: + key = casemap[line.key] + value = updates[key] + found.add(key) + if line.value == value: + LOG.debug("line %d: option %s already set to %s", + i, key, value) + else: + changed.append(key) + LOG.debug("line %d: option %s updated %s -> %s", i, + key, line.value, value) + line.value = value + + if len(found) != len(updates): + for key, value in updates.items(): + if key in found: + continue + changed.append(key) + lines.append(SshdConfigLine('', key, value)) + LOG.debug("line %d: option %s added with %s", + len(lines), key, value) + return changed + # vi: ts=4 expandtab diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 4c62c8be..73ae897f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -4,6 +4,7 @@ from mock import patch from cloudinit import ssh_util from cloudinit.tests import helpers as test_helpers +from cloudinit import util VALID_CONTENT = { @@ -56,7 +57,7 @@ TEST_OPTIONS = ( 'user \"root\".\';echo;sleep 10"') -class TestAuthKeyLineParser(test_helpers.TestCase): +class TestAuthKeyLineParser(test_helpers.CiTestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) @@ -126,7 +127,7 @@ class TestAuthKeyLineParser(test_helpers.TestCase): self.assertFalse(key.valid()) -class TestUpdateAuthorizedKeys(test_helpers.TestCase): +class TestUpdateAuthorizedKeys(test_helpers.CiTestCase): def test_new_keys_replace(self): """new entries with the same base64 should replace old.""" @@ -168,7 +169,7 @@ class TestUpdateAuthorizedKeys(test_helpers.TestCase): self.assertEqual(expected, found) -class TestParseSSHConfig(test_helpers.TestCase): +class TestParseSSHConfig(test_helpers.CiTestCase): def setUp(self): self.load_file_patch = patch('cloudinit.ssh_util.util.load_file') @@ -235,4 +236,94 @@ class TestParseSSHConfig(test_helpers.TestCase): self.assertEqual('foo', ret[0].key) self.assertEqual('bar', ret[0].value) + +class TestUpdateSshConfigLines(test_helpers.CiTestCase): + """Test the update_ssh_config_lines method.""" + exlines = [ + "#PasswordAuthentication yes", + "UsePAM yes", + "# Comment line", + "AcceptEnv LANG LC_*", + "X11Forwarding no", + ] + pwauth = "PasswordAuthentication" + + def check_line(self, line, opt, val): + self.assertEqual(line.key, opt.lower()) + self.assertEqual(line.value, val) + self.assertIn(opt, str(line)) + self.assertIn(val, str(line)) + + def test_new_option_added(self): + """A single update of non-existing option.""" + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'}) + self.assertEqual(['MyKey'], result) + self.check_line(lines[-1], "MyKey", "MyVal") + + def test_commented_out_not_updated_but_appended(self): + """Implementation does not un-comment and update lines.""" + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {self.pwauth: "no"}) + self.assertEqual([self.pwauth], result) + self.check_line(lines[-1], self.pwauth, "no") + + def test_single_option_updated(self): + """A single update should have change made and line updated.""" + 
opt, val = ("UsePAM", "no") + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {opt: val}) + self.assertEqual([opt], result) + self.check_line(lines[1], opt, val) + + def test_multiple_updates_with_add(self): + """Verify multiple updates some added some changed, some not.""" + updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval", + "AcceptEnv": "LANG ADD LC_*"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result)) + self.check_line(lines[3], "AcceptEnv", updates["AcceptEnv"]) + + def test_return_empty_if_no_changes(self): + """If there are no changes, then return should be empty list.""" + updates = {"UsePAM": "yes"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual([], result) + self.assertEqual(self.exlines, [str(l) for l in lines]) + + def test_keycase_not_modified(self): + """Original case of key should not be changed on update. + This behavior is to keep original config as much intact as can be.""" + updates = {"usepam": "no"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual(["usepam"], result) + self.assertEqual("UsePAM no", str(lines[1])) + + +class TestUpdateSshConfig(test_helpers.CiTestCase): + cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""]) + + def test_modified(self): + mycfg = self.tmp_path("ssh_config_1") + util.write_file(mycfg, self.cfgdata) + ret = ssh_util.update_ssh_config({"MyKey": "NEW_VAL"}, mycfg) + self.assertTrue(ret) + found = util.load_file(mycfg) + self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found) + # assert there is a newline at end of file (LP: #1677205) + self.assertEqual('\n', found[-1]) + + def test_not_modified(self): + mycfg = self.tmp_path("ssh_config_2") + util.write_file(mycfg, self.cfgdata) + with patch("cloudinit.ssh_util.util.write_file") as m_write_file: + ret = ssh_util.update_ssh_config({"MyKey": "ORIG_VAL"}, mycfg) + self.assertFalse(ret) + self.assertEqual(self.cfgdata, util.load_file(mycfg)) + m_write_file.assert_not_called() + + # vi: ts=4 expandtab -- cgit v1.2.3 From 5037252ca5dc54c6d978b23dba063ac5fabc98fa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Apr 2018 20:25:48 -0400 Subject: schema: in validation, raise ImportError if strict but no jsonschema. validate_cloudconfig_schema with strict=True would not actually validate if there was no jsonschema available. That seems kind of strange. The change here is to make it raise an exception if strict was passed in. And then to fix the one test that needed a skipIfJsonSchema wrapper. 
 cloudinit/config/tests/test_snap.py                | 41 +++++++-----------
 cloudinit/config/tests/test_ubuntu_advantage.py    | 30 +++++++++++++-
 cloudinit/tests/helpers.py                         | 19 +++++++++
 .../unittests/test_handler/test_handler_bootcmd.py | 42 +++++++++----------
 .../unittests/test_handler/test_handler_runcmd.py  | 48 +++++++++++-----------
 5 files changed, 104 insertions(+), 76 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 492d2d46..34c80f1e 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import (
 from cloudinit.config.schema import validate_cloudconfig_schema
 from cloudinit import util
 from cloudinit.tests.helpers import (
-    CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema)
+    CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
 
 
 SYSTEM_USER_ASSERTION = """\
@@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase):
 
 
 @skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
 
     with_logs = True
+    schema = schema
 
     def test_schema_warns_on_snap_not_as_dict(self):
         """If the snap configuration is not a dict, emit a warning."""
@@ -342,39 +343,27 @@ class TestSchema(CiTestCase):
 
     def test_duplicates_are_fine_array_array(self):
         """Duplicated commands array/array entries are allowed."""
-        byebye = ["echo", "bye"]
-        try:
-            cfg = {'snap': {'commands': [byebye, byebye]}}
-            validate_cloudconfig_schema(cfg, schema, strict=True)
-        except schema.SchemaValidationError as e:
-            self.fail("command entries can be duplicate.")
+        self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+            "command entries can be duplicate.")
 
     def test_duplicates_are_fine_array_string(self):
         """Duplicated commands array/string entries are allowed."""
-        byebye = "echo bye"
-        try:
-            cfg = {'snap': {'commands': [byebye, byebye]}}
-            validate_cloudconfig_schema(cfg, schema, strict=True)
-        except schema.SchemaValidationError as e:
-            self.fail("command entries can be duplicate.")
+        self.assertSchemaValid(
+            {'commands': ["echo bye", "echo bye"]},
+            "command entries can be duplicate.")
 
     def test_duplicates_are_fine_dict_array(self):
         """Duplicated commands dict/array entries are allowed."""
-        byebye = ["echo", "bye"]
-        try:
-            cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}}
-            validate_cloudconfig_schema(cfg, schema, strict=True)
-        except schema.SchemaValidationError as e:
-            self.fail("command entries can be duplicate.")
+        self.assertSchemaValid(
+            {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+            "command entries can be duplicate.")
 
     def test_duplicates_are_fine_dict_string(self):
         """Duplicated commands dict/string entries are allowed."""
-        byebye = "echo bye"
-        try:
-            cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}}
-            validate_cloudconfig_schema(cfg, schema, strict=True)
-        except schema.SchemaValidationError as e:
-            self.fail("command entries can be duplicate.")
+        self.assertSchemaValid(
+            {'commands': {'00': "echo bye", '01': "echo bye"}},
+            "command entries can be duplicate.")
 
 
 class TestHandle(CiTestCase):
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index f2a59faf..f1beeff8 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import (
     handle, maybe_install_ua_tools,
run_commands, schema)
 from cloudinit.config.schema import validate_cloudconfig_schema
 from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from cloudinit.tests.helpers import (
+    CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
 
 
 # Module path used in mocks
@@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase):
 
 
 @skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
 
     with_logs = True
+    schema = schema
 
     def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
         """If ubuntu-advantage configuration is not a dict, emit a warning."""
@@ -169,6 +171,30 @@ class TestSchema(CiTestCase):
             {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
         self.assertEqual('', self.logs.getvalue())
 
+    def test_duplicates_are_fine_array_array(self):
+        """Duplicated commands array/array entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_array_string(self):
+        """Duplicated commands array/string entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': ["echo bye", "echo bye"]},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_dict_array(self):
+        """Duplicated commands dict/array entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+            "command entries can be duplicate.")
+
+    def test_duplicates_are_fine_dict_string(self):
+        """Duplicated commands dict/string entries are allowed."""
+        self.assertSchemaValid(
+            {'commands': {'00': "echo bye", '01': "echo bye"}},
+            "command entries can be duplicate.")
+
 
 class TestHandle(CiTestCase):
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 5aada6e7..4999f1f6 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -24,6 +24,8 @@ try:
 except ImportError:
     from ConfigParser import ConfigParser
 
+from cloudinit.config.schema import (
+    SchemaValidationError, validate_cloudconfig_schema)
 from cloudinit import helpers as ch
 from cloudinit import util
 
@@ -312,6 +314,23 @@ class HttprettyTestCase(CiTestCase):
         super(HttprettyTestCase, self).tearDown()
 
 
+class SchemaTestCaseMixin(unittest2.TestCase):
+
+    def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
+        """Assert the config is valid per self.schema.
+
+        If there is only one top level key in the schema properties, then
+        the cfg will be put under that key."""
+        props = list(self.schema.get('properties'))
+        # put cfg under top level key if there is only one in the schema
+        if len(props) == 1:
+            cfg = {props[0]: cfg}
+        try:
+            validate_cloudconfig_schema(cfg, self.schema, strict=True)
+        except SchemaValidationError:
+            self.fail(msg)
+
+
 def populate_dir(path, files):
     if not os.path.exists(path):
         os.makedirs(path)
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index c3abedde..b1375269 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -1,9 +1,10 @@
 # This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_bootcmd, schema +from cloudinit.config.cc_bootcmd import handle, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from cloudinit.tests.helpers import ( + CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) import logging import tempfile @@ -50,7 +51,7 @@ class TestBootcmd(CiTestCase): """When the provided config doesn't contain bootcmd, skip it.""" cfg = {} mycloud = self._get_cloud('ubuntu') - cc_bootcmd.handle('notimportant', cfg, mycloud, LOG, None) + handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'bootcmd' key", self.logs.getvalue()) @@ -60,7 +61,7 @@ class TestBootcmd(CiTestCase): invalid_config = {'bootcmd': 1} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError) as context_manager: - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) self.assertEqual( "Input to shellify was type 'int'. Expected list or tuple.", @@ -76,7 +77,7 @@ class TestBootcmd(CiTestCase): invalid_config = {'bootcmd': 1} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError): - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn( 'Invalid config:\nbootcmd: 1 is not of type \'array\'', self.logs.getvalue()) @@ -93,7 +94,7 @@ class TestBootcmd(CiTestCase): 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError) as context_manager: - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'bootcmd.1: 20 is not valid under any of the given schemas', 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given' @@ -117,7 +118,7 @@ class TestBootcmd(CiTestCase): 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - cc_bootcmd.handle('cc_bootcmd', valid_config, cc, LOG, []) + handle('cc_bootcmd', valid_config, cc, LOG, []) self.assertEqual(my_id + ' iid-datasource-none\n', util.load_file(out_file)) @@ -128,7 +129,7 @@ class TestBootcmd(CiTestCase): with mock.patch(self._etmpfile_path, FakeExtendedTempFile): with self.assertRaises(util.ProcessExecutionError) as ctxt_manager: - cc_bootcmd.handle('does-not-matter', valid_config, cc, LOG, []) + handle('does-not-matter', valid_config, cc, LOG, []) self.assertIn( 'Unexpected error while running command.\n' "Command: ['/bin/sh',", @@ -138,26 +139,21 @@ class TestBootcmd(CiTestCase): self.logs.getvalue()) -class TestSchema(CiTestCase): +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): """Directly test schema rather than through handle.""" + schema = schema + def test_duplicates_are_fine_array_array(self): - """Duplicated array entries are allowed.""" - try: - byebye = ["echo", "bye"] - schema.validate_cloudconfig_schema( - {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries as list are allowed to be duplicates.") + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + ["byebye", "byebye"], 'command entries can be duplicate') def 
test_duplicates_are_fine_array_string(self): - """Duplicated array entries entries are allowed in values of array.""" - try: - byebye = "echo bye" - schema.validate_cloudconfig_schema( - {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries are allowed to be duplicates.") + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + ["echo bye", "echo bye"], "command entries can be duplicate.") # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py index ee1981d1..9ce334ac 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/test_handler/test_handler_runcmd.py @@ -1,10 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_runcmd, schema +from cloudinit.config.cc_runcmd import handle, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, + skipUnlessJsonSchema) import logging import os @@ -35,7 +36,7 @@ class TestRuncmd(FilesystemMockingTestCase): """When the provided config doesn't contain runcmd, skip it.""" cfg = {} mycloud = self._get_cloud('ubuntu') - cc_runcmd.handle('notimportant', cfg, mycloud, LOG, None) + handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'runcmd' key", self.logs.getvalue()) @@ -44,7 +45,7 @@ class TestRuncmd(FilesystemMockingTestCase): """Commands which can't be converted to shell will raise errors.""" invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Failed to shellify 1 into file' ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', @@ -59,7 +60,7 @@ class TestRuncmd(FilesystemMockingTestCase): """ invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Invalid config:\nruncmd: 1 is not of type \'array\'', self.logs.getvalue()) @@ -75,7 +76,7 @@ class TestRuncmd(FilesystemMockingTestCase): invalid_config = { 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'runcmd.1: 20 is not valid under any of the given schemas', 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' @@ -90,7 +91,7 @@ class TestRuncmd(FilesystemMockingTestCase): """Valid runcmd schema is written to a runcmd shell script.""" valid_config = {'runcmd': [['ls', '/']]} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', valid_config, cc, LOG, []) + handle('cc_runcmd', valid_config, cc, LOG, []) runcmd_file = os.path.join( self.new_root, 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') @@ -99,25 +100,22 @@ class TestRuncmd(FilesystemMockingTestCase): self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) -class TestSchema(CiTestCase): +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): """Directly test schema rather than through 
handle.""" - def test_duplicates_are_fine_array(self): - """Duplicated array entries are allowed.""" - try: - byebye = ["echo", "bye"] - schema.validate_cloudconfig_schema( - {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries as list are allowed to be duplicates.") - - def test_duplicates_are_fine_string(self): - """Duplicated string entries are allowed.""" - try: - byebye = "echo bye" - schema.validate_cloudconfig_schema( - {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries are allowed to be duplicates.") + schema = schema + + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + [["echo", "bye"], ["echo", "bye"]], + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + ["echo bye", "echo bye"], + "command entries can be duplicate.") # vi: ts=4 expandtab -- cgit v1.2.3 From 8e111502a0f9d437ff6045f1269c95e5756fc43b Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Fri, 20 Apr 2018 20:30:44 -0400 Subject: DataSourceSmartOS: list() should always return a list If customer_metadata has no keys, the KEYS request returns an empty string. Callers of the list() method expect a list to be returned and will give a stack trace if this expectation is not met. LP: #1763480 --- cloudinit/sources/DataSourceSmartOS.py | 6 +++--- tests/unittests/test_datasource/test_smartos.py | 26 +++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 5 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c8998b40..d4386fec 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -455,9 +455,9 @@ class JoyentMetadataClient(object): def list(self): result = self.request(rtype='KEYS') - if result: - result = result.split('\n') - return result + if not result: + return [] + return result.split('\n') def put(self, key, val): param = b' '.join([base64.b64encode(i.encode()) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 2bea7a17..6fd431f9 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -319,6 +319,12 @@ MOCK_RETURNS = { DMI_DATA_RETURN = 'smartdc' +# Useful for calculating the length of a frame body. A SUCCESS body will be +# followed by more characters or be one character less if SUCCESS with no +# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
+SUCCESS_LEN = len('0123abcd SUCCESS ') +NOTFOUND_LEN = len('0123abcd NOTFOUND') + class PsuedoJoyentClient(object): def __init__(self, data=None): @@ -651,7 +657,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): self.response_parts = { 'command': 'SUCCESS', 'crc': 'b5a9ff00', - 'length': 17 + len(b64e(self.metadata_value)), + 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), 'payload': b64e(self.metadata_value), 'request_id': '{0:08x}'.format(self.request_id), } @@ -787,7 +793,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): def test_get_metadata_returns_None_if_value_not_found(self): self.response_parts['payload'] = '' self.response_parts['command'] = 'NOTFOUND' - self.response_parts['length'] = 17 + self.response_parts['length'] = NOTFOUND_LEN client = self._get_client() client._checksum = lambda _: self.response_parts['crc'] self.assertIsNone(client.get('some_key')) @@ -838,6 +844,22 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): client.open_transport() self.assertTrue(reader.emptied) + def test_list_metadata_returns_list(self): + parts = ['foo', 'bar'] + value = b64e('\n'.join(parts)) + self.response_parts['payload'] = value + self.response_parts['crc'] = '40873553' + self.response_parts['length'] = SUCCESS_LEN + len(value) + client = self._get_client() + self.assertEqual(client.list(), parts) + + def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): + del self.response_parts['payload'] + self.response_parts['length'] = SUCCESS_LEN - 1 + self.response_parts['crc'] = '14e563ba' + client = self._get_client() + self.assertEqual(client.list(), []) + class TestNetworkConversion(TestCase): def test_convert_simple(self): -- cgit v1.2.3 From 23479881f51bae7a3f5743ce677ed82317ea8b9f Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Fri, 20 Apr 2018 20:56:36 -0400 Subject: DataSourceSmartOS: sdc:hostname is ignored There are three potential sources of the hostname, one of which is documented SmartOS's vmadm(1M) via the hostname property. That property's value is retrieved via the sdc:hostname key. The other two sources for the hostname are a hostname key in customer_metadata and the VM's uuid (sdc:uuid). Of these three, the sdc:hostname value is not used in a meaningful way by DataSourceSmartOS. This fix changes the fallback mechanism when hostname is not specified in customer_metadata. The order of precedence for setting the hostname is now 1) hostname in customer_metadata, 2) sdc:hostname, then 3) sdc:uuid. LP: #1765085 --- cloudinit/sources/DataSourceSmartOS.py | 10 +++++++-- tests/unittests/test_datasource/test_smartos.py | 28 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index d4386fec..0ef10035 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -11,7 +11,7 @@ # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you -# would send "GET hostname" on /dev/ttyS1. +# would send "GET sdc:hostname" on /dev/ttyS1. # For Linux Guests running in LX-Brand Zones on SmartOS hosts # a socket (/native/.zonecontrol/metadata.sock) is used instead # of a serial console. 
@@ -273,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource): write_boot_content(u_data, u_data_f) # Handle the cloud-init regular meta + + # The hostname may or may not be qualified with the local domain name. + # This follows section 3.14 of RFC 2132. if not md['local-hostname']: - md['local-hostname'] = md['instance-id'] + if md['hostname']: + md['local-hostname'] = md['hostname'] + else: + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 6fd431f9..b926263f 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -437,6 +437,34 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.assertEqual(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) + def test_hostname_if_no_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['hostname'], + dsrc.metadata['local-hostname']) + + def test_sdc_hostname_if_no_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + del my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['sdc:hostname'], + dsrc.metadata['local-hostname']) + + def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + del my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['sdc:uuid'], + dsrc.metadata['local-hostname']) + def test_userdata(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() -- cgit v1.2.3 From 4ed164592fe8cb15758cacf3cb3f8c7d5ab7c82e Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Mon, 23 Apr 2018 09:29:39 -0400 Subject: DataSourceSmartOS: add locking of serial device. cloud-init and mdata-get each have their own implementation of the SmartOS metadata protocol. If cloud-init and other services that call mdata-get are run concurrently, crosstalk on the serial port can cause them both to become confused. This change makes it so that cloud-init uses the same cooperative locking scheme that's used by mdata-get, thus preventing cross-talk between mdata-get and cloud-init. For testing, a VM running on a SmartOS host and pyserial are required. If the tests are run on a platform other than SmartOS, those that use a real serial port are skipped. pyserial remains commented in requirements.txt because most testers will not be running atop SmartOS. 
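
[Editor's note] The cooperative locking added by this commit is an
exclusive fcntl lock taken on the open serial device, mirroring what
mdata-get does.  Below is a minimal standalone sketch; the device path
comes from the protocol comments in DataSourceSmartOS.py, and the plain
GET request is illustrative only:

    import fcntl

    import serial  # pyserial

    ser = serial.Serial('/dev/ttyS1', timeout=60)
    # Block until any other well-behaved client (such as a concurrent
    # mdata-get) releases its lock; the lock is dropped when ser closes.
    fcntl.lockf(ser, fcntl.LOCK_EX)
    try:
        ser.write(b'GET sdc:hostname\n')
        print(ser.readline())
    finally:
        ser.close()
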
LP: #1746605 --- cloudinit/sources/DataSourceSmartOS.py | 2 + tests/unittests/test_datasource/test_smartos.py | 67 ++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 2 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 0ef10035..4ea00eb1 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -23,6 +23,7 @@ import base64 import binascii import errno +import fcntl import json import os import random @@ -526,6 +527,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): if not ser.isOpen(): raise SystemError("Unable to open %s" % self.device) self.fp = ser + fcntl.lockf(ser, fcntl.LOCK_EX) self._flush() self._negotiate() diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index b926263f..706e8eb8 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -16,23 +16,27 @@ from __future__ import print_function from binascii import crc32 import json +import multiprocessing import os import os.path import re import shutil +import signal import stat import tempfile +import unittest2 import uuid from cloudinit import serial from cloudinit.sources import DataSourceSmartOS from cloudinit.sources.DataSourceSmartOS import ( - convert_smartos_network_data as convert_net) + convert_smartos_network_data as convert_net, + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ) import six from cloudinit import helpers as c_helpers -from cloudinit.util import b64e +from cloudinit.util import (b64e, subp) from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase @@ -1023,4 +1027,63 @@ class TestNetworkConversion(TestCase): found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) + +@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, + "Only supported on KVM and bhyve guests under SmartOS") +@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), + "Requires write access to " + SERIAL_DEVICE) +class TestSerialConcurrency(TestCase): + """ + This class tests locking on an actual serial port, and as such can only + be run in a kvm or bhyve guest running on a SmartOS host. A test run on + a metadata socket will not be valid because a metadata socket ensures + there is only one session over a connection. In contrast, in the + absence of proper locking multiple processes opening the same serial + port can corrupt each others' exchanges with the metadata server. + """ + def setUp(self): + self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) + self.mdata_proc.start() + super(TestSerialConcurrency, self).setUp() + + def tearDown(self): + # os.kill() rather than mdata_proc.terminate() to avoid console spam. + os.kill(self.mdata_proc.pid, signal.SIGKILL) + self.mdata_proc.join() + super(TestSerialConcurrency, self).tearDown() + + def start_mdata_loop(self): + """ + The mdata-get command is repeatedly run in a separate process so + that it may try to race with metadata operations performed in the + main test process. Use of mdata-get is better than two processes + using the protocol implementation in DataSourceSmartOS because we + are testing to be sure that cloud-init and mdata-get respect each + others locks. 
+ """ + rcs = list(range(0, 256)) + while True: + subp(['mdata-get', 'sdc:routes'], rcs=rcs) + + def test_all_keys(self): + self.assertIsNotNone(self.mdata_proc.pid) + ds = DataSourceSmartOS + keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] + keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) + + client = ds.jmc_client_factory() + self.assertIsNotNone(client) + + # The behavior that we are testing for was observed mdata-get running + # 10 times at roughly the same time as cloud-init fetched each key + # once. cloud-init would regularly see failures before making it + # through all keys once. + for _ in range(0, 3): + for key in keys: + # We don't care about the return value, just that it doesn't + # thrown any exceptions. + client.get(key) + + self.assertIsNone(self.mdata_proc.exitcode) + # vi: ts=4 expandtab -- cgit v1.2.3 From a57928d3c314d9568712cd190cb1e721e14c108b Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 25 Apr 2018 16:18:27 -0400 Subject: sysconfig: dhcp6 subnet type should not imply dhcpv4 BOOTPROTO=dhcp in sysconfig enables DHCPv4 and we should not do this implicitly when 'dhcp6' subnet is specified. In case both dhcpv4 and dhcpv6 are needed users should specify both: subnets: - type: dhcp6 - type: dhcp Fix the current code and add a dhcpv6 only test. Signed-off-by: Vitaly Kuznetsov --- cloudinit/net/sysconfig.py | 1 - tests/unittests/test_net.py | 59 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 7a7f5093..e53b9f1b 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -287,7 +287,6 @@ class Renderer(renderer.Renderer): if subnet_type == 'dhcp6': iface_cfg['IPV6INIT'] = True iface_cfg['DHCPV6C'] = True - iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index c12a487a..84839fd7 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -553,6 +553,43 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_only': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -740,7 +777,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true """miimon=100" BONDING_SLAVE0=eth1 BONDING_SLAVE1=eth2 - BOOTPROTO=dhcp + BOOTPROTO=none DEVICE=bond0 DHCPV6C=yes IPV6INIT=yes @@ -1829,6 +1866,12 @@ USERCTL=no self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + def test_dhcpv6_only_config(self): + entry = NETWORK_CONFIGS['dhcpv6_only'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._assert_headers(found) + class TestEniNetRendering(CiTestCase): @@ -2277,6 +2320,13 @@ 
class TestNetplanRoundTrip(CiTestCase):
             entry['expected_netplan'].splitlines(),
             files['/etc/netplan/50-cloud-init.yaml'].splitlines())
 
+    def testsimple_render_dhcpv6_only(self):
+        entry = NETWORK_CONFIGS['dhcpv6_only']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
     def testsimple_render_all(self):
         entry = NETWORK_CONFIGS['all']
         files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -2345,6 +2395,13 @@ class TestEniRoundTrip(CiTestCase):
             entry['expected_eni'].splitlines(),
             files['/etc/network/interfaces'].splitlines())
 
+    def testsimple_render_dhcpv6_only(self):
+        entry = NETWORK_CONFIGS['dhcpv6_only']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_eni'].splitlines(),
+            files['/etc/network/interfaces'].splitlines())
+
     def testsimple_render_v4_and_v6_static(self):
         entry = NETWORK_CONFIGS['v4_and_v6_static']
         files = self._render_and_read(network_config=yaml.load(entry['yaml']))
-- 
cgit v1.2.3


From 4731c8da25ee9bfbcf0ade1d7ffec95814d8622a Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 26 Apr 2018 16:35:23 -0400
Subject: net: detect unstable network names and trigger a settle if needed

The cloud-init-local.service expects that any network device name changes
have already been completed by the kernel or udev daemon.

In some situations we've found that the renaming of interfaces from kernel
names (eth0, eth1, etc) to their persistent names (eno1, ens3, enp0s1,
etc) may happen after cloud-init-local has started, where it reads values
from sysfs about what network devices are present, and which device to use
as a fallback nic.

Subsequently, cloud-init-local would write out network configuration for a
kernel device name which would no longer be present by the time that
networking services start to bring up the devices.  The result is that the
instance does not get networking configured.  Prior to use of
systemd-networkd, the Ubuntu 'networking.service' unit included a call to
udevadm settle, which is why this race is not seen on a Xenial system.

This change adds the ability to detect whether an interface has a stable
name; if we find interfaces without stable names, and stable names have
not been disabled (net.ifnames=0 in /proc/cmdline), then cloud-init will
invoke udevadm settle.

LP: #1766287
---
 cloudinit/config/cc_disk_setup.py       | 12 ++----
 cloudinit/net/__init__.py               | 26 ++++++++++++
 cloudinit/net/tests/test_init.py        |  1 +
 cloudinit/sources/DataSourceAltCloud.py |  5 +--
 cloudinit/tests/test_util.py            | 49 ++++++++++++++++++++++
 cloudinit/util.py                       | 15 +++++++
 tests/unittests/test_net.py             | 73 +++++++++++++++++++++++++++++++--
 7 files changed, 165 insertions(+), 16 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c3e8c484..943089e0 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -680,13 +680,13 @@ def read_parttbl(device):
     reliable way to probe the partition table.
""" blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udevadm_settle() + util.udevadm_settle() try: util.subp(blkdev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) - udevadm_settle() + util.udevadm_settle() def exec_mkpart_mbr(device, layout): @@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) -def udevadm_settle(): - util.subp(['udevadm', 'settle']) - - def assert_and_settle_device(device): """Assert that device exists and settle so it is fully recognized.""" if not os.path.exists(device): - udevadm_settle() + util.udevadm_settle() if not os.path.exists(device): raise RuntimeError("Device %s did not exist and was not created " "with a udevamd settle." % device) @@ -752,7 +748,7 @@ def assert_and_settle_device(device): # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have # not yet finished. So settle again. - udevadm_settle() + util.udevadm_settle() def mkpart(device, definition): diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 80054546..43226bd0 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -107,6 +107,21 @@ def is_bond(devname): return os.path.exists(sys_dev_path(devname, "bonding")) +def is_renamed(devname): + """ + /* interface name assignment types (sysfs name_assign_type attribute) */ + #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */ + #define NET_NAME_ENUM 1 /* enumerated by kernel */ + #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */ + #define NET_NAME_USER 3 /* provided by user-space */ + #define NET_NAME_RENAMED 4 /* renamed by user-space */ + """ + name_assign_type = read_sys_net_safe(devname, 'name_assign_type') + if name_assign_type and name_assign_type in ['3', '4']: + return True + return False + + def is_vlan(devname): uevent = str(read_sys_net_safe(devname, "uevent")) return 'DEVTYPE=vlan' in uevent.splitlines() @@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None): if not blacklist_drivers: blacklist_drivers = [] + if 'net.ifnames=0' in util.get_cmdline(): + LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline') + else: + unstable = [device for device in get_devicelist() + if device != 'lo' and not is_renamed(device)] + if len(unstable): + LOG.debug('Found unstable nic names: %s; calling udevadm settle', + unstable) + msg = 'Waiting for udev events to settle' + util.log_time(LOG.debug, msg, func=util.udevadm_settle) + # get list of interfaces that could have connections invalid_interfaces = set(['lo']) potential_interfaces = set([device for device in get_devicelist() diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 276556ee..5c017d15 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase): self.sysdir = self.tmp_dir() + '/' self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') def test_generate_fallback_finds_connected_eth_with_mac(self): """generate_fallback_config finds any connected device with a mac.""" diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index e1d0055b..f6e86f34 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ 
-29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' # Shell command lists CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] -CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5'] META_DATA_NOT_SUPPORTED = { 'block-device-mapping': {}, @@ -196,9 +195,7 @@ class DataSourceAltCloud(sources.DataSource): # udevadm settle for floppy device try: - cmd = CMD_UDEVADM_SETTLE - cmd.append('--exit-if-exists=' + floppy_dev) - (cmd_out, _err) = util.subp(cmd) + (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 76eed076..3c05a437 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -212,4 +212,53 @@ class TestBlkid(CiTestCase): capture=True, decode="replace") +@mock.patch('cloudinit.util.subp') +class TestUdevadmSettle(CiTestCase): + def test_with_no_params(self, m_subp): + """called with no parameters.""" + util.udevadm_settle() + m_subp.called_once_with(mock.call(['udevadm', 'settle'])) + + def test_with_exists_and_not_exists(self, m_subp): + """with exists=file where file does not exist should invoke subp.""" + mydev = self.tmp_path("mydev") + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]) + + def test_with_exists_and_file_exists(self, m_subp): + """with exists=file where file does exist should not invoke subp.""" + mydev = self.tmp_path("mydev") + util.write_file(mydev, "foo\n") + util.udevadm_settle(exists=mydev) + self.assertIsNone(m_subp.call_args) + + def test_with_timeout_int(self, m_subp): + """timeout can be an integer.""" + timeout = 9 + util.udevadm_settle(timeout=timeout) + m_subp.called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout]) + + def test_with_timeout_string(self, m_subp): + """timeout can be a string.""" + timeout = "555" + util.udevadm_settle(timeout=timeout) + m_subp.assert_called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout]) + + def test_with_exists_and_timeout(self, m_subp): + """test call with both exists and timeout.""" + mydev = self.tmp_path("mydev") + timeout = "3" + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + ['udevadm', 'settle', '--exit-if-exists=%s' % mydev, + '--timeout=%s' % timeout]) + + def test_subp_exception_raises_to_caller(self, m_subp): + m_subp.side_effect = util.ProcessExecutionError("BOOM") + self.assertRaises(util.ProcessExecutionError, util.udevadm_settle) + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 310758dd..2828ca38 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2727,4 +2727,19 @@ def mount_is_read_write(mount_point): mount_opts = result[-1].split(',') return mount_opts[0] == 'rw' + +def udevadm_settle(exists=None, timeout=None): + """Invoke udevadm settle with optional exists and timeout parameters""" + settle_cmd = ["udevadm", "settle"] + if exists: + # skip the settle if the requested path already exists + if os.path.exists(exists): + return + settle_cmd.extend(['--exit-if-exists=%s' % exists]) + if timeout: + settle_cmd.extend(['--timeout=%s' % timeout]) + + return subp(settle_cmd) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 84839fd7..fac82678 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1442,6 +1442,7 @@ 
DEFAULT_DEV_ATTRS = { "address": "07-1C-C6-75-A4-BE", "device/driver": None, "device/device": None, + "name_assign_type": "4", } } @@ -1489,11 +1490,14 @@ class TestGenerateFallbackConfig(CiTestCase): 'eth0': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, 'eth1': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + } tmp_dir = self.tmp_dir() @@ -1549,11 +1553,13 @@ iface eth0 inet dhcp 'eth1': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, 'eth0': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, } tmp_dir = self.tmp_dir() @@ -1602,6 +1608,65 @@ iface eth1 inet dhcp ] self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + @mock.patch("cloudinit.util.udevadm_settle") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path, mock_settle): + """verify that udevadm settle is called when we find unstable names""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': False}, + 'ens4': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + net.generate_fallback_config(config_driver=True) + self.assertEqual(1, mock_settle.call_count) + + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("cloudinit.util.udevadm_settle") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_unstable_names_disabled(self, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + mock_settle, m_get_cmdline): + """verify udevadm settle not called when cmdline has net.ifnames=0""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': False}, + 'ens4': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + m_get_cmdline.return_value = 'net.ifnames=0' + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + 
net.generate_fallback_config(config_driver=True) + self.assertEqual(0, mock_settle.call_count) + class TestSysConfigRendering(CiTestCase): -- cgit v1.2.3 From 6ef92c98c3d2b127b05d6708337efc8a81e00071 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 26 Apr 2018 16:24:24 -0500 Subject: IBMCloud: recognize provisioning environment during debug boots. When images are deployed from template in a production environment the artifacts of the provisioning stage (provisioningConfiguration.cfg) that cloud-init referenced are cleaned up. However, when provisioned in "debug" mode (internal to IBM) the artifacts are left. This changes the 'is_ibm_provisioning' implementations in both ds-identify and in the IBM datasource to identify the provisioning stage more correctly. The change is to consider provisioning only if the provisioing file existed and there was no log file or the log file was older than this boot. LP: #1767166 --- cloudinit/sources/DataSourceIBMCloud.py | 42 +++++++++----- cloudinit/tests/helpers.py | 13 ++++- tests/unittests/test_datasource/test_ibmcloud.py | 50 ++++++++++++++++ tests/unittests/test_ds_identify.py | 72 +++++++++++++++++++++--- tools/ds-identify | 21 ++++++- 5 files changed, 175 insertions(+), 23 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index cfa724bf..01106ec0 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -8,17 +8,11 @@ There are 2 different api exposed launch methods. * template: This is the legacy method of launching instances. When booting from an image template, the system boots first into a "provisioning" mode. There, host <-> guest mechanisms are utilized - to execute code in the guest and provision it. + to execute code in the guest and configure it. The configuration + includes configuring the system network and possibly installing + packages and other software stack. - Cloud-init will disable itself when it detects that it is in the - provisioning mode. It detects this by the presence of - a file '/root/provisioningConfiguration.cfg'. - - When provided with user-data, the "first boot" will contain a - ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data - provided, then there is no data-source. - - Cloud-init never does any network configuration in this mode. + After the provisioning is finished, the system reboots. * os_code: Essentially "launch by OS Code" (Operating System Code). This is a more modern approach. There is no specific "provisioning" boot. @@ -200,8 +194,30 @@ def _is_xen(): return os.path.exists("/proc/xen") -def _is_ibm_provisioning(): - return os.path.exists("/root/provisioningConfiguration.cfg") +def _is_ibm_provisioning( + prov_cfg="/root/provisioningConfiguration.cfg", + inst_log="/root/swinstall.log", + boot_ref="/proc/1/environ"): + """Return boolean indicating if this boot is ibm provisioning boot.""" + if os.path.exists(prov_cfg): + msg = "config '%s' exists." % prov_cfg + result = True + if os.path.exists(inst_log): + if os.path.exists(boot_ref): + result = (os.stat(inst_log).st_mtime > + os.stat(boot_ref).st_mtime) + msg += (" log '%s' from %s boot." % + (inst_log, "current" if result else "previous")) + else: + msg += (" log '%s' existed, but no reference file '%s'." % + (inst_log, boot_ref)) + result = False + else: + msg += " log '%s' did not exist." % inst_log + else: + result, msg = (False, "config '%s' did not exist." 
% prov_cfg) + LOG.debug("ibm_provisioning=%s: %s", result, msg) + return result def get_ibm_platform(): @@ -251,7 +267,7 @@ def get_ibm_platform(): else: return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) elif _is_ibm_provisioning(): - return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) return not_found diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 4999f1f6..117a9cfe 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -8,6 +8,7 @@ import os import shutil import sys import tempfile +import time import unittest import mock @@ -263,7 +264,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): os.path: [('isfile', 1), ('exists', 1), ('islink', 1), ('isdir', 1), ('lexists', 1)], os: [('listdir', 1), ('mkdir', 1), - ('lstat', 1), ('symlink', 2)] + ('lstat', 1), ('symlink', 2), + ('stat', 1)] } if hasattr(os, 'scandir'): @@ -349,6 +351,15 @@ def populate_dir(path, files): return ret +def populate_dir_with_ts(path, data): + """data is {'file': ('contents', mtime)}. mtime relative to now.""" + populate_dir(path, dict((k, v[0]) for k, v in data.items())) + btime = time.time() + for fpath, (_contents, mtime) in data.items(): + ts = btime + mtime if mtime else btime + os.utime(os.path.sep.join((path, fpath)), (ts, ts)) + + def dir2dict(startdir, prefix=None): flist = {} if prefix is None: diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py index 621cfe49..e639ae47 100644 --- a/tests/unittests/test_datasource/test_ibmcloud.py +++ b/tests/unittests/test_datasource/test_ibmcloud.py @@ -259,4 +259,54 @@ class TestReadMD(test_helpers.CiTestCase): ret['metadata']) +class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): + """Test the _is_ibm_provisioning method.""" + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + with_logs = True + + def _call_with_root(self, rootd): + self.reRoot(rootd) + return ibm._is_ibm_provisioning() + + def test_no_config(self): + """No provisioning config means not provisioning.""" + self.assertFalse(self._call_with_root(self.tmp_dir())) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + rootd = self.tmp_dir() + test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) + self.assertTrue(self._call_with_root(rootd)) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("from previous boot", self.logs.getvalue()) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertTrue(self._call_with_root(rootd=rootd)) + self.assertIn("from current boot", self.logs.getvalue()) + + def test_config_and_log_no_reference(self): + """If the config and log existed, but no reference, assume not.""" + rootd = self.tmp_dir() + test_helpers.populate_dir( + rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) + 
self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("no reference file", self.logs.getvalue()) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 53643989..ad7fe41e 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple import copy import os from uuid import uuid4 @@ -7,7 +8,7 @@ from uuid import uuid4 from cloudinit import safeyaml from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, dir2dict, populate_dir) + CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) from cloudinit.sources import DataSourceIBMCloud as dsibm @@ -66,7 +67,6 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" -IBM_PROVISIONING_CHECK_PATH = "/root/provisioningConfiguration.cfg" IBM_CONFIG_UUID = "9796-932E" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} @@ -74,11 +74,17 @@ MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} +shell_true = 0 +shell_false = 1 -class TestDsIdentify(CiTestCase): +CallReturn = namedtuple('CallReturn', + ['rc', 'stdout', 'stderr', 'cfg', 'files']) + + +class DsIdentifyBase(CiTestCase): dsid_path = os.path.realpath('tools/ds-identify') - def call(self, rootd=None, mocks=None, args=None, files=None, + def call(self, rootd=None, mocks=None, func="main", args=None, files=None, policy_dmi=DI_DEFAULT_POLICY, policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI, ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT): @@ -135,7 +141,7 @@ class TestDsIdentify(CiTestCase): mocklines.append(write_mock(d)) endlines = [ - 'main %s' % ' '.join(['"%s"' % s for s in args]) + func + ' ' + ' '.join(['"%s"' % s for s in args]) ] with open(wrap, "w") as fp: @@ -159,7 +165,7 @@ class TestDsIdentify(CiTestCase): cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)} - return rc, out, err, cfg, dir2dict(rootd) + return CallReturn(rc, out, err, cfg, dir2dict(rootd)) def _call_via_dict(self, data, rootd=None, **kwargs): # return output of self.call with a dict input like VALID_CFG[item] @@ -190,6 +196,8 @@ class TestDsIdentify(CiTestCase): _print_run_output(rc, out, err, cfg, files) return rc, out, err, cfg, files + +class TestDsIdentify(DsIdentifyBase): def test_wb_print_variables(self): """_print_info reports an array of discovered variables to stderr.""" data = VALID_CFG['Azure-dmi-detection'] @@ -250,7 +258,10 @@ class TestDsIdentify(CiTestCase): Template provisioning with user-data has METADATA disk, datasource should return not found.""" data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) - data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + # change the 'is_ibm_provisioning' mock to return 1 (false) + isprov_m = [m for m in data['mocks'] + if m["name"] == "is_ibm_provisioning"][0] + isprov_m['ret'] = shell_true return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_userdata(self): @@ -265,7 +276,8 @@ class TestDsIdentify(CiTestCase): no disks attached. 
Datasource should return not found.""" data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) - data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + data['mocks'].append( + {'name': 'is_ibm_provisioning', 'ret': shell_true}) return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_no_userdata(self): @@ -446,6 +458,47 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('Hetzner') +class TestIsIBMProvisioning(DsIdentifyBase): + """Test the is_ibm_provisioning method in ds-identify.""" + + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + funcname = "is_ibm_provisioning" + + def test_no_config(self): + """No provisioning config means not provisioning.""" + ret = self.call(files={}, func=self.funcname) + self.assertEqual(shell_false, ret.rc) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + ret = self.call(files={self.prov_cfg: "key=value"}, func=self.funcname) + self.assertEqual(shell_true, ret.rc) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} + populate_dir_with_ts(rootd, data) + ret = self.call(rootd=rootd, func=self.funcname) + self.assertEqual(shell_false, ret.rc) + self.assertIn("from previous boot", ret.stderr) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + populate_dir_with_ts(rootd, data) + ret = self.call(rootd=rootd, func=self.funcname) + self.assertEqual(shell_true, ret.rc) + self.assertIn("from current boot", ret.stderr) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -639,6 +692,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, @@ -652,6 +706,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), @@ -669,6 +724,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, diff --git a/tools/ds-identify b/tools/ds-identify index 9a2db5c4..7fff5d1e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -125,6 +125,7 @@ DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" _IS_IBM_CLOUD="" +_IS_IBM_PROVISIONING="" error() { set -- "ERROR:" "$@"; @@ -1006,7 +1007,25 @@ dscheck_Hetzner() { } is_ibm_provisioning() { - [ -f "${PATH_ROOT}/root/provisioningConfiguration.cfg" ] + local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg" + local logf="${PATH_ROOT}/root/swinstall.log" + local is_prov=false msg="config '$pcfg' did not exist." + if [ -f "$pcfg" ]; then + msg="config '$pcfg' exists." + is_prov=true + if [ -f "$logf" ]; then + if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then + msg="$msg log '$logf' from current boot." 
+ else + is_prov=false + msg="$msg log '$logf' from previous boot." + fi + else + msg="$msg log '$logf' did not exist." + fi + fi + debug 2 "ibm_provisioning=$is_prov: $msg" + [ "$is_prov" = "true" ] } is_ibm_cloud() { -- cgit v1.2.3 From 11172924a48a47a7231d19d9cefe628dfddda8bf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 30 Apr 2018 13:21:51 -0600 Subject: IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. Ubuntu images on IBMCloud for 16.04 have some seed data in /var/lib/cloud/data/seed/nocloud-net. In order to have systems with IBMCloud enabled, we modified ds-identify detection to skip that seed if the system was on IBMCloud. That change did not consider the fact that IBMCloud might not be in the datasource list. There was similar logic in the ConfigDrive datasource in ds-identify and the datasource itself. Config drive is now updated to only check and avoid IBMCloud if IBMCloud is enabled. The check in ds-identify for nocloud was dropped. If a user provides a nocloud seed on IBMCloud, then that can be used. This means that systems running Xenial will continue to get their old datasources. LP: #1766401 --- cloudinit/sources/DataSourceConfigDrive.py | 11 +++-- tests/unittests/test_ds_identify.py | 77 +++++++++++++++++++++++++++--- tools/ds-identify | 17 +++++-- 3 files changed, 91 insertions(+), 14 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c7b5fe5f..121cf215 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): util.logexc(LOG, "Failed reading config drive from %s", sdir) if not found: - for dev in find_candidate_devs(): + dslist = self.sys_cfg.get('datasource_list') + for dev in find_candidate_devs(dslist=dslist): try: # Set mtype if freebsd and turn off sync if dev.startswith("/dev/cd"): @@ -211,7 +212,7 @@ def write_injected_files(files): util.logexc(LOG, "Failed writing file: %s", filename) -def find_candidate_devs(probe_optical=True): +def find_candidate_devs(probe_optical=True, dslist=None): """Return a list of devices that may contain the config drive. The returned list is sorted by search order where the first item has @@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True): * either vfat or iso9660 formated * labeled with 'config-2' or 'CONFIG-2' """ + if dslist is None: + dslist = [] + # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: for device in OPTICAL_DEVICES: @@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True): devices = [d for d in candidates if d in by_label or not util.is_partition(d)] - if devices: + LOG.debug("devices=%s dslist=%s", devices, dslist) + if devices and "IBMCloud" in dslist: # IBMCloud uses config-2 label, but limited to a single UUID. 
ibm_platform, ibm_path = get_ibm_platform() if ibm_path in devices: diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index ad7fe41e..4d8a4360 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -184,17 +184,18 @@ class DsIdentifyBase(CiTestCase): data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) def _check_via_dict(self, data, rc, dslist=None, **kwargs): - found_rc, out, err, cfg, files = self._call_via_dict(data, **kwargs) + ret = self._call_via_dict(data, **kwargs) good = False try: - self.assertEqual(rc, found_rc) + self.assertEqual(rc, ret.rc) if dslist is not None: - self.assertEqual(dslist, cfg['datasource_list']) + self.assertEqual(dslist, ret.cfg['datasource_list']) good = True finally: if not good: - _print_run_output(rc, out, err, cfg, files) - return rc, out, err, cfg, files + _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg, + ret.files) + return ret class TestDsIdentify(DsIdentifyBase): @@ -245,13 +246,40 @@ class TestDsIdentify(DsIdentifyBase): def test_config_drive(self): """ConfigDrive datasource has a disk with LABEL=config-2.""" self._test_ds_found('ConfigDrive') - return def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" self._test_ds_found('ConfigDriveUpper') return + def test_config_drive_seed(self): + """Config Drive seed directory.""" + self._test_ds_found('ConfigDrive-seed') + + def test_config_drive_interacts_with_ibmcloud_config_disk(self): + """Verify ConfigDrive interaction with IBMCloud. + + If ConfigDrive is enabled and not IBMCloud, then ConfigDrive + should claim the ibmcloud 'config-2' disk. + If IBMCloud is enabled, then ConfigDrive should skip.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + files = data.get('files', {}) + if not files: + data['files'] = files + cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg' + + # with list including IBMCloud, config drive should be not found. + files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n' + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ret.cfg.get('datasource_list'), ['IBMCloud', 'None']) + + # But if IBMCloud is not enabled, config drive should claim this. + files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n' + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ret.cfg.get('datasource_list'), ['ConfigDrive', 'None']) + def test_ibmcloud_template_userdata_in_provisioning(self): """Template provisioned with user-data during provisioning stage. @@ -307,6 +335,37 @@ class TestDsIdentify(DsIdentifyBase): self._check_via_dict( data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + def test_ibmcloud_with_nocloud_seed(self): + """NoCloud seed should be preferred over IBMCloud. + + A nocloud seed should be preferred over IBMCloud even if enabled. + Ubuntu 16.04 images have /seed/nocloud-net. LP: #1766401.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + files = data.get('files', {}) + if not files: + data['files'] = files + files.update(VALID_CFG['NoCloud-seed']['files']) + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ['NoCloud', 'IBMCloud', 'None'], + ret.cfg.get('datasource_list')) + + def test_ibmcloud_with_configdrive_seed(self): + """ConfigDrive seed should be preferred over IBMCloud. + + A ConfigDrive seed should be preferred over IBMCloud even if enabled. + Ubuntu 16.04 images have a fstab entry that mounts the + METADATA disk into /seed/config_drive. 
LP: #1766401."""
+        data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+        files = data.get('files', {})
+        if not files:
+            data['files'] = files
+        files.update(VALID_CFG['ConfigDrive-seed']['files'])
+        ret = self._check_via_dict(data, shell_true)
+        self.assertEqual(
+            ['ConfigDrive', 'IBMCloud', 'None'],
+            ret.cfg.get('datasource_list'))
+
    def test_policy_disabled(self):
        """A Builtin policy of 'disabled' should return not found.
 
@@ -684,6 +743,12 @@ VALID_CFG = {
            },
        ],
    },
+    'ConfigDrive-seed': {
+        'ds': 'ConfigDrive',
+        'files': {
+            os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
+                         'latest', 'meta_data.json'): 'md\n'},
+    },
    'Hetzner': {
        'ds': 'Hetzner',
        'files': {P_SYS_VENDOR: 'Hetzner\n'},
diff --git a/tools/ds-identify b/tools/ds-identify
index 7fff5d1e..9f0d96f7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -601,7 +601,6 @@ dscheck_NoCloud() {
        *\ ds=nocloud*) return ${DS_FOUND};;
    esac
 
-    is_ibm_cloud && return ${DS_NOT_FOUND}
    for d in nocloud nocloud-net; do
        check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
        check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
@@ -612,11 +611,12 @@ dscheck_NoCloud() {
    return ${DS_NOT_FOUND}
}
 
+is_ds_enabled() {
+    local name="$1" pad=" ${DI_DSLIST} "
+    [ "${pad#* $name }" != "${pad}" ]
+}
+
check_configdrive_v2() {
-    is_ibm_cloud && return ${DS_NOT_FOUND}
-    if has_fs_with_label CONFIG-2 config-2; then
-        return ${DS_FOUND}
-    fi
    # look in /config-drive <dir>/seed/config_drive for a directory
    # openstack/YYYY-MM-DD format with a file meta_data.json
    local d=""
@@ -631,6 +631,13 @@ check_configdrive_v2() {
        debug 1 "config drive seeded directory had only 'latest'"
        return ${DS_FOUND}
    fi
+
+    is_ds_enabled "IBMCloud"
+    debug 1 "is_ds_enabled returned $?: $DI_DSLIST"
+    is_ds_enabled "IBMCloud" && is_ibm_cloud && return ${DS_NOT_FOUND}
+    if has_fs_with_label CONFIG-2 config-2; then
+        return ${DS_FOUND}
+    fi
    return ${DS_NOT_FOUND}
}
-- 
cgit v1.2.3


From fed07fc6cddebcbc3e744926509c362d9e200aeb Mon Sep 17 00:00:00 2001
From: Harm Weites
Date: Tue, 1 May 2018 15:38:51 -0600
Subject: FreeBSD: Invoke growfs on ufs filesystems such that it does not
 prompt.

By default, FreeBSD's growfs runs interactively, asking a confirmation
question; that prompt can be suppressed with the '-y' command line
option. The fix here is simply to pass -y to growfs so it never prompts.
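To illustrate the effect of the change (a minimal sketch, not the patch
itself; the device path and the direct subprocess invocation below are
assumptions made for demonstration only):

    import subprocess

    def _resize_ufs(mount_point, devpth):
        # '-y' makes FreeBSD's growfs assume "yes" to its confirmation
        # question, so the resize proceeds without an interactive prompt.
        return ('growfs', '-y', devpth)

    # Hypothetical usage; '/dev/ada0p2' is a placeholder device path.
    cmd = _resize_ufs('/', '/dev/ada0p2')
    subprocess.check_call(cmd)  # runs: growfs -y /dev/ada0p2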
LP: #1404745
---
 cloudinit/config/cc_resizefs.py                       | 2 +-
 tests/unittests/test_handler/test_handler_resizefs.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 82f29e10..2edddd0c 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
 
 
 def _resize_ufs(mount_point, devpth):
-    return ('growfs', devpth)
+    return ('growfs', '-y', devpth)
 
 
 def _resize_zfs(mount_point, devpth):
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index 7a7ba1ff..f92175fd 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -147,7 +147,7 @@ class TestResizefs(CiTestCase):
    def test_resize_ufs_cmd_return(self):
        mount_point = '/'
        devpth = '/dev/sda2'
-        self.assertEqual(('growfs', devpth),
+        self.assertEqual(('growfs', '-y', devpth),
                         _resize_ufs(mount_point, devpth))
 
    @mock.patch('cloudinit.util.get_mount_info')
-- 
cgit v1.2.3


From aae494c39f4c6f625e7409ca262e657d085dd5d1 Mon Sep 17 00:00:00 2001
From: Joshua Chan
Date: Thu, 3 May 2018 14:50:16 -0600
Subject: azure: Add reported ready marker file.

This change is for Azure VM Preprovisioning. A bug was found where,
after an Azure VM reports ready the first time and while it is polling
indefinitely for the new ovf-env.xml from the Instance Metadata Service
(IMDS), a reboot would cause us to send another report ready signal to
the fabric, which deletes the reprovisioning data on the node.

To fix this, we now only send a report ready signal to the fabric when
no marker file is present, and we create the marker file once the signal
has been sent. When a reboot does occur, we check whether the marker
file exists and use that to decide whether to send the report ready
signal again.

LP: #1765214
---
 cloudinit/sources/DataSourceAzure.py          |  21 +++-
 tests/unittests/test_datasource/test_azure.py | 170 ++++++++++++++++++--------
 2 files changed, 134 insertions(+), 57 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a71197a6..1b03d460 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
 # DMI chassis-asset-tag is set static for all azure instances
 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
 IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
 
 
@@ -436,11 +437,12 @@ class DataSourceAzure(sources.DataSource):
            LOG.debug("negotiating already done for %s",
                      self.get_instance_id())
 
-    def _poll_imds(self, report_ready=True):
+    def _poll_imds(self):
        """Poll IMDS for the new provisioning data until we get a valid
        response. Then return the returned JSON object."""
        url = IMDS_URL + "?api-version=2017-04-02"
        headers = {"Metadata": "true"}
+        report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
        LOG.debug("Start polling IMDS")
 
        def exc_cb(msg, exception):
@@ -450,13 +452,17 @@ class DataSourceAzure(sources.DataSource):
            # call DHCP and setup the ephemeral network to acquire the new IP.
return False - need_report = report_ready while True: try: with EphemeralDHCPv4() as lease: - if need_report: + if report_ready: + path = REPORTED_READY_MARKER_FILE + LOG.info( + "Creating a marker file to report ready: %s", path) + util.write_file(path, "{pid}: {time}\n".format( + pid=os.getpid(), time=time())) self._report_ready(lease=lease) - need_report = False + report_ready = False return readurl(url, timeout=1, headers=headers, exception_cb=exc_cb, infinite=True).contents except UrlError: @@ -490,8 +496,10 @@ class DataSourceAzure(sources.DataSource): if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): if not os.path.isfile(path): - LOG.info("Creating a marker file to poll imds") - util.write_file(path, "%s: %s\n" % (os.getpid(), time())) + LOG.info("Creating a marker file to poll imds: %s", + path) + util.write_file(path, "{pid}: {time}\n".format( + pid=os.getpid(), time=time())) return True return False @@ -526,6 +534,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 88fe76c7..26e8d7d3 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1125,19 +1125,9 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) -@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') -@mock.patch.object(dsaz, 'get_hostname') -@mock.patch.object(dsaz, 'set_hostname') -class TestAzureDataSourcePreprovisioning(CiTestCase): - - def setUp(self): - super(TestAzureDataSourcePreprovisioning, self).setUp() - tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) - self.paths = helpers.Paths({'cloud_dir': tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d +class TestPreprovisioningReadAzureOvfFlag(CiTestCase): - def test_read_azure_ovf_with_true_flag(self, *args): + def test_read_azure_ovf_with_true_flag(self): """The read_azure_ovf method should set the PreprovisionedVM cfg flag if the proper setting is present.""" content = construct_valid_ovf_env( @@ -1146,7 +1136,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertTrue(cfg['PreprovisionedVm']) - def test_read_azure_ovf_with_false_flag(self, *args): + def test_read_azure_ovf_with_false_flag(self): """The read_azure_ovf method should set the PreprovisionedVM cfg flag to false if the proper setting is false.""" content = construct_valid_ovf_env( @@ -1155,7 +1145,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertFalse(cfg['PreprovisionedVm']) - def test_read_azure_ovf_without_flag(self, *args): + def test_read_azure_ovf_without_flag(self): """The read_azure_ovf method should not set the PreprovisionedVM cfg flag.""" content = construct_valid_ovf_env() @@ -1163,12 +1153,121 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertFalse(cfg['PreprovisionedVm']) - @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('requests.Session.request') + +@mock.patch('os.path.isfile') +class TestPreprovisioningShouldReprovision(CiTestCase): + + def setUp(self): + 
super(TestPreprovisioningShouldReprovision, self).setUp()
+        tmp = self.tmp_dir()
+        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
+        self.paths = helpers.Paths({'cloud_dir': tmp})
+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
+        """The _should_reprovision method should return true with config
+        flag present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'PreprovisionedVm': True}, None)))
+
+    def test__should_reprovision_with_file_existing(self, isfile):
+        """The _should_reprovision method should return True if the sentinel
+        exists."""
+        isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'preprovisionedvm': False}, None)))
+
+    def test__should_reprovision_returns_false(self, isfile):
+        """The _should_reprovision method should return False
+        if config and sentinel are not present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
+        """_reprovision will poll IMDS."""
+        isfile.return_value = False
+        hostname = "myhost"
+        username = "myuser"
+        odata = {'HostName': hostname, 'UserName': username}
+        _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        dsa._reprovision()
+        _poll_imds.assert_called_with()
+
+
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+@mock.patch('requests.Session.request')
+@mock.patch(
+    'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+class TestPreprovisioningPollIMDS(CiTestCase):
+
+    def setUp(self):
+        super(TestPreprovisioningPollIMDS, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
+        self.paths = helpers.Paths({'cloud_dir': self.tmp})
+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    def test_poll_imds_calls_report_ready(self, write_f, report_ready_func,
+                                          fake_resp, m_dhcp, m_net):
+        """_poll_imds should call report_ready after creating the marker."""
+        report_marker = self.tmp_path('report_marker', self.tmp)
+        lease = {
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}
+        m_dhcp.return_value = [lease]
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        mock_path = (
+            'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE')
+        with mock.patch(mock_path, report_marker):
+            dsa._poll_imds()
+        self.assertEqual(report_ready_func.call_count, 1)
+        report_ready_func.assert_called_with(lease=lease)
+
+    def test_poll_imds_report_ready_false(self, report_ready_func,
+                                          fake_resp, m_dhcp, m_net):
+        """_poll_imds should not call report_ready
+        when the flag is false."""
+        report_marker = self.tmp_path('report_marker', self.tmp)
+        write_file(report_marker, content='dont run report_ready :)')
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': 
'192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + mock_path = ( + 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE') + with mock.patch(mock_path, report_marker): + dsa._poll_imds() + self.assertEqual(report_ready_func.call_count, 0) + + +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') +@mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') +@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') +@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('requests.Session.request') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, - m_is_bsd, *args): + m_is_bsd, write_f, subp): """The _poll_imds method should return the ovf_env.xml.""" m_is_bsd.return_value = False m_dhcp.return_value = [{ @@ -1194,12 +1293,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): prefix_or_mask='255.255.255.0', router='192.168.2.1') self.assertEqual(m_net.call_count, 1) - @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('requests.Session.request') def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, - m_is_bsd, *args): + m_is_bsd, write_f, subp): """The _reprovision method should call poll IMDS.""" m_is_bsd.return_value = False m_dhcp.return_value = [{ @@ -1231,32 +1326,5 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): prefix_or_mask='255.255.255.0', router='192.168.2.1') self.assertEqual(m_net.call_count, 1) - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch('os.path.isfile') - def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args): - """The _should_reprovision method should return true with config - flag present.""" - isfile.return_value = False - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'PreprovisionedVm': True}, None))) - - @mock.patch('os.path.isfile') - def test__should_reprovision_with_file_existing(self, isfile, *args): - """The _should_reprovision method should return True if the sentinal - exists.""" - isfile.return_value = True - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'preprovisionedvm': False}, None))) - - @mock.patch('os.path.isfile') - def test__should_reprovision_returns_false(self, isfile, *args): - """The _should_reprovision method should return False - if config and sentinal are not present.""" - isfile.return_value = False - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertFalse(dsa._should_reprovision((None, None, {}, None))) - # vi: ts=4 expandtab -- cgit v1.2.3 From 80dfb3b023a268d6d6204220665c2cf43eac66df Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 4 May 2018 11:18:22 -0600 Subject: pycodestyle: Fix deprecated string literals, move away from flake8. 
Fix remaining pycodestyle warnings related to invalid string literals
introduced in more recent pycodestyle versions
https://bugs.python.org/issue27364 .

Also stop using flake8 in tox as it is incompatible with newer versions of
pyflakes. Instead we now add tox environments for pycodestyle and pyflakes
individually.

Set the versions in both pycodestyle and pyflakes to the currently
available versions.
---
 cloudinit/netinfo.py                             |  2 +-
 tests/unittests/test_handler/test_handler_ntp.py |  2 +-
 tox.ini                                          | 16 +++++++++-------
 3 files changed, 11 insertions(+), 9 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 1be76fe7..9ff929c2 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -138,7 +138,7 @@ def _netdev_info_ifconfig(ifconfig_data):
        elif toks[i].startswith("scope:"):
            devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
        elif toks[i] == "scopeid":
-            res = re.match(".*<(\S+)>", toks[i + 1])
+            res = re.match(r'.*<(\S+)>', toks[i + 1])
            if res:
                devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
    return devs
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 17c53559..6da4564e 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -706,7 +706,7 @@ class TestSupplementalSchemaValidation(CiTestCase):
        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
               'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
        match = (r'Invalid ntp configuration:\\nExpected a list of required'
-                 ' package names for ntp:config:packages. Found \(NOPE\)')
+                 ' package names for ntp:config:packages. Found \\(NOPE\\)')
        with self.assertRaisesRegex(ValueError, match):
            cc_ntp.supplemental_schema_validation(cfg)
 
diff --git a/tox.ini b/tox.ini
index 818ade3d..2fb3209d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py27, py3, flake8, xenial, pylint
+envlist = py27, py3, xenial, pycodestyle, pyflakes, pylint
 recreate = True
 
 [testenv]
@@ -7,14 +7,11 @@ commands = python -m nose {posargs:tests/unittests cloudinit}
 setenv =
    LC_ALL = en_US.utf-8
 
-[testenv:flake8]
+[testenv:pycodestyle]
 basepython = python3
 deps =
-    pycodestyle==2.3.1
-    pyflakes==1.5.0
-    flake8==3.3.0
-    hacking==0.13.0
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+    pycodestyle==2.4.0
+commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
 
 # https://github.com/gabrielfalcao/HTTPretty/issues/223
 setenv =
@@ -118,6 +115,11 @@ deps =
 commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
 deps = pycodestyle
 
+[testenv:pyflakes]
+commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
+deps =
+    pyflakes==1.6.0
+
 [testenv:tip-pyflakes]
 commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
 deps = pyflakes
-- 
cgit v1.2.3


From 23a84d2ce4ec44a0a0c2edbc3b1948c59bb0afbb Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 9 May 2018 17:40:56 -0600
Subject: SmartOS: fix get_interfaces for nics that do not have
 addr_assign_type.

When attempting to apply network configuration for SmartOS's container
platform, cloud-init would not identify nics. The nics provided in this
container service do not have 'addr_assign_type'. That was being
interpreted as a "stolen" mac, and such nics would be filtered out by
get_interfaces.
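In outline, the fixed check behaves as in the following sketch
(simplified and self-contained; the /sys-reading helper here is a
stand-in written for illustration, not cloud-init's actual
implementation):

    import os

    def read_sys_net_int(ifname, field):
        # Stand-in helper: read an integer from
        # /sys/class/net/<ifname>/<field>, returning None when the file
        # is absent or unreadable.
        path = os.path.join('/sys/class/net', ifname, field)
        try:
            with open(path) as stream:
                return int(stream.read().strip())
        except (IOError, OSError, ValueError):
            return None

    def interface_has_own_mac(ifname, strict=False):
        # addr_assign_type values: 0=permanently assigned, 1=randomly
        # generated, 3=set via dev_set_mac_address; 2 means the mac was
        # "stolen" from another device.
        assign_type = read_sys_net_int(ifname, 'addr_assign_type')
        if assign_type is None:
            # SmartOS container nics expose no addr_assign_type at all,
            # so a missing entry must not be treated as a stolen mac.
            if strict:
                raise ValueError('%s had no addr_assign_type.' % ifname)
            return True
        return assign_type in (0, 1, 3)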
--- cloudinit/net/__init__.py | 8 ++++++-- tests/unittests/test_net.py | 46 +++++++++++++++++++++++++++++++++++++++------ 2 files changed, 46 insertions(+), 8 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 43226bd0..3ffde52c 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -359,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False): 1: randomly generated 3: set using dev_set_mac_address""" assign_type = read_sys_net_int(ifname, "addr_assign_type") - if strict and assign_type is None: - raise ValueError("%s had no addr_assign_type.") + if assign_type is None: + # None is returned if this nic had no 'addr_assign_type' entry. + # if strict, raise an error, if not return True. + if strict: + raise ValueError("%s had no addr_assign_type.") + return True return assign_type in (0, 1, 3) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fac82678..31807e13 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2,12 +2,9 @@ from cloudinit import net from cloudinit.net import cmdline -from cloudinit.net import eni -from cloudinit.net import natural_sort_key -from cloudinit.net import netplan -from cloudinit.net import network_state -from cloudinit.net import renderers -from cloudinit.net import sysconfig +from cloudinit.net import ( + eni, interface_has_own_mac, natural_sort_key, netplan, network_state, + renderers, sysconfig) from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import util @@ -2691,6 +2688,43 @@ class TestGetInterfaces(CiTestCase): any_order=True) +class TestInterfaceHasOwnMac(CiTestCase): + """Test interface_has_own_mac. This is admittedly a bit whitebox.""" + + @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int): + """If nic does not have addr_assign_type, it is not "stolen". + + SmartOS containers do not provide the addr_assign_type in /sys. + + $ ( cd /sys/class/net/eth0/ && grep -r . *) + address:90:b8:d0:20:e1:b0 + addr_len:6 + flags:0x1043 + ifindex:2 + mtu:1500 + tx_queue_len:1 + type:1 + """ + self.assertTrue(interface_has_own_mac("eth0")) + + @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int): + with self.assertRaises(ValueError): + interface_has_own_mac("eth0", True) + + @mock.patch('cloudinit.net.read_sys_net_int') + def test_expected_values(self, m_read_sys_net_int): + msg = "address_assign_type=%d said to not have own mac" + for address_assign_type in (0, 1, 3): + m_read_sys_net_int.return_value = address_assign_type + self.assertTrue( + interface_has_own_mac("eth0", msg % address_assign_type)) + + m_read_sys_net_int.return_value = 2 + self.assertFalse(interface_has_own_mac("eth0")) + + class TestGetInterfacesByMac(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], -- cgit v1.2.3 From 3b712fcea9ca685c5cb761ea19c5126acf8ffaa1 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 9 May 2018 20:36:51 -0600 Subject: tests: do not rely on host /proc/cmdline in test_net.py Make test_net.TestGenerateFallbackConfig.test_unstable_names mock the value of /proc/cmdline in the same way as the existing test_unstable_names_disabled test. 
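The isolation pattern, in brief (a generic sketch assuming cloud-init is
importable and unittest.mock is available; the real test passes the mock
in as a decorator argument instead):

    from unittest import mock  # the external 'mock' package on Python 2

    from cloudinit import util

    # Pin the kernel command line to a fixed value so the code under
    # test cannot be influenced by whatever the host running the tests
    # booted with.
    with mock.patch('cloudinit.util.get_cmdline', return_value=''):
        assert util.get_cmdline() == ''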
LP: #1769952 --- tests/unittests/test_net.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 31807e13..e13ca3ce 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1605,12 +1605,13 @@ iface eth1 inet dhcp ] self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.udevadm_settle") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path, mock_settle): + mock_sys_dev_path, mock_settle, m_get_cmdline): """verify that udevadm settle is called when we find unstable names""" devices = { 'eth0': { @@ -1626,6 +1627,7 @@ iface eth1 inet dhcp } + m_get_cmdline.return_value = '' tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, -- cgit v1.2.3 From 0d7ee5592621d09699d079945ffd6febf16669b2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 15 May 2018 13:38:20 -0400 Subject: ds-identify: recognize container-other as a container, test SmartOS. In playing with a SmartOS container I found that ds-identify did not identify the container there as a container. Systemd-detect-virt identifies it as 'container-other'. Also here are tests for ds-identify for the SmartOS platform identification, and some indentation fixes in ds-identify. --- cloudinit/sources/DataSourceSmartOS.py | 4 +-- tests/unittests/test_ds_identify.py | 51 ++++++++++++++++++++++++++++++++-- tools/ds-identify | 12 ++++---- 3 files changed, 57 insertions(+), 10 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 4ea00eb1..fcb46b14 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -745,7 +745,7 @@ def get_smartos_environ(uname_version=None, product_name=None): # report 'BrandZ virtual linux' as the kernel version if uname_version is None: uname_version = uname[3] - if uname_version.lower() == 'brandz virtual linux': + if uname_version == 'BrandZ virtual linux': return SMARTOS_ENV_LX_BRAND if product_name is None: @@ -753,7 +753,7 @@ def get_smartos_environ(uname_version=None, product_name=None): else: system_type = product_name - if system_type and 'smartdc' in system_type.lower(): + if system_type and system_type.startswith('SmartDC'): return SMARTOS_ENV_KVM return None diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 4d8a4360..7f12be59 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -10,7 +10,8 @@ from cloudinit import util from cloudinit.tests.helpers import ( CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) -from cloudinit.sources import DataSourceIBMCloud as dsibm +from cloudinit.sources import DataSourceIBMCloud as ds_ibm +from cloudinit.sources import DataSourceSmartOS as ds_smartos UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -69,8 +70,12 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg" IBM_CONFIG_UUID = "9796-932E" +MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt', + 'RET': 'container-other', 'ret': 0} MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} MOCK_VIRT_IS_VMWARE = 
{'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
+# currently, SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt.
+MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0}
 MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0}
 MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}
 
@@ -330,7 +335,7 @@ class TestDsIdentify(DsIdentifyBase):
                break
        if not offset:
            raise ValueError("Expected to find 'blkid' mock, but did not.")
-        data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID,
+        data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID,
                                                        "DEAD-BEEF")
        self._check_via_dict(
            data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE])
@@ -516,6 +521,20 @@ class TestDsIdentify(DsIdentifyBase):
        """Hetzner cloud is identified in sys_vendor."""
        self._test_ds_found('Hetzner')
 
+    def test_smartos_bhyve(self):
+        """SmartOS cloud identified by SmartDC in dmi."""
+        self._test_ds_found('SmartOS-bhyve')
+
+    def test_smartos_lxbrand(self):
+        """SmartOS cloud identified on lxbrand container."""
+        self._test_ds_found('SmartOS-lxbrand')
+
+    def test_smartos_lxbrand_requires_socket(self):
+        """SmartOS cloud should not be identified if no socket file."""
+        mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand'])
+        del mycfg['files'][ds_smartos.METADATA_SOCKFILE]
+        self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
 
 class TestIsIBMProvisioning(DsIdentifyBase):
    """Test the is_ibm_provisioning method in ds-identify."""
@@ -777,7 +796,7 @@ VALID_CFG = {
            [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(),
              'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'},
             {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2',
-              'UUID': dsibm.IBM_CONFIG_UUID},
+              'UUID': ds_ibm.IBM_CONFIG_UUID},
             {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
              'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(),
              'UUID': uuid4()},
@@ -798,6 +817,32 @@ VALID_CFG = {
            },
        ],
    },
+    'SmartOS-bhyve': {
+        'ds': 'SmartOS',
+        'mocks': [
+            MOCK_VIRT_IS_VM_OTHER,
+            {'name': 'blkid', 'ret': 0,
+             'out': blkid_out(
+                 [{'DEVNAME': 'vda1', 'TYPE': 'ext4',
+                   'PARTUUID': '49ec635a-01'},
+                  {'DEVNAME': 'vda2', 'TYPE': 'swap',
+                   'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]),
+             },
+        ],
+        'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'},
+    },
+    'SmartOS-lxbrand': {
+        'ds': 'SmartOS',
+        'mocks': [
+            MOCK_VIRT_IS_CONTAINER_OTHER,
+            {'name': 'uname', 'ret': 0,
+             'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
+                     "BrandZ virtual linux x86_64 GNU/Linux")},
+            {'name': 'blkid', 'ret': 2, 'out': ''},
+        ],
+        'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'},
+    }
 }
 
 # vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index 435a5bcb..dc7ac3a7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -261,7 +261,7 @@ read_virt() {
 
 is_container() {
    case "${DI_VIRT}" in
-        lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
+        container-other|lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
        *) return 1;;
    esac
}
@@ -990,12 +990,14 @@ dscheck_SmartOS() {
    # joyent cloud has two virt types: kvm and container
    # on kvm, product name on joyent public cloud shows 'SmartDC HVM'
    # on the container platform, uname's version has: BrandZ virtual linux
+    # for container, we also verify that the socketfile exists to protect
+    # against embedded containers (lxd running on brandz)
    local smartdc_kver="BrandZ virtual linux"
+    local metadata_sockfile="${PATH_ROOT}/native/.zonecontrol/metadata.sock"
    dmi_product_name_matches "SmartDC*" && return $DS_FOUND
-    if [ 
"${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && - [ "${DI_VIRT}" = "container-other" ]; then - return ${DS_FOUND} - fi + [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && + [ -e "${metadata_sockfile}" ] && + return ${DS_FOUND} return ${DS_NOT_FOUND} } -- cgit v1.2.3 From 30e730f7ca111487d243ba9f40c66df6d7a49953 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 17 May 2018 14:59:54 -0600 Subject: read_file_or_url: move to url_helper, fix bug in its FileResponse. The result of a read_file_or_url on a file and on a url would differ in behavior. str(UrlResponse) would return UrlResponse.contents.decode('utf-8') while str(FileResponse) would return str(FileResponse.contents) The difference being "b'foo'" versus "foo". As part of the general goal of cleaning util, move read_file_or_url into url_helper. --- cloudinit/cmd/main.py | 2 +- cloudinit/config/cc_phone_home.py | 7 +-- cloudinit/config/schema.py | 4 +- cloudinit/ec2_utils.py | 14 +++--- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/helpers/azure.py | 5 ++- cloudinit/tests/test_url_helper.py | 28 +++++++++++- cloudinit/url_helper.py | 29 +++++++++++- cloudinit/user_data.py | 6 +-- cloudinit/util.py | 37 ++-------------- tests/unittests/test__init__.py | 8 ++-- .../unittests/test_datasource/test_azure_helper.py | 2 +- .../test_handler/test_handler_apt_conf_v1.py | 16 +++---- .../test_handler_apt_configure_sources_list_v1.py | 7 --- .../test_handler/test_handler_apt_source_v1.py | 27 +++++------- .../test_handler/test_handler_apt_source_v3.py | 27 +++++------- tests/unittests/test_handler/test_handler_ntp.py | 51 +++++++++------------- 17 files changed, 131 insertions(+), 141 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 3f2dbb93..d6ba90f4 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None): data = None header = b'#cloud-config' try: - resp = util.read_file_or_url(**kwargs) + resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 878069b7..3be0d1c1 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,6 +41,7 @@ keys to post. 
Available keys are: """ from cloudinit import templater +from cloudinit import url_helper from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): } url = templater.render_string(url, url_params) try: - util.read_file_or_url(url, data=real_submit_keys, - retries=tries, sec_between=3, - ssl_details=util.fetch_ssl_details(cloud.paths)) + url_helper.read_file_or_url( + url, data=real_submit_keys, retries=tries, sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths)) except Exception: util.logexc(log, "Failed to post phone home data to %s in %s tries", url, tries) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 76826e05..3bad8e22 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -4,7 +4,7 @@ from __future__ import print_function from cloudinit import importer -from cloudinit.util import find_modules, read_file_or_url +from cloudinit.util import find_modules, load_file import argparse from collections import defaultdict @@ -139,7 +139,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): """ if not os.path.exists(config_path): raise RuntimeError('Configfile {0} does not exist'.format(config_path)) - content = read_file_or_url('file://{0}'.format(config_path)).contents + content = load_file(config_path, decode=False) if not content.startswith(CLOUD_CONFIG_HEADER): errors = ( ('header', 'File {0} needs to begin with "{1}"'.format( diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index dc3f0fc3..3b7b17f1 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest', # NOT_FOUND occurs) and just in that case returning an empty string. exception_cb = functools.partial(_skip_retry_on_codes, SKIP_USERDATA_CODES) - response = util.read_file_or_url(ud_url, - ssl_details=ssl_details, - timeout=timeout, - retries=retries, - exception_cb=exception_cb) + response = url_helper.read_file_or_url( + ud_url, ssl_details=ssl_details, timeout=timeout, + retries=retries, exception_cb=exception_cb) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest', ssl_details=None, timeout=5, retries=5, leaf_decoder=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) - caller = functools.partial(util.read_file_or_url, - ssl_details=ssl_details, timeout=timeout, - retries=retries) + caller = functools.partial( + url_helper.read_file_or_url, ssl_details=ssl_details, + timeout=timeout, retries=retries) def mcaller(url): return caller(url).contents diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index aa56addb..bcb38544 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -198,7 +198,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, If version is None, then / will not be used. 
""" if read_file_or_url is None: - read_file_or_url = util.read_file_or_url + read_file_or_url = url_helper.read_file_or_url if seed_url.endswith("/"): seed_url = seed_url[:-1] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 90c12df1..e5696b1f 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -14,6 +14,7 @@ from cloudinit import temp_utils from contextlib import contextmanager from xml.etree import ElementTree +from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) @@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return util.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return util.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers) class GoalState(object): diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index b778a3a7..113249d9 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.url_helper import oauth_headers +from cloudinit.url_helper import oauth_headers, read_file_or_url from cloudinit.tests.helpers import CiTestCase, mock, skipIf +from cloudinit import util + +import httpretty try: @@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase): 'url', 'consumer_key', 'token_key', 'token_secret', 'consumer_secret') self.assertEqual('url', return_value) + + +class TestReadFileOrUrl(CiTestCase): + def test_read_file_or_url_str_from_file(self): + """Test that str(result.contents) on file is text version of contents. + It should not be "b'data'", but just "'data'" """ + tmpf = self.tmp_path("myfile1") + data = b'This is my file content\n' + util.write_file(tmpf, data, omode="wb") + result = read_file_or_url("file://%s" % tmpf) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + + @httpretty.activate + def test_read_file_or_url_str_from_url(self): + """Test that str(result.contents) on url is text version of contents. 
+ It should not be "b'data'", but just "'data'" """ + url = 'http://hostname/path' + data = b'This is my url content\n' + httpretty.register_uri(httpretty.GET, url, data) + result = read_file_or_url(url) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 1de07b1c..8067979e 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -15,6 +15,7 @@ import six import time from email.utils import parsedate +from errno import ENOENT from functools import partial from itertools import count from requests import exceptions @@ -80,6 +81,32 @@ def combine_url(base, *add_ons): return url +def read_file_or_url(url, timeout=5, retries=10, + headers=None, data=None, sec_between=1, ssl_details=None, + headers_cb=None, exception_cb=None): + url = url.lstrip() + if url.startswith("/"): + url = "file://%s" % url + if url.lower().startswith("file://"): + if data: + LOG.warning("Unable to post data to file resource %s", url) + file_path = url[len("file://"):] + try: + with open(file_path, "rb") as fp: + contents = fp.read() + except IOError as e: + code = e.errno + if e.errno == ENOENT: + code = NOT_FOUND + raise UrlError(cause=e, code=code, headers=None, url=url) + return FileResponse(file_path, contents=contents) + else: + return readurl(url, timeout=timeout, retries=retries, headers=headers, + headers_cb=headers_cb, data=data, + sec_between=sec_between, ssl_details=ssl_details, + exception_cb=exception_cb) + + # Made to have same accessors as UrlResponse so that the # read_file_or_url can return this or that object and the # 'user' of those objects will not need to know the difference. @@ -96,7 +123,7 @@ class StringResponse(object): return True def __str__(self): - return self.contents + return self.contents.decode('utf-8') class FileResponse(StringResponse): diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index cc55daf8..8f6aba1e 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -19,7 +19,7 @@ import six from cloudinit import handlers from cloudinit import log as logging -from cloudinit.url_helper import UrlError +from cloudinit.url_helper import read_file_or_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -224,8 +224,8 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = util.read_file_or_url(include_url, - ssl_details=self.ssl_details) + resp = read_file_or_url(include_url, + ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, mode=0o600) diff --git a/cloudinit/util.py b/cloudinit/util.py index fc30018c..edfedc7d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -857,37 +857,6 @@ def fetch_ssl_details(paths=None): return ssl_details -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): - url = url.lstrip() - if url.startswith("/"): - url = "file://%s" % url - if url.lower().startswith("file://"): - if data: - LOG.warning("Unable to post data to file resource %s", url) - file_path = url[len("file://"):] - try: - contents = load_file(file_path, decode=False) - except IOError as e: - code = e.errno - if e.errno == ENOENT: - code = url_helper.NOT_FOUND - raise url_helper.UrlError(cause=e, code=code, headers=None, - url=url) - return url_helper.FileResponse(file_path, contents=contents) - else: - return 
url_helper.readurl(url, - timeout=timeout, - retries=retries, - headers=headers, - headers_cb=headers_cb, - data=data, - sec_between=sec_between, - ssl_details=ssl_details, - exception_cb=exception_cb) - - def load_yaml(blob, default=None, allowed=(dict,)): loaded = default blob = decode_binary(blob) @@ -925,12 +894,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = read_file_or_url(md_url, timeout, retries, file_retries) + md_resp = url_helper.read_file_or_url(md_url, timeout, retries, + file_retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) - ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) + ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, + file_retries) ud = None if ud_resp.ok(): ud = ud_resp.contents diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index f1ab02e9..739bbebf 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase): self.assertEqual( ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_invalid_content(self, m_read): key = "cloud-config-url" url = 'http://example.com/foo' @@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase): self.assertIn(url, msg) self.assertFalse(os.path.exists(fpath)) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_valid_content(self, m_read): url = "http://example.com/foo" payload = b"#cloud-config\nmydata: foo\nbar: wark\n" @@ -210,7 +210,7 @@ class TestCmdlineUrl(CiTestCase): self.assertEqual(logging.INFO, lvl) self.assertIn(url, msg) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") @@ -221,7 +221,7 @@ class TestCmdlineUrl(CiTestCase): self.assertFalse(os.path.exists(fpath)) self.assertEqual(logging.DEBUG, lvl) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_exception_warns(self, m_read): url = "http://example.com/foo" cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index b42b073f..af9d3e1a 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -195,7 +195,7 @@ class TestAzureEndpointHttpClient(CiTestCase): self.addCleanup(patches.close) self.read_file_or_url = patches.enter_context( - mock.patch.object(azure_helper.util, 'read_file_or_url')) + mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) def test_non_secure_get(self): client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py index 83f962a9..6a4b03ee 100644 --- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py @@ -12,10 +12,6 @@ import shutil import tempfile -def 
load_tfile_or_url(*args, **kwargs): - return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)) - - class TestAptProxyConfig(TestCase): def setUp(self): super(TestAptProxyConfig, self).setUp() @@ -36,7 +32,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_http_proxy_written(self): @@ -46,7 +42,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_all_proxy_written(self): @@ -64,7 +60,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) for ptype, pval in values.items(): self.assertTrue(self._search_apt_config(contents, ptype, pval)) @@ -80,7 +76,7 @@ class TestAptProxyConfig(TestCase): cc_apt_configure.apply_apt_config({'proxy': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.pfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "foo")) def test_config_written(self): @@ -92,14 +88,14 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.cfile)) self.assertFalse(os.path.isfile(self.pfile)) - self.assertEqual(load_tfile_or_url(self.cfile), payload) + self.assertEqual(util.load_file(self.cfile), payload) def test_config_replaced(self): util.write_file(self.pfile, "content doesnt matter") cc_apt_configure.apply_apt_config({'conf': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.cfile)) - self.assertEqual(load_tfile_or_url(self.cfile), "foo") + self.assertEqual(util.load_file(self.cfile), "foo") def test_config_deleted(self): # if no 'conf' is provided, delete any previously written file diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py index d2b96f0b..23bd6e10 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py @@ -64,13 +64,6 @@ deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted """) -def load_tfile_or_url(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): """TestAptSourceConfigSourceList Main Class to test sources list rendering diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index 46ca4ce4..a3132fbd 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -39,13 +39,6 @@ S0ORP6HXET3+jC8BMG4tBWCTK/XEZw== ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -def load_tfile_or_url(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return 
util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class FakeDistro(object): """Fake Distro helper object""" def update_package_sources(self): @@ -125,7 +118,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "karmic-backports", @@ -157,13 +150,13 @@ class TestAptSourceConfig(TestCase): self.apt_src_basic(self.aptlistfile, cfg) # extra verify on two extra files of this test - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "precise-backports", "main universe multiverse restricted"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "lucid-backports", @@ -220,7 +213,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "multiverse"), @@ -241,12 +234,12 @@ class TestAptSourceConfig(TestCase): # extra verify on two extra files of this test params = self._get_default_params() - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "main"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "universe"), @@ -296,7 +289,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -336,14 +329,14 @@ class TestAptSourceConfig(TestCase): 'filename': self.aptlistfile3} self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3) - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' 'cloud-init-test/ubuntu'), "xenial", "universe"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -375,7 +368,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index e486862d..7a64c230 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -49,13 +49,6 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None -def load_tfile(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return 
util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig Main Class to test apt configs @@ -119,7 +112,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "karmic-backports", @@ -151,13 +144,13 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self._apt_src_basic(self.aptlistfile, cfg) # extra verify on two extra files of this test - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "precise-backports", "main universe multiverse restricted"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "lucid-backports", @@ -174,7 +167,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "multiverse"), @@ -201,12 +194,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): # extra verify on two extra files of this test params = self._get_default_params() - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "main"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "universe"), @@ -240,7 +233,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -277,14 +270,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'keyid': "03683F77"}} self._apt_src_keyid(self.aptlistfile, cfg, 3) - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' 'cloud-init-test/ubuntu'), "xenial", "universe"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -310,7 +303,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(self.aptlistfile)) - contents = load_tfile(self.aptlistfile) + contents = util.load_file(self.aptlistfile) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 6da4564e..6fe3659d 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -155,9 +155,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, 
template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", content.decode()) + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + util.load_file(confpath)) def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): """write_ntp_config_template defaults pools servers upon empty config. @@ -176,10 +176,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools {0}\n".format(pools), - content.decode()) + util.load_file(confpath)) def test_defaults_pools_empty_lists_sles(self): """write_ntp_config_template defaults opensuse pools upon empty config. @@ -196,11 +195,11 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents for pool in default_pools: self.assertIn('opensuse', pool) self.assertEqual( - "servers []\npools {0}\n".format(default_pools), content.decode()) + "servers []\npools {0}\n".format(default_pools), + util.load_file(confpath)) self.assertIn( "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), @@ -217,10 +216,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), - content.decode()) + util.load_file(confpath)) def test_distro_ntp_client_configs(self): """Test we have updated ntp client configs on different distros""" @@ -267,17 +265,17 @@ class TestNtp(FilesystemMockingTestCase): cc_ntp.write_ntp_config_template(distro, servers=servers, pools=pools, path=confpath, template_fn=template_fn) - content = util.read_file_or_url('file://' + confpath).contents + content = util.load_file(confpath) if client in ['ntp', 'chrony']: expected_servers = '\n'.join([ 'server {0} iburst'.format(srv) for srv in servers]) print('distro=%s client=%s' % (distro, client)) - self.assertIn(expected_servers, content.decode('utf-8'), + self.assertIn(expected_servers, content, ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) expected_pools = '\n'.join([ 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn(expected_pools, content.decode('utf-8'), + self.assertIn(expected_pools, content, ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) elif client == 'systemd-timesyncd': @@ -286,7 +284,7 @@ class TestNtp(FilesystemMockingTestCase): "# See timesyncd.conf(5) for details.\n\n" + "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools))) - self.assertEqual(expected_content, content.decode()) + self.assertEqual(expected_content, content) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" @@ -308,10 +306,10 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] m_select.return_value = ntpconfig cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) - content = util.read_file_or_url('file://' + confpath).contents pools = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( - "servers []\npools {0}\n".format(pools), content.decode()) + "servers []\npools {0}\n".format(pools), + util.load_file(confpath)) self.assertNotIn('Invalid config:', self.logs.getvalue()) @skipUnlessJsonSchema() @@ 
-333,9 +331,8 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" "ntp.servers.1: None is not of type 'string'", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual("servers ['valid', None]\npools [123]\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -357,9 +354,8 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp.pools: 123 is not of type 'array'\n" "ntp.servers: 'non-array' is not of type 'array'", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual("servers non-array\npools 123\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -381,10 +377,9 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp: Additional properties are not allowed " "('invalidkey' was unexpected)", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools ['0.mycompany.pool.ntp.org']\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -407,10 +402,10 @@ class TestNtp(FilesystemMockingTestCase): " has non-unique elements\nntp.servers: " "['10.0.0.1', '10.0.0.1'] has non-unique elements", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", content.decode()) + "pools ['0.mypool.org', '0.mypool.org']\n", + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') def test_ntp_handler_timesyncd(self, m_select): @@ -426,10 +421,9 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] m_select.return_value = ntpconfig cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - content.decode()) + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') def test_ntp_handler_enabled_false(self, m_select): @@ -466,10 +460,9 @@ class TestNtp(FilesystemMockingTestCase): m_util.subp.assert_called_with( ['systemctl', 'reload-or-restart', service_name], capture=True) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools {0}\n".format(pools), - content.decode()) + util.load_file(confpath)) def test_opensuse_picks_chrony(self): """Test opensuse picks chrony or ntp on certain distro versions""" @@ -638,10 +631,9 @@ class TestNtp(FilesystemMockingTestCase): mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' with mock.patch(mock_path, self.new_root): cc_ntp.handle('notimportant', cfg, mycloud, None, None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - content.decode()) + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') @mock.patch('cloudinit.config.cc_ntp.reload_ntp') @@ -675,10 +667,9 @@ class TestNtp(FilesystemMockingTestCase): with mock.patch(mock_path, self.new_root): cc_ntp.handle('notimportant', {'ntp': cfg}, mycloud, None, None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( 
"servers []\npools ['mypool.org']\n%s" % custom, - content.decode()) + util.load_file(confpath)) m_schema.assert_called_with(expected_merged_cfg) -- cgit v1.2.3 From b4ae0e1fb8a48a83ea325cf032eb1acb196ee31c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 May 2018 10:07:59 -0400 Subject: ds-identify: ensure that we have certain tokens in PATH. SuSE builds were not getting a PATH set in generator's environment. This may seem like mis-configuration on the system, but caused ds-identify to fail to find blkid (or any other program). The change here just ensures that we get /sbin /usr/sbin /bin /usr/bin into the PATH when main is run. LP: #1771382 --- tests/unittests/test_ds_identify.py | 23 ++++++++++++++++++++++- tools/ds-identify | 11 +++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7f12be59..64d9f9f8 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -175,7 +175,9 @@ class DsIdentifyBase(CiTestCase): def _call_via_dict(self, data, rootd=None, **kwargs): # return output of self.call with a dict input like VALID_CFG[item] xwargs = {'rootd': rootd} - for k in ('mocks', 'args', 'policy_dmi', 'policy_no_dmi', 'files'): + passthrough = ('mocks', 'func', 'args', 'policy_dmi', + 'policy_no_dmi', 'files') + for k in passthrough: if k in data: xwargs[k] = data[k] if k in kwargs: @@ -535,6 +537,25 @@ class TestDsIdentify(DsIdentifyBase): del mycfg['files'][ds_smartos.METADATA_SOCKFILE] self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") + def test_path_env_gets_set_from_main(self): + """PATH environment should always have some tokens when main is run. + + We explicitly call main as we want to ensure it updates PATH.""" + cust = copy.deepcopy(VALID_CFG['NoCloud']) + rootd = self.tmp_dir() + mpp = 'main-printpath' + pre = "MYPATH=" + cust['files'][mpp] = ( + 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;') + ret = self._check_via_dict( + cust, RC_FOUND, + func=".", args=[os.path.join(rootd, mpp)], rootd=rootd) + line = [l for l in ret.stdout.splitlines() if l.startswith(pre)][0] + toks = line.replace(pre, "").split(":") + expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"] + self.assertEqual(expected, [p for p in expected if p in toks], + "path did not have expected tokens") + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" diff --git a/tools/ds-identify b/tools/ds-identify index dc7ac3a7..ce0477a5 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -187,6 +187,16 @@ block_dev_with_label() { return 0 } +ensure_sane_path() { + local t + for t in /sbin /usr/sbin /bin /usr/bin; do + case ":$PATH:" in + *:$t:*|*:$t/:*) continue;; + esac + PATH="${PATH:+${PATH}:}$t" + done +} + read_fs_info() { cached "${DI_BLKID_OUTPUT}" && return 0 # do not rely on links in /dev/disk which might not be present yet. @@ -1464,6 +1474,7 @@ _main() { main() { local ret="" + ensure_sane_path [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && [ -f "$PATH_RUN_DI_RESULT" ]; then -- cgit v1.2.3 From 529d48f69d3784b2314397f5eab9d750ab03cf6a Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 22 May 2018 10:55:04 -0400 Subject: cc_mounts: Do not add devices to fstab that are already present. 
Do not add new entries to /etc/fstab for devices that already have an existing fstab entry. Resolves: rhbz#1542578 --- cloudinit/config/cc_mounts.py | 53 +++++++---- cloudinit/tests/helpers.py | 4 +- .../unittests/test_handler/test_handler_mounts.py | 104 ++++++++++++++++++++- 3 files changed, 136 insertions(+), 25 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index b3b25c3b..eca6ea3f 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" +MNT_COMMENT = "comment=cloudconfig" LOG = logging.getLogger(__name__) @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): LOG.debug("mounts configuration is %s", cfgmnt) + fstab_lines = [] + fstab_devs = {} + fstab_removed = [] + + for line in util.load_file(FSTAB_PATH).splitlines(): + if MNT_COMMENT in line: + fstab_removed.append(line) + continue + + try: + toks = WS.split(line) + except Exception: + pass + fstab_devs[toks[0]] = line + fstab_lines.append(line) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): start = str(cfgmnt[i][0]) sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized != start: + log.debug("changed %s => %s" % (start, sanitized)) + if sanitized is None: log.debug("Ignoring nonexistent named mount %s", start) continue + elif sanitized in fstab_devs: + log.info("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) + continue - if sanitized != start: - log.debug("changed %s => %s" % (start, sanitized)) cfgmnt[i][0] = sanitized # in case the user did not quote a field (likely fs-freq, fs_passno) @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): for defmnt in defmnts: start = defmnt[0] sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized != start: + log.debug("changed default device %s => %s" % (start, sanitized)) + if sanitized is None: log.debug("Ignoring nonexistent default named mount %s", start) continue - if sanitized != start: - log.debug("changed default device %s => %s" % (start, sanitized)) + elif sanitized in fstab_devs: + log.debug("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) + continue + defmnt[0] = sanitized cfgmnt_has = False @@ -409,31 +437,18 @@ def handle(_name, cfg, cloud, log, _args): log.debug("No modifications to fstab needed") return - comment = "comment=cloudconfig" cc_lines = [] needswap = False dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this - line[3] = "%s,%s" % (line[3], comment) + line[3] = "%s,%s" % (line[3], MNT_COMMENT) if line[2] == "swap": needswap = True if line[1].startswith("/"): dirs.append(line[1]) cc_lines.append('\t'.join(line)) - fstab_lines = [] - removed = [] - for line in util.load_file(FSTAB_PATH).splitlines(): - try: - toks = WS.split(line) - if toks[3].find(comment) != -1: - removed.append(line) - continue - except Exception: - pass - fstab_lines.append(line) - for d in dirs: try: util.ensure_dir(d) @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed to make '%s' config-mount", d) sadds = [WS.sub(" ", n) for n in cc_lines] - sdrops = [WS.sub(" ", n) for n in removed] + 
sdrops = [WS.sub(" ", n) for n in fstab_removed] sops = (["- " + drop for drop in sdrops if drop not in sadds] + ["+ " + add for add in sadds if add not in sdrops]) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 117a9cfe..07059fd4 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -111,12 +111,12 @@ class TestCase(unittest2.TestCase): super(TestCase, self).setUp() self.reset_global_state() - def add_patch(self, target, attr, **kwargs): + def add_patch(self, target, attr, *args, **kwargs): """Patches specified target object and sets it as attr on test instance also schedules cleanup""" if 'autospec' not in kwargs: kwargs['autospec'] = True - m = mock.patch(target, **kwargs) + m = mock.patch(target, *args, **kwargs) p = m.start() self.addCleanup(m.stop) setattr(self, attr, p) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index fe492d4b..8fea6c2a 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -1,8 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import os.path -import shutil -import tempfile from cloudinit.config import cc_mounts @@ -18,8 +16,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): def setUp(self): super(TestSanitizeDevname, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) + self.new_root = self.tmp_dir() self.patchOS(self.new_root) def _touch(self, path): @@ -134,4 +131,103 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): cc_mounts.sanitize_devname( 'ephemeral0.1', lambda x: disk_path, mock.Mock())) + +class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + + swap_path = '/dev/sdb1' + + def setUp(self): + super(TestFstabHandling, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + + self.fstab_path = os.path.join(self.new_root, 'etc/fstab') + self._makedirs('/etc') + + self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', + 'mock_fstab_path', + self.fstab_path, + autospec=False) + + self.add_patch('cloudinit.config.cc_mounts._is_block_device', + 'mock_is_block_device', + return_value=True) + + self.add_patch('cloudinit.config.cc_mounts.util.subp', + 'mock_util_subp') + + self.mock_cloud = mock.Mock() + self.mock_log = mock.Mock() + self.mock_cloud.device_name_to_device = self.device_name_to_device + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def device_name_to_device(self, path): + if path == 'swap': + return self.swap_path + else: + dev = None + + return dev + + def test_fstab_no_swap_device(self): + '''Ensure that cloud-init adds a discovered swap partition + to /etc/fstab.''' + + fstab_original_content = '' + fstab_expected_content = ( + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_same_swap_device_already_configured(self): + '''Ensure that cloud-init will not add a swap device if the same + device already exists in /etc/fstab.''' + + 
fstab_original_content = '%s swap swap defaults 0 0\n' % ( + self.swap_path,) + fstab_expected_content = fstab_original_content + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_alternate_swap_device_already_configured(self): + '''Ensure that cloud-init will add a discovered swap device to + /etc/fstab even when there exists a swap definition on another + device.''' + + fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n' + fstab_expected_content = ( + fstab_original_content + + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + # vi: ts=4 expandtab -- cgit v1.2.3 From 5446c788160412189200c6cc688b14c9f9071943 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 May 2018 16:06:41 -0400 Subject: Update version.version_string to contain packaged version. This modifies version.version_string to support having the package build write the *packaged* version in with an easy replace. Then, when cloud-init reports its version it will include the full packaged version. Also modified here are upstream package build files to get that done. Note part of the trickery in packages/debian/rules.in was to avoid the 'basic' templater consuming the '$variable' variable names. LP: #1770712 --- cloudinit/tests/test_version.py | 31 +++++++++++++++++++++++++++++++ cloudinit/version.py | 4 ++++ packages/debian/rules.in | 2 ++ tests/unittests/test_version.py | 14 -------------- 4 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 cloudinit/tests/test_version.py delete mode 100644 tests/unittests/test_version.py (limited to 'tests/unittests') diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py new file mode 100644 index 00000000..a96c2a47 --- /dev/null +++ b/cloudinit/tests/test_version.py @@ -0,0 +1,31 @@ +# This file is part of cloud-init. See LICENSE file for license information.
+ +from cloudinit.tests.helpers import CiTestCase +from cloudinit import version + +import mock + + +class TestExportsFeatures(CiTestCase): + def test_has_network_config_v1(self): + self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) + + def test_has_network_config_v2(self): + self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) + + +class TestVersionString(CiTestCase): + @mock.patch("cloudinit.version._PACKAGED_VERSION", + "17.2-3-gb05b9972-0ubuntu1") + def test_package_version_respected(self): + """If _PACKAGED_VERSION is filled in, then it should be returned.""" + self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) + + @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") + @mock.patch("cloudinit.version.__VERSION__", "17.2") + def test_package_version_skipped(self): + """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" + self.assertEqual("17.2", version.version_string()) + + +# vi: ts=4 expandtab diff --git a/cloudinit/version.py b/cloudinit/version.py index ccd0f84e..ce3b8c1e 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -5,6 +5,7 @@ # This file is part of cloud-init. See LICENSE file for license information. __VERSION__ = "18.2" +_PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ # supports network config version 1 @@ -15,6 +16,9 @@ FEATURES = [ def version_string(): + """Extract a version string from cloud-init.""" + if not _PACKAGED_VERSION.startswith('@@'): + return _PACKAGED_VERSION return __VERSION__ # vi: ts=4 expandtab diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 4aa907e3..e542c7f1 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -3,6 +3,7 @@ INIT_SYSTEM ?= systemd export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) PYVER ?= python${pyver} +DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version) %: dh $@ --with $(PYVER),systemd --buildsystem pybuild @@ -14,6 +15,7 @@ override_dh_install: cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh + flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py deleted file mode 100644 index d012f69d..00000000 --- a/tests/unittests/test_version.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase -from cloudinit import version - - -class TestExportsFeatures(CiTestCase): - def test_has_network_config_v1(self): - self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) - - def test_has_network_config_v2(self): - self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) - -# vi: ts=4 expandtab -- cgit v1.2.3 From aa4eeb80839382117e1813e396dc53aa634fd7ba Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 23 May 2018 15:45:39 -0400 Subject: Azure: Ignore NTFS mount errors when checking ephemeral drive The Azure data source provides a method to check whether a NTFS partition on the ephemeral disk is safe for reformatting to ext4. The method checks to see if there are customer data files on the disk. 
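Returning to the version commit above for a moment: the whole mechanism is one sentinel constant plus a fallback. A distilled sketch mirroring the diff (the sed invocation in debian/rules is what rewrites the sentinel at package build time):

# In the source tree the constant keeps its placeholder form; the package
# build rewrites it in the installed file, roughly:
#   sed -i 's,@@PACKAGED_VERSION@@,18.2-0ubuntu1,' .../version.py
__VERSION__ = "18.2"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'

def version_string():
    """Return the packaged version when the build filled it in."""
    if not _PACKAGED_VERSION.startswith('@@'):
        return _PACKAGED_VERSION
    return __VERSION__

The '18.2-0ubuntu1' value is illustrative; the rules file substitutes whatever dpkg-parsechangelog reports.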
However, mounting the partition fails on systems that do not have the capability of mounting NTFS. Note that in this case, it is also very unlikely that the NTFS partition would have been used by the system (since it can't mount it). The only case would be where an update to the system removed the capability to mount NTFS, the likelihood of which is also very small. This change allows the reformatting of the ephemeral disk to ext4 on systems where mounting NTFS is not supported. --- cloudinit/sources/DataSourceAzure.py | 63 ++++++++++++---- cloudinit/util.py | 5 +- tests/unittests/test_datasource/test_azure.py | 105 +++++++++++++++++++++----- 3 files changed, 138 insertions(+), 35 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 1b03d460..7007d9ea 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -208,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = { } DS_CFG_PATH = ['datasource', DS_NAME] +DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs' DEF_EPHEMERAL_LABEL = 'Temporary Storage' # The redacted password fails to meet password complexity requirements @@ -394,14 +395,9 @@ class DataSourceAzure(sources.DataSource): if found == ddir: LOG.debug("using files cached in %s", ddir) - # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform - # now update ds_cfg to reflect contents pass in config - if not util.is_FreeBSD(): - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) - if seed: - self.metadata['random_seed'] = seed + seed = _get_random_seed() + if seed: + self.metadata['random_seed'] = seed user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) @@ -539,7 +535,9 @@ class DataSourceAzure(sources.DataSource): return fabric_data def activate(self, cfg, is_new_instance): - address_ephemeral_resize(is_new_instance=is_new_instance) + address_ephemeral_resize(is_new_instance=is_new_instance, + preserve_ntfs=self.ds_cfg.get( + DS_CFG_KEY_PRESERVE_NTFS, False)) return @property @@ -583,17 +581,29 @@ def _has_ntfs_filesystem(devpath): return os.path.realpath(devpath) in ntfs_devices -def can_dev_be_reformatted(devpath): - """Determine if block device devpath is newly formatted ephemeral. +def can_dev_be_reformatted(devpath, preserve_ntfs): + """Determine if the ephemeral drive at devpath should be reformatted. - A newly formatted disk will: + A fresh ephemeral disk is formatted by Azure and will: a.) have a partition table (dos or gpt) b.) have 1 partition that is ntfs formatted, or have 2 partitions with the second partition ntfs formatted. (larger instances with >2TB ephemeral disk have gpt, and will have a microsoft reserved partition as part 1. LP: #1686514) c.) the ntfs partition will have no files other than possibly - 'dataloss_warning_readme.txt'""" + 'dataloss_warning_readme.txt' + + User can indicate that NTFS should never be destroyed by setting + DS_CFG_KEY_PRESERVE_NTFS in dscfg. + If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS + to make sure cloud-init does not accidentally wipe their data. 
+ If cloud-init cannot mount the disk to check for data, destruction + will be allowed, unless the dscfg key is set.""" + if preserve_ntfs: + msg = ('config says to never destroy NTFS (%s.%s), skipping checks' % + (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)) + return False, msg + if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath @@ -626,18 +636,27 @@ def can_dev_be_reformatted(devpath): bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(cand_path, count_files) + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) except util.MountFailedError as e: + if "mount: unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: + LOG.warning("it looks like you're using NTFS on the ephemeral disk, " + 'to ensure that filesystem does not get wiped, set ' + '%s.%s in config', '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, - is_new_instance=False): + is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, @@ -653,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, if is_new_instance: result, msg = (True, "First instance boot.") else: - result, msg = can_dev_be_reformatted(devpath) + result, msg = can_dev_be_reformatted(devpath, preserve_ntfs) LOG.debug("reformattable=%s: %s", result, msg) if not result: @@ -967,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev): return False +def _get_random_seed(): + """Return content random seed file if available, otherwise, + return None.""" + # azure / hyper-v provides random data here + # TODO. find the seed on FreeBSD platform + # now update ds_cfg to reflect contents pass in config + if util.is_FreeBSD(): + return None + return util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) + + def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): diff --git a/cloudinit/util.py b/cloudinit/util.py index edfedc7d..653ed6ea 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1581,7 +1581,8 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): +def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, + update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory in which it was mounted, then unmount. 
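An aside on how the pieces above fit together: the probe mounts the candidate partition as ntfs with a C locale so the mount error text is stable, and treats "unknown filesystem type" as permission to reformat. A condensed, hypothetical sketch of that decision flow (count_files and the message strings are simplified from the diff; mount_cb is injected so the sketch stays self-contained — the real caller passes cloudinit.util.mount_cb):

import os

class MountFailedError(Exception):
    """Stand-in for cloudinit.util.MountFailedError."""

def count_files(mountpoint):
    return len(os.listdir(mountpoint))

def safe_to_reformat(mount_cb, cand_path, preserve_ntfs=False):
    if preserve_ntfs:
        return False, "config says to never destroy NTFS"
    try:
        file_count = mount_cb(cand_path, count_files, mtype="ntfs",
                              update_env_for_mount={'LANG': 'C'})
    except MountFailedError as e:
        if "mount: unknown filesystem type 'ntfs'" in str(e):
            # this host cannot mount NTFS at all, so it cannot have
            # written anything important to the partition either
            return True, "cannot mount NTFS; assuming no important files"
        return False, "mount of %s failed: %s" % (cand_path, e)
    if file_count != 0:
        return False, "had %d files on it" % file_count
    return True, "no important files; safe for reformatting"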
Return whatever 'callback' @@ -1643,7 +1644,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): mountcmd.extend(['-t', mtype]) mountcmd.append(device) mountcmd.append(tmpd) - subp(mountcmd) + subp(mountcmd, update_env=update_env_for_mount) umount = tmpd # This forces it to be unmounted (when set) mountpoint = tmpd break diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 26e8d7d3..e82716eb 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file, write_file from cloudinit.sources import DataSourceAzure as dsaz -from cloudinit.util import find_freebsd_part -from cloudinit.util import get_path_dev_freebsd +from cloudinit.util import (b64e, decode_binary, load_file, write_file, + find_freebsd_part, get_path_dev_freebsd, + MountFailedError) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, ExitStack, PY26, SkipTest) @@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase): self.patches = ExitStack() self.addCleanup(self.patches.close) + self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) + super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -335,6 +337,18 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual(data['agent_invoked'], '_COMMAND') + def test_sys_cfg_set_never_destroy_ntfs(self): + sys_cfg = {'datasource': {'Azure': { + 'never_destroy_ntfs': 'user-supplied-value'}}} + data = {'ovfcontent': construct_valid_ovf_env(data={}), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), + 'user-supplied-value') + def test_username_used(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} @@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase): mock.MagicMock(return_value={}))) self.patches.enter_context( mock.patch.object(dsaz.util, 'which', lambda x: True)) + self.patches.enter_context( + mock.patch.object(dsaz, '_get_random_seed')) def _dmi_mocks(key): if key == 'system-uuid': @@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase): # return sorted by partition number return sorted(ret, key=lambda d: d[0]) - def mount_cb(device, callback): + def mount_cb(device, callback, mtype, update_env_for_mount): + self.assertEqual('ntfs', mtype) + self.assertEqual('C', update_env_for_mount.get('LANG')) p = self.tmp_dir() for f in bypath.get(device).get('files', []): write_file(os.path.join(p, f), content=f) @@ -988,14 +1006,16 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2}, '/dev/sda3': {'num': 3}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" self.patchup({'/dev/sda': {}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) @@ -1007,7 
+1027,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1}, '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) @@ -1020,7 +1041,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': ['secret.txt']}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("files on it", msg.lower()) @@ -1032,7 +1054,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1}, '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1043,7 +1066,8 @@ class TestCanDevBeReformatted(CiTestCase): 'partitions': { '/dev/sda1': {'num': 1, 'fs': 'zfs'}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) @@ -1055,9 +1079,14 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': ['file1.txt', 'file2.exe']}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(value) - self.assertIn("files on it", msg.lower()) + with mock.patch.object(dsaz.LOG, 'warning') as warning: + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) + wmsg = warning.call_args[0][0] + self.assertIn("looks like you're using NTFS on the ephemeral disk", + wmsg) + self.assertFalse(value) + self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): """1 mountable ntfs partition and no files can be formatted.""" @@ -1066,7 +1095,8 @@ class TestCanDevBeReformatted(CiTestCase): 'partitions': { '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1078,7 +1108,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': ['dataloss_warning_readme.txt']} }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1093,7 +1124,8 @@ class TestCanDevBeReformatted(CiTestCase): 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], 'realpath': '/dev/sdb1'} }}}) - value, msg = dsaz.can_dev_be_reformatted(epath) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1112,10 +1144,49 @@ class TestCanDevBeReformatted(CiTestCase): epath + '-part3': {'num': 3, 'fs': 'ext', 'realpath': '/dev/sdb3'} }}}) - value, msg = dsaz.can_dev_be_reformatted(epath) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) + def test_ntfs_mount_errors_true(self): + """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 
1, 'fs': 'ntfs', 'files': []} + }}}) + + err = ("Unexpected error while running command.\n", + "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", + "'/dev/sda1', '/fake-tmp/dir']\n" + "Exit code: 32\n" + "Reason: -\n" + "Stdout: -\n" + "Stderr: mount: unknown filesystem type 'ntfs'") + self.m_mount_cb.side_effect = MountFailedError( + 'Failed mounting %s to %s due to: %s' % + ('/dev/sda', '/fake-tmp/dir', err)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, assuming', msg) + + def test_never_destroy_ntfs_config_false(self): + """Normally formattable situation with never_destroy_ntfs set.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=True) + self.assertFalse(value) + self.assertIn("config says to never destroy NTFS " + "(datasource.Azure.never_destroy_ntfs)", msg) + class TestAzureNetExists(CiTestCase): -- cgit v1.2.3 From 3b28bdc616f3e7f4d6b419629dc7b9efc3ae8d1e Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 23 May 2018 15:56:16 -0400 Subject: yaml_load/schema: Add invalid line and column nums to error message Yaml tracebacks are generally hard to read for average users. Add a bit of logic to util.yaml_load and schema validation to look for YAMLError.context_marker or problem_marker line and column counts. No longer log the full exceeption traceback from the yaml_load error, instead just LOG.warning for the specific error and point to the offending line and column where the problem exists. --- cloudinit/config/schema.py | 60 +++++++++++++++++++++-------- cloudinit/util.py | 16 +++++++- tests/unittests/test_handler/test_schema.py | 39 ++++++++++++++++--- tests/unittests/test_util.py | 30 +++++++++++++-- 4 files changed, 118 insertions(+), 27 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 3bad8e22..080a6d06 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): """Return contents of the cloud-config file annotated with schema errors. - @param cloudconfig: YAML-loaded object from the original_content. + @param cloudconfig: YAML-loaded dict from the original_content or empty + dict if unparseable. @param original_content: The contents of a cloud-config file @param schema_errors: List of tuples from a JSONSchemaValidationError. The tuples consist of (schemapath, error_message). 
""" if not schema_errors: return original_content - schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) + schemapaths = {} + if cloudconfig: + schemapaths = _schemapath_for_cloudconfig( + cloudconfig, original_content) errors_by_line = defaultdict(list) error_count = 1 error_footer = [] annotated_content = [] for path, msg in schema_errors: - errors_by_line[schemapaths[path]].append(msg) + match = re.match(r'format-l(?P\d+)\.c(?P\d+).*', path) + if match: + line, col = match.groups() + errors_by_line[int(line)].append(msg) + else: + col = None + errors_by_line[schemapaths[path]].append(msg) + if col is not None: + msg = 'Line {line} column {col}: {msg}'.format( + line=line, col=col, msg=msg) error_footer.append('# E{0}: {1}'.format(error_count, msg)) error_count += 1 lines = original_content.decode().split('\n') @@ -142,18 +155,31 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): content = load_file(config_path, decode=False) if not content.startswith(CLOUD_CONFIG_HEADER): errors = ( - ('header', 'File {0} needs to begin with "{1}"'.format( + ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( config_path, CLOUD_CONFIG_HEADER.decode())),) - raise SchemaValidationError(errors) - + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: cloudconfig = yaml.safe_load(content) - except yaml.parser.ParserError as e: - errors = ( - ('format', 'File {0} is not valid yaml. {1}'.format( - config_path, str(e))),) - raise SchemaValidationError(errors) - + except (yaml.YAMLError) as e: + line = column = 1 + mark = None + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): + mark = getattr(e, 'context_mark') + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): + mark = getattr(e, 'problem_mark') + if mark: + line = mark.line + 1 + column = mark.column + 1 + errors = (('format-l{line}.c{col}'.format(line=line, col=column), + 'File {0} is not valid yaml. {1}'.format( + config_path, str(e))),) + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: validate_cloudconfig_schema( cloudconfig, schema, strict=True) @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): list_index = 0 RE_YAML_INDENT = r'^(\s*)' scopes = [] - for line_number, line in enumerate(content_lines): + for line_number, line in enumerate(content_lines, 1): indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) line = line.strip() if not line or line.startswith('#'): @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): scopes.append((indent_depth + 2, key + '.0')) for inner_list_index in range(0, len(yaml.safe_load(value))): list_key = key + '.' 
+ str(inner_list_index) - schema_line_numbers[list_key] = line_number + 1 - schema_line_numbers[key] = line_number + 1 + schema_line_numbers[list_key] = line_number + schema_line_numbers[key] = line_number return schema_line_numbers @@ -337,9 +363,11 @@ def handle_schema_args(name, args): try: validate_cloudconfig_file( args.config_file, full_schema, args.annotate) - except (SchemaValidationError, RuntimeError) as e: + except SchemaValidationError as e: if not args.annotate: error(str(e)) + except RuntimeError as e: + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/util.py b/cloudinit/util.py index 653ed6ea..c0473b8c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -874,8 +874,20 @@ def load_yaml(blob, default=None, allowed=(dict,)): " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted - except (yaml.YAMLError, TypeError, ValueError): - logexc(LOG, "Failed loading yaml blob") + except (yaml.YAMLError, TypeError, ValueError) as e: + msg = 'Failed loading yaml blob' + mark = None + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): + mark = getattr(e, 'context_mark') + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): + mark = getattr(e, 'problem_mark') + if mark: + msg += ( + '. Invalid format at line {line} column {col}: "{err}"'.format( + line=mark.line + 1, col=mark.column + 1, err=e)) + else: + msg += '. {err}'.format(err=e) + LOG.warning(msg) return loaded diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index ac41f124..fb266faf 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -134,22 +134,35 @@ class ValidateCloudConfigFileTest(CiTestCase): with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertEqual( - 'Cloud config schema errors: header: File {0} needs to begin with ' - '"{1}"'.format(self.config_file, CLOUD_CONFIG_HEADER.decode()), + 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' + ' with "{1}"'.format( + self.config_file, CLOUD_CONFIG_HEADER.decode()), str(context_mgr.exception)) - def test_validateconfig_file_error_on_non_yaml_format(self): - """On non-yaml format, validate_cloudconfig_file errors.""" + def test_validateconfig_file_error_on_non_yaml_scanner_error(self): + """On non-yaml scan issues, validate_cloudconfig_file errors.""" + # Generate a scanner error by providing text on a single line with + # improper indent. 
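The mark-probing idiom now used in both schema.py and util.py above can be factored into a single helper; a hypothetical distillation:

import yaml

def yaml_error_position(err, default=(1, 1)):
    """Best-effort 1-indexed (line, column) from a yaml.YAMLError."""
    # prefer context_mark, then problem_mark, exactly as the diff does
    mark = getattr(err, 'context_mark', None) or getattr(
        err, 'problem_mark', None)
    if mark is None:
        return default
    return (mark.line + 1, mark.column + 1)

try:
    yaml.safe_load("{}}")
except yaml.YAMLError as e:
    line, col = yaml_error_position(e)  # (1, 3), matching the tests above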
+ write_file(self.config_file, '#cloud-config\nasdf:\nasdf') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, {}) + self.assertIn( + 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format( + self.config_file), + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_non_yaml_parser_error(self): + """On non-yaml parser issues, validate_cloudconfig_file errors.""" write_file(self.config_file, '#cloud-config\n{}}') with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertIn( - 'schema errors: format: File {0} is not valid yaml.'.format( + 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format( self.config_file), str(context_mgr.exception)) @skipUnlessJsonSchema() - def test_validateconfig_file_sctricty_validates_schema(self): + def test_validateconfig_file_sctrictly_validates_schema(self): """validate_cloudconfig_file raises errors on invalid schema.""" schema = { 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}} @@ -342,6 +355,20 @@ class MainTest(CiTestCase): 'Expected either --config-file argument or --doc\n', m_stderr.getvalue()) + def test_main_absent_config_file(self): + """Main exits non-zero when config file is absent.""" + myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] + with mock.patch('sys.exit', side_effect=self.sys_exit): + with mock.patch('sys.argv', myargs): + with mock.patch('sys.stderr', new_callable=StringIO) as \ + m_stderr: + with self.assertRaises(SystemExit) as context_manager: + main() + self.assertEqual(1, context_manager.exception.code) + self.assertEqual( + 'Configfile NOT_A_FILE does not exist\n', + m_stderr.getvalue()) + def test_main_prints_docs(self): """When --doc parameter is provided, main generates documentation.""" myargs = ['mycmd', '--doc'] diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 84941c7d..d774f3dc 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -4,6 +4,7 @@ from __future__ import print_function import logging import os +import re import shutil import stat import tempfile @@ -265,26 +266,49 @@ class TestGetCmdline(helpers.TestCase): self.assertEqual("abcd 123", ret) -class TestLoadYaml(helpers.TestCase): +class TestLoadYaml(helpers.CiTestCase): mydefault = "7b03a8ebace993d806255121073fed52" + with_logs = True def test_simple(self): mydata = {'1': "one", '2': "two"} self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) def test_nonallowed_returns_default(self): + '''Any unallowed types result in returning default; log the issue.''' # for now, anything not in the allowed list just returns the default. myyaml = yaml.dump({'1': "one"}) self.assertEqual(util.load_yaml(blob=myyaml, default=self.mydefault, allowed=(str,)), self.mydefault) - - def test_bogus_returns_default(self): + regex = re.compile( + r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but' + r' got dict') + self.assertTrue(regex.search(self.logs.getvalue()), + msg='Missing expected yaml load error') + + def test_bogus_scan_error_returns_default(self): + '''On Yaml scan error, load_yaml returns the default and logs issue.''' badyaml = "1\n 2:" self.assertEqual(util.load_yaml(blob=badyaml, default=self.mydefault), self.mydefault) + self.assertIn( + 'Failed loading yaml blob. 
Invalid format at line 2 column 3:' + ' "mapping values are not allowed here', + self.logs.getvalue()) + + def test_bogus_parse_error_returns_default(self): + '''On Yaml parse error, load_yaml returns default and logs issue.''' + badyaml = "{}}" + self.assertEqual(util.load_yaml(blob=badyaml, + default=self.mydefault), + self.mydefault) + self.assertIn( + 'Failed loading yaml blob. Invalid format at line 1 column 3:' + " \"expected \'<block end>\', but found \'}\'", + self.logs.getvalue()) def test_unsafe_types(self): # should not load complex types -- cgit v1.2.3 From 12799d96f85e210c8e1216a3b06d8a98468fedd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 23 May 2018 16:04:42 -0400 Subject: tests: Avoid using https in httpretty, improve HttPretty test case. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On OpenSuSE 42.3, we would get errors running tests/unittests/test_handler/test_handler_chef.py - test_myhttps_nonet raises an UnmockedError: No mocking was registered, and real connections are not allowed - test_myhttps_net raises SSLError: ("bad handshake: SysCallError(32, 'EPIPE')",) This fixes the errors by just using http instead of https. Also it modifies the HttprettyTestCase to do the httpretty activate and deactivate itself in setUp and tearDown. Then we don't have to decorate individual test_ methods. Also, we set httpretty.HTTPretty.allow_net_connect = False Test cases here should not reach out to a network resource. LP: #1771659 --- cloudinit/tests/helpers.py | 8 ++++++++ tests/unittests/test_data.py | 13 +++++++++++-- tests/unittests/test_datasource/test_aliyun.py | 2 -- tests/unittests/test_datasource/test_ec2.py | 12 ------------ tests/unittests/test_datasource/test_gce.py | 1 - tests/unittests/test_datasource/test_openstack.py | 12 ------------ tests/unittests/test_datasource/test_scaleway.py | 3 --- tests/unittests/test_ec2_util.py | 9 --------- tests/unittests/test_handler/test_handler_chef.py | 16 ++++++++++++---- 9 files changed, 31 insertions(+), 45 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 07059fd4..5bfe7fa4 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -3,6 +3,7 @@ from __future__ import print_function import functools +import httpretty import logging import os import shutil @@ -303,14 +304,21 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): class HttprettyTestCase(CiTestCase): # necessary as http_proxy gets in the way of httpretty # https://github.com/gabrielfalcao/HTTPretty/issues/122 + # Also make sure that allow_net_connect is set to False. + # And make sure reset and enable/disable are done.
def setUp(self): self.restore_proxy = os.environ.get('http_proxy') if self.restore_proxy is not None: del os.environ['http_proxy'] super(HttprettyTestCase, self).setUp() + httpretty.HTTPretty.allow_net_connect = False + httpretty.reset() + httpretty.enable() def tearDown(self): + httpretty.disable() + httpretty.reset() if self.restore_proxy: os.environ['http_proxy'] = self.restore_proxy super(HttprettyTestCase, self).tearDown() diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 275b16d2..91d35cb8 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -524,7 +524,17 @@ c: 4 self.assertEqual(cfg.get('password'), 'gocubs') self.assertEqual(cfg.get('locale'), 'chicago') - @httpretty.activate + +class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): + + def setUp(self): + TestConsumeUserData.setUp(self) + helpers.HttprettyTestCase.setUp(self) + + def tearDown(self): + TestConsumeUserData.tearDown(self) + helpers.HttprettyTestCase.tearDown(self) + @mock.patch('cloudinit.url_helper.time.sleep') def test_include(self, mock_sleep): """Test #include.""" @@ -543,7 +553,6 @@ c: 4 cc = util.load_yaml(cc_contents) self.assertTrue(cc.get('included')) - @httpretty.activate @mock.patch('cloudinit.url_helper.time.sleep') def test_include_bad_url(self, mock_sleep): """Test #include with a bad URL.""" diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index 4fa9616b..1e77842f 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self.ds.get_hostname()) @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - @httpretty.activate def test_with_mock_server(self, m_is_aliyun): m_is_aliyun.return_value = True self.regist_default_server() @@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self._test_host_name() @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - @httpretty.activate def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): """If is_aliyun returns false, then get_data should return False.""" m_is_aliyun.return_value = False diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index dff8b1ec..497e7610 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -191,7 +191,6 @@ def register_mock_metaserver(base_url, data): register(base_url, 'not found', status=404) def myreg(*argc, **kwargs): - # print("register_url(%s, %s)" % (argc, kwargs)) return httpretty.register_uri(httpretty.GET, *argc, **kwargs) register_helper(myreg, base_url, data) @@ -236,7 +235,6 @@ class TestEc2(test_helpers.HttprettyTestCase): return_value=platform_data) if md: - httpretty.HTTPretty.allow_net_connect = False all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) for version in all_versions: @@ -255,7 +253,6 @@ class TestEc2(test_helpers.HttprettyTestCase): register_mock_metaserver(instance_id_url, None) return ds - @httpretty.activate def test_network_config_property_returns_version_1_network_data(self): """network_config property returns network version 1 for metadata. 
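One detail in the TestConsumeUserDataHttp hunk above deserves a note: the class inherits from two test bases whose setUp/tearDown do not cooperate via super(), so it chains both pairs by name. A minimal illustration of the pattern (class names hypothetical):

import unittest

class LoggingBase(unittest.TestCase):
    def setUp(self):
        self.log_buffer = []

class HttpBase(unittest.TestCase):
    def setUp(self):
        self.http_ready = True

class Combined(LoggingBase, HttpBase):
    def setUp(self):
        # plain super(Combined, self).setUp() would stop at
        # LoggingBase.setUp(), which never calls super(), leaving
        # HttpBase.setUp() unexecuted; calling both by name avoids that
        LoggingBase.setUp(self)
        HttpBase.setUp(self)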
@@ -288,7 +285,6 @@ class TestEc2(test_helpers.HttprettyTestCase): m_get_mac.return_value = mac1 self.assertEqual(expected, ds.network_config) - @httpretty.activate def test_network_config_property_set_dhcp4_on_private_ipv4(self): """network_config property configures dhcp4 on private ipv4 nics. @@ -330,7 +326,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ds._network_config = {'cached': 'data'} self.assertEqual({'cached': 'data'}, ds.network_config) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): """Refresh the network_config Ec2 cache if network key is absent. @@ -364,7 +359,6 @@ class TestEc2(test_helpers.HttprettyTestCase): 'type': 'physical'}]} self.assertEqual(expected, ds.network_config) - @httpretty.activate def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): """get_instance-id gets DataSourceEc2Local.identity if not present. @@ -397,7 +391,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" @@ -409,7 +402,6 @@ class TestEc2(test_helpers.HttprettyTestCase): self.assertTrue(ret) self.assertEqual(0, m_dhcp.call_count) - @httpretty.activate def test_valid_platform_with_strict_false(self): """Valid platform data should return true with strict_id false.""" ds = self._setup_ds( @@ -419,7 +411,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ret = ds.get_data() self.assertTrue(ret) - @httpretty.activate def test_unknown_platform_with_strict_true(self): """Unknown platform data with strict_id true should return False.""" uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' @@ -430,7 +421,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ret = ds.get_data() self.assertFalse(ret) - @httpretty.activate def test_unknown_platform_with_strict_false(self): """Unknown platform data with strict_id false should return True.""" uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' @@ -462,7 +452,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ' not {0}'.format(platform_name)) self.assertIn(message, self.logs.getvalue()) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): """DataSourceEc2Local returns False on BSD. @@ -481,7 +470,6 @@ class TestEc2(test_helpers.HttprettyTestCase): "FreeBSD doesn't support running dhclient with -sf", self.logs.getvalue()) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index eb3cec42..41176c6a 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -78,7 +78,6 @@ def _set_mock_metadata(gce_meta=None): return (404, headers, '') # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 - httpretty.reset() httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 42c31554..bb180c08 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -135,7 +135,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): super(TestOpenStackDataSource, self).setUp() self.tmp = self.tmp_dir() - @hp.activate def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) f = _read_metadata_service() @@ -157,7 +156,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', metadata.get('instance-id')) - @hp.activate def test_no_ec2(self): _register_uris(self.VERSION, {}, {}, OS_FILES) f = _read_metadata_service() @@ -168,7 +166,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual({}, f.get('ec2-metadata')) self.assertEqual(2, f.get('version')) - @hp.activate def test_bad_metadata(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -177,7 +174,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.NonReadable, _read_metadata_service) - @hp.activate def test_bad_uuid(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) @@ -188,7 +184,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -201,7 +196,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('userdata')) - @hp.activate def test_vendordata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -213,7 +207,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('vendordata')) - @hp.activate def test_vendordata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -222,7 +215,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -231,7 +223,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_datasource(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, @@ -251,7 +242,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) self.assertIsNone(ds_os.vendordata_raw) - @hp.activate def test_bad_datasource_meta(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -266,7 +256,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) 
self.assertIsNone(ds_os.version) - @hp.activate def test_no_datasource(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -285,7 +274,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) - @hp.activate def test_disabled_datasource(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 8dec06b1..e4e9bb20 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -176,7 +176,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.vendordata_url = \ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @@ -212,7 +211,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @@ -236,7 +234,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index af78997f..3f50f57d 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -11,7 +11,6 @@ from cloudinit import url_helper as uh class TestEc2Util(helpers.HttprettyTestCase): VERSION = 'latest' - @hp.activate def test_userdata_fetch(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -20,7 +19,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION) self.assertEqual('stuff', userdata.decode('utf-8')) - @hp.activate def test_userdata_fetch_fail_not_found(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -28,7 +26,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION, retries=0) self.assertEqual('', userdata) - @hp.activate def test_userdata_fetch_fail_server_dead(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -36,7 +33,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION, retries=0) self.assertEqual('', userdata) - @hp.activate def test_userdata_fetch_fail_server_not_found(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -44,7 +40,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION) self.assertEqual('', userdata) - @hp.activate def test_metadata_fetch_no_keys(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -62,7 +57,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(md['ami-launch-index'], '1') - @hp.activate def test_metadata_fetch_key(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) 
hp.register_uri(hp.GET, base_url, status=200, @@ -83,7 +77,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(1, len(md['public-keys'])) - @hp.activate def test_metadata_fetch_with_2_keys(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -108,7 +101,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(2, len(md['public-keys'])) - @hp.activate def test_metadata_fetch_bdm(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -140,7 +132,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(bdm['ami'], 'sdb') self.assertEqual(bdm['ephemeral0'], 'sdc') - @hp.activate def test_metadata_no_security_credentials(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index 0136a93d..f4bbd66d 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -14,19 +14,27 @@ from cloudinit.sources import DataSourceNone from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipIf) + HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) LOG = logging.getLogger(__name__) CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) +# This is adjusted to use http because using with https causes issue +# in some openssl/httpretty combinations. +# https://github.com/gabrielfalcao/HTTPretty/issues/242 +# We saw issue in opensuse 42.3 with +# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1 +OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") -class TestInstallChefOmnibus(CiTestCase): + +class TestInstallChefOmnibus(HttprettyTestCase): def setUp(self): + super(TestInstallChefOmnibus, self).setUp() self.new_root = self.tmp_dir() - @httpretty.activate + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) def test_install_chef_from_omnibus_runs_chef_url_content(self): """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script.""" chef_outfile = self.tmp_path('chef.out', self.new_root) @@ -65,7 +73,7 @@ class TestInstallChefOmnibus(CiTestCase): expected_subp_kwargs, m_subp_blob.call_args_list[0][1]) - @httpretty.activate + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile') def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" -- cgit v1.2.3 From cd1de5f47ab6b82f2c6fd61a5f6681f33b3e5705 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 23 May 2018 16:08:43 -0600 Subject: openstack: Allow discovery in init-local using dhclient in a sandbox. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Network has not yet been configured in the init-local stage so the openstack datasource will use dhcp-client to temporarily obtain an ipv4 address and query the metadata service at http://169.254.169.254 to get network_data.json configuration. If present, the datasource will return network_config version 1 config based on that network_data.json content. 
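In rough outline, the flow this enables looks like the following sketch (illustrative only, not part of the patch; it assumes a DataSourceOpenStackLocal instance and trims the error handling that the diff below actually performs):

    # Hold a temporary dhcp lease only for the duration of the crawl;
    # EphemeralDHCPv4 tears the lease down when the context exits.
    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    def crawl_in_init_local(ds):
        # 'ds' is assumed to be a DataSourceOpenStackLocal instance.
        try:
            with EphemeralDHCPv4(ds.fallback_interface):
                return ds._crawl_metadata()
        except NoDHCPLeaseError:
            return None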
Previously the OpenStack datasource only set up dhcp on the fallback interface, so this represents a change in behavior: cloud-init now reacts to the full config provided by openstack. Also significant to OpenStack is the separation of a _crawl_data operation from get_data(). crawl_data walks the available metadata services and returns a dict of discovered content. get_data consumes the crawled_data, caches it in the datasource and reacts to that data. /run/cloud-init/instance-data.json now publishes the network_data.json or ec2_metadata key if that data is present on any datasource. The main reasons for the separation of crawl from get_data: * Enable performance metrics of cloud-init's metadata crawls on each * Enable cloud-init modules and scripts to query and consume metadata content which may have updated/changed after cloud-init's initial cache during instance boot. (Think hotplug) Also generalize common logic to the base DataSource class/module: * Move to a common UNSET variable up into the base datasource module; fix EC2, ConfigDrive, OpenStack, SmartOS to use the global. * Drop get_url_settings from Ec2, CloudStack and OpenStack and generalize DataSource.get_url_params(). Allow subclasses to override url_max_wait, url_timeout and url_retries params. * Rename the get_network_metadata bool to perform_dhcp_setup as it designates whether EphemeralDHCPv4 setup is required before crawling metadata. LP: #1749717 --- cloudinit/sources/DataSourceCloudStack.py | 31 ++--- cloudinit/sources/DataSourceConfigDrive.py | 4 +- cloudinit/sources/DataSourceEc2.py | 48 ++----- cloudinit/sources/DataSourceOpenStack.py | 161 ++++++++++++++-------- cloudinit/sources/DataSourceSmartOS.py | 9 +- cloudinit/sources/__init__.py | 76 ++++++++++ cloudinit/sources/tests/test_init.py | 89 +++++++++++- tests/unittests/test_datasource/test_common.py | 1 + tests/unittests/test_datasource/test_openstack.py | 121 +++++++++++++++- 9 files changed, 416 insertions(+), 124 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 0df545fc..d4b758f2 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource): dsname = 'CloudStack' + # Setup read_url parameters per get_url_params. + url_max_wait = 120 + url_timeout = 50 + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') @@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address = "http://%s/" % (self.vr_addr,) self.cfg = {} - def _get_url_settings(self): - mcfg = self.ds_cfg - max_wait = 120 - try: - max_wait = int(mcfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. 
using %s", max_wait) + def wait_for_metadata_service(self): + url_params = self.get_url_params() - if max_wait == 0: + if url_params.max_wait_seconds <= 0: return False - timeout = 50 - try: - timeout = int(mcfg.get("timeout", timeout)) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - return (max_wait, timeout) - - def wait_for_metadata_service(self): - (max_wait, timeout) = self._get_url_settings() - urls = [uhelp.combine_url(self.metadata_address, 'latest/meta-data/instance-id')] start_time = time.time() - url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + url = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 121cf215..4cb28977 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): self.version = None self.ec2_metadata = None self._network_config = None - self.network_json = None + self.network_json = sources.UNSET self.network_eni = None self.known_macs = None self.files = {} @@ -149,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): @property def network_config(self): if self._network_config is None: - if self.network_json is not None: + if self.network_json not in (None, sources.UNSET): LOG.debug("network config provided via network_json") self._network_config = openstack.convert_net_json( self.network_json, known_macs=self.known_macs) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 21e9ef84..968ab3f7 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" -_unset = "_unset" - class Platforms(object): # TODO Rename and move to cloudinit.cloud.CloudNames @@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource): # for extended metadata content. IPv6 support comes in 2016-09-02 extended_metadata_versions = ['2016-09-02'] + # Setup read_url parameters per get_url_params. + url_max_wait = 120 + url_timeout = 50 + _cloud_platform = None - _network_config = _unset # Used for caching calculated network config v1 + _network_config = sources.UNSET # Used to cache calculated network cfg v1 # Whether we want to get network configuration from the metadata service. - get_network_metadata = False - - # Track the discovered fallback nic for use in configuration generation. - _fallback_interface = None + perform_dhcp_setup = False def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) @@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource): elif self.cloud_platform == Platforms.NO_EC2_METADATA: return False - if self.get_network_metadata: # Setup networking in init-local stage. + if self.perform_dhcp_setup: # Setup networking in init-local stage. 
if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False @@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource): else: return self.metadata['instance-id'] - def _get_url_settings(self): - mcfg = self.ds_cfg - max_wait = 120 - try: - max_wait = int(mcfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) - - timeout = 50 - try: - timeout = max(0, int(mcfg.get("timeout", timeout))) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - return (max_wait, timeout) - def wait_for_metadata_service(self): mcfg = self.ds_cfg - (max_wait, timeout) = self._get_url_settings() - if max_wait <= 0: + url_params = self.get_url_params() + if url_params.max_wait_seconds <= 0: return False # Remove addresses from the list that wont resolve. @@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( - urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn) + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warn) if url: self.metadata_address = url2base[url] @@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource): @property def network_config(self): """Return a network config dict for rendering ENI or netplan files.""" - if self._network_config != _unset: + if self._network_config != sources.UNSET: return self._network_config if self.metadata is None: - # this would happen if get_data hadn't been called. leave as _unset + # this would happen if get_data hadn't been called. leave as UNSET LOG.warning( "Unexpected call to network_config when metadata is None.") return None @@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource): self._fallback_interface = _legacy_fbnic self.fallback_nic = None else: - self._fallback_interface = net.find_fallback_nic() - if self._fallback_interface is None: - LOG.warning("Did not find a fallback interface on EC2.") + return super(DataSourceEc2, self).fallback_interface return self._fallback_interface def _crawl_metadata(self): @@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2): metadata service. If the metadata service provides network configuration then render the network configuration for that instance based on metadata. """ - get_network_metadata = True # Get metadata network config if present + perform_dhcp_setup = True # Use dhcp before querying metadata def get_data(self): supported_platforms = (Platforms.AWS,) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index fb166ae1..1a12a3f1 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -7,6 +7,7 @@ import time from cloudinit import log as logging +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper from cloudinit import util @@ -27,46 +28,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): dsname = "OpenStack" + _network_config = sources.UNSET # Used to cache calculated network cfg v1 + + # Whether we want to get network configuration from the metadata service. 
+ perform_dhcp_setup = False + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None self.ssl_details = util.fetch_ssl_details(self.paths) self.version = None self.files = {} - self.ec2_metadata = None + self.ec2_metadata = sources.UNSET + self.network_json = sources.UNSET def __str__(self): root = sources.DataSource.__str__(self) mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) return mstr - def _get_url_settings(self): - # TODO(harlowja): this is shared with ec2 datasource, we should just - # move it to a shared location instead... - # Note: the defaults here are different though. - - # max_wait < 0 indicates do not wait - max_wait = -1 - timeout = 10 - retries = 5 - - try: - max_wait = int(self.ds_cfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) - - try: - timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - try: - retries = int(self.ds_cfg.get("retries", retries)) - except Exception: - util.logexc(LOG, "Failed to get retries. using %s", retries) - - return (max_wait, timeout, retries) - def wait_for_metadata_service(self): urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) filtered = [x for x in urls if util.is_resolvable_url(x)] @@ -86,10 +66,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout, _retries) = self._get_url_settings() + url_params = self.get_url_params() start_time = time.time() - avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, - timeout=timeout) + avail_url = url_helper.wait_for_url( + urls=md_urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds) if avail_url: LOG.debug("Using metadata source: '%s'", url2base[avail_url]) else: @@ -99,38 +80,64 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def _get_data(self): - try: - if not self.wait_for_metadata_service(): - return False - except IOError: - return False + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) - (_max_wait, timeout, retries) = self._get_url_settings() + @property + def network_config(self): + """Return a network config dict for rendering ENI or netplan files.""" + if self._network_config != sources.UNSET: + return self._network_config + + # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide + # network_config by default unless configured in /etc/cloud/cloud.cfg*. + # Patch Xenial and Artful before release to default to False. + if util.is_false(self.ds_cfg.get('apply_network_config', True)): + self._network_config = None + return self._network_config + if self.network_json == sources.UNSET: + # this would happen if get_data hadn't been called. 
leave as UNSET + LOG.warning( + 'Unexpected call to network_config when network_json is None.') + return None + + LOG.debug('network config provided via network_json') + self._network_config = openstack.convert_net_json( + self.network_json, known_macs=None) + return self._network_config - try: - results = util.log_time(LOG.debug, - 'Crawl of openstack metadata service', - read_metadata_service, - args=[self.metadata_address], - kwargs={'ssl_details': self.ssl_details, - 'retries': retries, - 'timeout': timeout}) - except openstack.NonReadable: - return False - except (openstack.BrokenMetadata, IOError): - util.logexc(LOG, "Broken metadata address %s", - self.metadata_address) - return False + def _get_data(self): + """Crawl metadata, parse and persist that data for this instance. + + @return: True when metadata discovered indicates OpenStack datasource. + False when unable to contact metadata service or when metadata + format is invalid or disabled. + """ + if self.perform_dhcp_setup: # Setup networking in init-local stage. + try: + with EphemeralDHCPv4(self.fallback_interface): + results = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: + util.logexc(LOG, str(e)) + return False + else: + try: + results = self._crawl_metadata() + except sources.InvalidMetaDataException as e: + util.logexc(LOG, str(e)) + return False self.dsmode = self._determine_dsmode([results.get('dsmode')]) if self.dsmode == sources.DSMODE_DISABLED: return False - md = results.get('metadata', {}) md = util.mergemanydict([md, DEFAULT_METADATA]) self.metadata = md self.ec2_metadata = results.get('ec2-metadata') + self.network_json = results.get('networkdata') self.userdata_raw = results.get('userdata') self.version = results['version'] self.files.update(results.get('files', {})) @@ -145,9 +152,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True - def check_instance_id(self, sys_cfg): - # quickly (local check only) if self.instance_id is still valid - return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _crawl_metadata(self): + """Crawl metadata service when available. + + @returns: Dictionary with all metadata discovered for this datasource. + @raise: InvalidMetaDataException on unreadable or broken + metadata. + """ + try: + if not self.wait_for_metadata_service(): + raise sources.InvalidMetaDataException( + 'No active metadata service found') + except IOError as e: + raise sources.InvalidMetaDataException( + 'IOError contacting metadata service: {error}'.format( + error=str(e))) + + url_params = self.get_url_params() + + try: + result = util.log_time( + LOG.debug, 'Crawl of openstack metadata service', + read_metadata_service, args=[self.metadata_address], + kwargs={'ssl_details': self.ssl_details, + 'retries': url_params.num_retries, + 'timeout': url_params.timeout_seconds}) + except openstack.NonReadable as e: + raise sources.InvalidMetaDataException(str(e)) + except (openstack.BrokenMetadata, IOError): + msg = 'Broken metadata address {addr}'.format( + addr=self.metadata_address) + raise sources.InvalidMetaDataException(msg) + return result + + +class DataSourceOpenStackLocal(DataSourceOpenStack): + """Run in init-local using a dhcp discovery prior to metadata crawl. + + In init-local, no network is available. This subclass sets up minimal + networking with dhclient on a viable nic so that it can talk to the + metadata service. 
If the metadata service provides network configuration + then render the network configuration for that instance based on metadata. + """ + + perform_dhcp_setup = True # Get metadata network config if present def read_metadata_service(base_url, ssl_details=None, @@ -159,6 +207,7 @@ def read_metadata_service(base_url, ssl_details=None, # Used to match classes to dependencies datasources = [ + (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index fcb46b14..c91e4d59 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -165,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource): dsname = "Joyent" - _unset = "_unset" - smartos_type = _unset - md_client = _unset + smartos_type = sources.UNSET + md_client = sources.UNSET def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -189,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource): return "%s [client=%s]" % (root, self.md_client) def _init(self): - if self.smartos_type == self._unset: + if self.smartos_type == sources.UNSET: self.smartos_type = get_smartos_environ() if self.smartos_type is None: self.md_client = None - if self.md_client == self._unset: + if self.md_client == sources.UNSET: self.md_client = jmc_client_factory( smartos_type=self.smartos_type, metadata_sockfile=self.ds_cfg['metadata_sockfile'], diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index df0b374a..90d74575 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -9,6 +9,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc +from collections import namedtuple import copy import json import os @@ -17,6 +18,7 @@ import six from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging +from cloudinit import net from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util @@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json' # Key which can be provide a cloud's official product name to cloud-init METADATA_CLOUD_NAME_KEY = 'cloud-name' +UNSET = "_unset" + LOG = logging.getLogger(__name__) @@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception): pass +class InvalidMetaDataException(Exception): + """Raised when metadata is broken, unavailable or disabled.""" + pass + + def process_base64_metadata(metadata, key_path=''): """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" md_copy = copy.deepcopy(metadata) @@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''): return md_copy +URLParams = namedtuple( + 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) + + @six.add_metaclass(abc.ABCMeta) class DataSource(object): @@ -81,6 +94,14 @@ class DataSource(object): # Cached cloud_name as determined by _get_cloud_name _cloud_name = None + # Track the discovered fallback nic for use in configuration generation. 
+ _fallback_interface = None + + # read_url_params + url_max_wait = -1 # max_wait < 0 means do not wait + url_timeout = 10 # timeout for each metadata url read attempt + url_retries = 5 # number of times to retry url upon 404 + def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro @@ -128,6 +149,14 @@ class DataSource(object): 'meta-data': self.metadata, 'user-data': self.get_userdata_raw(), 'vendor-data': self.get_vendordata_raw()}} + if hasattr(self, 'network_json'): + network_json = getattr(self, 'network_json') + if network_json != UNSET: + instance_data['ds']['network_json'] = network_json + if hasattr(self, 'ec2_metadata'): + ec2_metadata = getattr(self, 'ec2_metadata') + if ec2_metadata != UNSET: + instance_data['ds']['ec2_metadata'] = ec2_metadata instance_data.update( self._get_standardized_metadata()) try: @@ -149,6 +178,42 @@ class DataSource(object): 'Subclasses of DataSource must implement _get_data which' ' sets self.metadata, vendordata_raw and userdata_raw.') + def get_url_params(self): + """Return the Datasource's prefered url_read parameters. + + Subclasses may override url_max_wait, url_timeout, url_retries. + + @return: A URLParams object with max_wait_seconds, timeout_seconds, + num_retries. + """ + max_wait = self.url_max_wait + try: + max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait)) + except ValueError: + util.logexc( + LOG, "Config max_wait '%s' is not an int, using default '%s'", + self.ds_cfg.get("max_wait"), max_wait) + + timeout = self.url_timeout + try: + timeout = max( + 0, int(self.ds_cfg.get("timeout", self.url_timeout))) + except ValueError: + timeout = self.url_timeout + util.logexc( + LOG, "Config timeout '%s' is not an int, using default '%s'", + self.ds_cfg.get('timeout'), timeout) + + retries = self.url_retries + try: + retries = int(self.ds_cfg.get("retries", self.url_retries)) + except Exception: + util.logexc( + LOG, "Config retries '%s' is not an int, using default '%s'", + self.ds_cfg.get('retries'), retries) + + return URLParams(max_wait, timeout, retries) + def get_userdata(self, apply_filter=False): if self.userdata is None: self.userdata = self.ud_proc.process(self.get_userdata_raw()) @@ -161,6 +226,17 @@ class DataSource(object): self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) return self.vendordata + @property + def fallback_interface(self): + """Determine the network interface used during local network config.""" + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + if self._fallback_interface is None: + LOG.warning( + "Did not find a fallback interface on %s.", + self.cloud_name) + return self._fallback_interface + @property def cloud_name(self): """Return lowercase cloud name as determined by the datasource. 
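For illustration, a hypothetical subclass consuming the new helper might look like this (a sketch only; DataSourceExample and its metadata_address are invented names, and the class-level url_* defaults can still be overridden at runtime by the ds_cfg keys 'max_wait', 'timeout' and 'retries'):

    from cloudinit import url_helper
    from cloudinit.sources import DataSource

    class DataSourceExample(DataSource):
        url_max_wait = 120  # seconds to wait for a responsive metadata url
        url_timeout = 50    # timeout per read attempt

        def wait_for_metadata_service(self):
            # get_url_params() merges these defaults with ds_cfg overrides.
            params = self.get_url_params()
            if params.max_wait_seconds <= 0:
                return False  # a non-positive max_wait means do not wait
            url = url_helper.wait_for_url(
                urls=[self.metadata_address],
                max_wait=params.max_wait_seconds,
                timeout=params.timeout_seconds)
            return bool(url)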
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 452e9219..d5bc98a4 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -17,6 +17,7 @@ from cloudinit import util class DataSourceTestSubclassNet(DataSource): dsname = 'MyTestSubclass' + url_max_wait = 55 def __init__(self, sys_cfg, distro, paths, custom_userdata=None): super(DataSourceTestSubclassNet, self).__init__( @@ -70,8 +71,7 @@ class TestDataSource(CiTestCase): """Init uses DataSource.dsname for sourcing ds_cfg.""" sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} distro = 'distrotest' # generally should be a Distro object - paths = Paths({}) - datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) self.assertEqual({'key2': False}, datasource.ds_cfg) def test_str_is_classname(self): @@ -81,6 +81,91 @@ class TestDataSource(CiTestCase): 'DataSourceTestSubclassNet', str(DataSourceTestSubclassNet('', '', self.paths))) + def test_datasource_get_url_params_defaults(self): + """get_url_params default url config settings for the datasource.""" + params = self.datasource.get_url_params() + self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) + self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) + self.assertEqual(params.num_retries, self.datasource.url_retries) + + def test_datasource_get_url_params_subclassed(self): + """Subclasses can override get_url_params defaults.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries) + url_params = datasource.get_url_params() + self.assertNotEqual(self.datasource.get_url_params(), url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_ds_config_override(self): + """Datasource configuration options can override url param defaults.""" + sys_cfg = { + 'datasource': { + 'MyTestSubclass': { + 'max_wait': '1', 'timeout': '2', 'retries': '3'}}} + datasource = DataSourceTestSubclassNet( + sys_cfg, self.distro, self.paths) + expected = (1, 2, 3) + url_params = datasource.get_url_params() + self.assertNotEqual( + (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries), + url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_is_zero_or_greater(self): + """get_url_params ignores timeouts with a value below 0.""" + # Set an override that is below 0 which gets ignored. 
+ sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + (_max_wait, timeout, _retries) = datasource.get_url_params() + self.assertEqual(0, timeout) + + def test_datasource_get_url_uses_defaults_on_errors(self): + """On invalid system config values for url_params defaults are used.""" + # All invalid values should be logged + sys_cfg = {'datasource': { + '_undef': { + 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + url_params = datasource.get_url_params() + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries) + self.assertEqual(expected, url_params) + logs = self.logs.getvalue() + expected_logs = [ + "Config max_wait 'nope' is not an int, using default '-1'", + "Config timeout 'bug' is not an int, using default '10'", + "Config retries 'nonint' is not an int, using default '5'", + ] + for log in expected_logs: + self.assertIn(log, logs) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_is_discovered(self, m_get_fallback_nic): + """The fallback_interface is discovered via find_fallback_nic.""" + m_get_fallback_nic.return_value = 'nic9' + self.assertEqual('nic9', self.datasource.fallback_interface) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): + """Log a warning when fallback_interface can not discover the nic.""" + self.datasource._cloud_name = 'MySupahCloud' + m_get_fallback_nic.return_value = None # Couldn't discover nic + self.assertIsNone(self.datasource.fallback_interface) + self.assertEqual( + 'WARNING: Did not find a fallback interface on MySupahCloud.\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): + """The fallback_interface is cached and won't be rediscovered.""" + self.datasource._fallback_interface = 'nic10' + self.assertEqual('nic10', self.datasource.fallback_interface) + m_get_fallback_nic.assert_not_called() + def test__get_data_unimplemented(self): """Raise an error when _get_data is not implemented.""" with self.assertRaises(NotImplementedError) as context_manager: diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index ec333888..0d35dc29 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -40,6 +40,7 @@ DEFAULT_LOCAL = [ OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, + OpenStack.DataSourceOpenStackLocal, ] DEFAULT_NETWORK = [ diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index bb180c08..fad73b21 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -16,7 +16,7 @@ from six import StringIO from cloudinit import helpers from cloudinit import settings -from cloudinit.sources import convert_vendordata +from cloudinit.sources import convert_vendordata, UNSET from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -129,6 +129,8 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + + with_logs = True VERSION = 'latest' def setUp(self): @@ -223,11 +225,11 @@ class 
TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - def test_datasource(self): + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_datasource(self, m_dhcp): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertTrue(found) @@ -241,6 +243,36 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) self.assertIsNone(ds_os.vendordata_raw) + m_dhcp.assert_not_called() + + @hp.activate + @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_local_datasource(self, m_dhcp, m_net): + """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os_local = ds.DataSourceOpenStackLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + + self.assertIsNone(ds_os_local.version) + found = ds_os_local.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os_local.version) + md = dict(ds_os_local.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os_local.ec2_metadata) + self.assertEqual(USER_DATA, ds_os_local.userdata_raw) + self.assertEqual(2, len(ds_os_local.files)) + self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) + self.assertIsNone(ds_os_local.vendordata_raw) + m_dhcp.assert_called_with('eth9') def test_bad_datasource_meta(self): os_files = copy.deepcopy(OS_FILES) @@ -255,6 +287,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) + self.assertIn( + 'InvalidMetaDataException: Broken metadata address' + ' http://169.254.169.25', + self.logs.getvalue()) def test_no_datasource(self): os_files = copy.deepcopy(OS_FILES) @@ -274,6 +310,52 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) + def test_network_config_disabled_by_datasource_config(self): + """The network_config can be disabled from datasource config.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' 
+ 'convert_net_json') + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = {'apply_network_config': False} + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json # Ignore this content from metadata + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertIsNone(ds_os.network_config) + m_convert_json.assert_not_called() + + def test_network_config_from_network_json(self): + """The datasource gets network_config from network_data.json.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' + 'convert_net_json') + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json + with test_helpers.mock.patch(mock_path) as m_convert_json: + m_convert_json.return_value = example_cfg + self.assertEqual(example_cfg, ds_os.network_config) + self.assertIn( + 'network config provided via network_json', self.logs.getvalue()) + m_convert_json.assert_called_with(sample_json, known_macs=None) + + def test_network_config_cached(self): + """The datasource caches the network_config property.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' + 'convert_net_json') + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os._network_config = example_cfg + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertEqual(example_cfg, ds_os.network_config) + m_convert_json.assert_not_called() + def test_disabled_datasource(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) @@ -296,6 +378,35 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) + @hp.activate + def test_wb__crawl_metadata_does_not_persist(self): + """_crawl_metadata returns current metadata and does not cache.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + crawled_data = ds_os._crawl_metadata() + self.assertEqual(UNSET, ds_os.ec2_metadata) + self.assertIsNone(ds_os.userdata_raw) + self.assertEqual(0, len(ds_os.files)) + self.assertIsNone(ds_os.vendordata_raw) + self.assertEqual( + ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', + 'userdata', 'vendordata', 'version'], + sorted(crawled_data.keys())) + self.assertEqual('local', crawled_data['dsmode']) + self.assertEqual(EC2_META, crawled_data['ec2-metadata']) + self.assertEqual(2, len(crawled_data['files'])) + md = copy.deepcopy(crawled_data['metadata']) + md.pop('instance-id') + md.pop('local-hostname') + self.assertEqual(OSTACK_META, md) + self.assertEqual( + json.loads(OS_FILES['openstack/latest/network_data.json']), + crawled_data['networkdata']) + self.assertEqual(USER_DATA, crawled_data['userdata']) + self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) + self.assertEqual(2, crawled_data['version']) + class TestVendorDataLoading(test_helpers.TestCase): def cvj(self, data): -- cgit v1.2.3 From 3f99f4aba8af559f99f1d46b2b46bea7622da1d0 Mon Sep 17 00:00:00 2001 From: Dan McDonald Date: Thu, 24 May 2018 10:54:18 -0400 Subject: Enable SmartOS network 
metadata to work with netplan via per-subnet routes - Updated datadict reference URL - Store sdc:routes metadata in DatasourceSmartOS - Map sdc:routes values to per-interface subnet configuration - Added unittest Co-authored-by: Mike Gerdts LP: #1763512 --- cloudinit/sources/DataSourceSmartOS.py | 46 ++++++++++++++++++++++--- tests/unittests/test_datasource/test_smartos.py | 26 ++++++++++++++ 2 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c91e4d59..f92e8b5c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -17,7 +17,7 @@ # of a serial console. # # Certain behavior is defined by the DataDictionary -# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html +# https://eng.joyent.com/mdata/datadict.html # Comments with "@datadictionary" are snippets of the definition import base64 @@ -298,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource): self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] self.network_data = md['network-data'] + self.routes_data = md['routes'] self._set_provisioned() return True @@ -321,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource): convert_smartos_network_data( network_data=self.network_data, dns_servers=self.metadata['dns_servers'], - dns_domain=self.metadata['dns_domain'])) + dns_domain=self.metadata['dns_domain'], + routes=self.routes_data)) return self._network_config @@ -760,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None): # Convert SMARTOS 'sdc:nics' data to network_config yaml def convert_smartos_network_data(network_data=None, - dns_servers=None, dns_domain=None): + dns_servers=None, dns_domain=None, + routes=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data @@ -778,6 +781,10 @@ def convert_smartos_network_data(network_data=None, keys are related to ip configuration. For each ip in the 'ips' list we create a subnet entry under 'subnets' pairing the ip to a one in the 'gateways' list. + + Each route in sdc:routes is mapped to a route on each interface. + The sdc:routes properties 'dst' and 'gateway' map to 'network' and + 'gateway'. The 'linklocal' sdc:routes property is ignored. """ valid_keys = { @@ -800,6 +807,10 @@ def convert_smartos_network_data(network_data=None, 'scope', 'type', ], + 'route': [ + 'network', + 'gateway', + ], } if dns_servers: @@ -814,6 +825,9 @@ def convert_smartos_network_data(network_data=None, else: dns_domain = [] + if not routes: + routes = [] + def is_valid_ipv4(addr): return '.' in addr @@ -840,6 +854,7 @@ def convert_smartos_network_data(network_data=None, if ip == "dhcp": subnet = {'type': 'dhcp4'} else: + routeents = [] subnet = dict((k, v) for k, v in nic.items() if k in valid_keys['subnet']) subnet.update({ @@ -861,6 +876,25 @@ def convert_smartos_network_data(network_data=None, pgws[proto]['gw'] = gateways[0] subnet.update({'gateway': pgws[proto]['gw']}) + for route in routes: + rcfg = dict((k, v) for k, v in route.items() + if k in valid_keys['route']) + # Linux uses the value of 'gateway' to determine + # automatically if the route is a forward/next-hop + # (non-local IP for gateway) or an interface/resolver + # (local IP for gateway). So we can ignore the + # 'interface' attribute of sdc:routes, because SDC + # guarantees that the gateway is a local IP for + # "interface=true". 
+ # + # Eventually we should be smart and compare "gateway" + # to see if it's in the prefix. We can then smartly + # add or not-add this route. But for now, + # when in doubt, use brute force! Routes for everyone! + rcfg.update({'network': route['dst']}) + routeents.append(rcfg) + subnet.update({'routes': routeents}) + subnets.append(subnet) cfg.update({'subnets': subnets}) config.append(cfg) @@ -904,12 +938,14 @@ if __name__ == "__main__": keyname = SMARTOS_ATTRIB_JSON[key] data[key] = client.get_json(keyname) elif key == "network_config": - for depkey in ('network-data', 'dns_servers', 'dns_domain'): + for depkey in ('network-data', 'dns_servers', 'dns_domain', + 'routes'): load_key(client, depkey, data) data[key] = convert_smartos_network_data( network_data=data['network-data'], dns_servers=data['dns_servers'], - dns_domain=data['dns_domain']) + dns_domain=data['dns_domain'], + routes=data['routes']) else: if key in SMARTOS_ATTRIB_MAP: keyname, strip = SMARTOS_ATTRIB_MAP[key] diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 706e8eb8..dca0b3d4 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1027,6 +1027,32 @@ class TestNetworkConversion(TestCase): found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) + def test_routes_on_all_nics(self): + routes = [ + {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, + {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes) + self.maxDiff = None + self.assertEqual(expected, found) + @unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, "Only supported on KVM and bhyve guests under SmartOS") -- cgit v1.2.3 From 9a41fce0c2399f05b2c904948c969623dc04e491 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 8 Jun 2018 12:11:31 -0400 Subject: subp: support combine_capture argument. This adds 'combine_capture' argument as was present in curtin's subp. It is useful to get interleaved output of a command. I noticed a need for it when looking at user_data_rhevm in DataSourceAltCloud. That will run a subcommand, logging its stdout but swallowing its stderr. Another thing to change to use this would be in udevadm_settle which currently just returns the subp() call. Also, add the docstring copied from curtin's subp. 
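For illustration, a call might look like the following sketch (the command is arbitrary; behavior per the docstring and unit test added below):

    # stderr is folded into stdout in arrival order; the second tuple
    # element comes back empty ('' when decoding, b'' when decode=False).
    (out, err) = util.subp(
        ['sh', '-c', 'echo to-stdout; echo to-stderr 1>&2'],
        combine_capture=True)
    # out == 'to-stdout\nto-stderr\n' and err == ''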
--- cloudinit/util.py | 63 +++++++++++++++++++++++++++++++++++++++----- tests/unittests/test_util.py | 8 ++++++ 2 files changed, 65 insertions(+), 6 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/util.py b/cloudinit/util.py index d9b61cfe..0017de72 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1876,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): return subp(*args, **kwargs) -def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, +def subp(args, data=None, rcs=None, env=None, capture=True, + combine_capture=False, shell=False, logstring=False, decode="replace", target=None, update_env=None, status_cb=None): + """Run a subprocess. + + :param args: command to run in a list. [cmd, arg1, arg2...] + :param data: input to the command, made available on its stdin. + :param rcs: + a list of allowed return codes. If subprocess exits with a value not + in this list, a ProcessExecutionError will be raised. By default, + data is returned as a string. See 'decode' parameter. + :param env: a dictionary for the command's environment. + :param capture: + boolean indicating if output should be captured. If True, then stderr + and stdout will be returned. If False, they will not be redirected. + :param combine_capture: + boolean indicating if stderr should be redirected to stdout. When True, + interleaved stderr and stdout will be returned as the first element of + a tuple, the second will be empty string or bytes (per decode). + if combine_capture is True, then output is captured independent of + the value of capture. + :param shell: boolean indicating if this should be run with a shell. + :param logstring: + the command will be logged to DEBUG. If it contains info that should + not be logged, then logstring will be logged instead. + :param decode: + if False, no decoding will be done and returned stdout and stderr will + be bytes. Other allowed values are 'strict', 'ignore', and 'replace'. + These values are passed through to bytes().decode() as the 'errors' + parameter. There is no support for decoding to other than utf-8. + :param target: + not supported, kwarg present only to make function signature similar + to curtin's subp. + :param update_env: + update the environment for this command with this dictionary. + This will not affect the current process's os.environ. + :param status_cb: + call this function with a single string argument before starting + and after finishing. + + :return + if not capturing, return is (None, None) + if capturing, stdout and stderr are returned. 
+ if decode: + entries in tuple will be python2 unicode or python3 string + if not decode: + entries in tuple will be python2 string or python3 bytes + """ # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin @@ -1904,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, status_cb('Begin run command: {command}\n'.format(command=command)) if not logstring: LOG.debug(("Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)"), args, rcs, shell, capture) + " (shell=%s, capture=%s)"), + args, rcs, shell, 'combine' if combine_capture else capture) else: LOG.debug(("Running hidden command to protect sensitive " "input/output logstring: %s"), logstring) @@ -1915,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, if capture: stdout = subprocess.PIPE stderr = subprocess.PIPE + if combine_capture: + stdout = subprocess.PIPE + stderr = subprocess.STDOUT if data is None: # using devnull assures any reads get null, rather # than possibly waiting on input. @@ -1953,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, devnull_fp.close() # Just ensure blank instead of none. - if not out and capture: - out = b'' - if not err and capture: - err = b'' + if capture or combine_capture: + if not out: + out = b'' + if not err: + err = b'' if decode: def ldecode(data, m='utf-8'): if not isinstance(data, bytes): diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d774f3dc..2db8e3fa 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -829,6 +829,14 @@ class TestSubp(helpers.CiTestCase): r'Missing #! in script\?', util.subp, (noshebang,)) + def test_subp_combined_stderr_stdout(self): + """Providing combine_capture as True redirects stderr to stdout.""" + data = b'hello world' + (out, err) = util.subp(self.stdin2err, capture=True, + combine_capture=True, decode=False, data=data) + self.assertEqual(b'', err) + self.assertEqual(data, out) + def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) self.assertIsNone(err) -- cgit v1.2.3 From c3f1ad9abd4a28c1b4c1f34db28ac72a646cdca6 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 09:23:08 -0600 Subject: netplan: fix mtu if provided by network config for all rendered types When network configuration for any interface defines maximum transmission values (MTU) the netplan, eni and sysconfig renders will take into account any device-level, or subnet-level mtu values. When network configuration has conflicting device-level and ipv4 subnet mtu values, the subnet-specific value is honored and a warning will be logged about any ignored device-level setting. LP: #1774666 --- cloudinit/net/eni.py | 20 +++++++++++++++++--- cloudinit/net/netplan.py | 22 ++++++++++++++-------- cloudinit/net/sysconfig.py | 7 +++++++ doc/rtd/topics/network-config-format-v1.rst | 27 +++++++++++++++++++++++++++ doc/rtd/topics/network-config-format-v2.rst | 6 ++++++ tests/unittests/test_net.py | 21 ++++++++++++++++++++- 6 files changed, 91 insertions(+), 12 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index c6a71d16..bd20a361 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -10,9 +10,12 @@ from . import ParserError from . 
import renderer from .network_state import subnet_is_ipv6 +from cloudinit import log as logging from cloudinit import util +LOG = logging.getLogger(__name__) + NET_CONFIG_COMMANDS = [ "pre-up", "up", "post-up", "down", "pre-down", "post-down", ] @@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet): # TODO: switch to valid_map for attrs -def _iface_add_attrs(iface, index): +def _iface_add_attrs(iface, index, ipv4_subnet_mtu): # If the index is non-zero, this is an alias interface. Alias interfaces # represent additional interface addresses, and should not have additional # attributes. (extra attributes here are almost always either incorrect, @@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index): value = 'on' if iface[key] else 'off' if not value or key in ignore_map: continue + if key == 'mtu' and ipv4_subnet_mtu: + if value != ipv4_subnet_mtu: + LOG.warning( + "Network config: ignoring %s device-level mtu:%s because" + " ipv4 subnet-level mtu:%s provided.", + iface['name'], value, ipv4_subnet_mtu) + continue if key in multiline_keys: for v in value: content.append(" {0} {1}".format(renames.get(key, key), v)) @@ -377,12 +387,15 @@ class Renderer(renderer.Renderer): subnets = iface.get('subnets', {}) if subnets: for index, subnet in enumerate(subnets): + ipv4_subnet_mtu = None iface['index'] = index iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' if subnet_is_ipv6(subnet): subnet_inet += '6' + else: + ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet if subnet['type'].startswith('dhcp'): iface['mode'] = 'dhcp' @@ -397,7 +410,7 @@ class Renderer(renderer.Renderer): _iface_start_entry( iface, index, render_hwaddress=render_hwaddress) + _iface_add_subnet(iface, subnet) + - _iface_add_attrs(iface, index) + _iface_add_attrs(iface, index, ipv4_subnet_mtu) ) for route in subnet.get('routes', []): lines.extend(self._render_route(route, indent=" ")) @@ -409,7 +422,8 @@ class Renderer(renderer.Renderer): if 'bond-master' in iface or 'bond-slaves' in iface: lines.append("auto {name}".format(**iface)) lines.append("iface {name} {inet} {mode}".format(**iface)) - lines.extend(_iface_add_attrs(iface, index=0)) + lines.extend( + _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)) sections.append(lines) return sections diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 63443484..40143634 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match): if key.startswith(match)) -def _extract_addresses(config, entry): +def _extract_addresses(config, entry, ifname): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent netplan yaml. 
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry): addresses.append(addr) + if 'mtu' in config: + entry_mtu = entry.get('mtu') + if entry_mtu and config['mtu'] != entry_mtu: + LOG.warning( + "Network config: ignoring %s device-level mtu:%s because" + " ipv4 subnet-level mtu:%s provided.", + ifname, config['mtu'], entry_mtu) + else: + entry['mtu'] = config['mtu'] if len(addresses) > 0: entry.update({'addresses': addresses}) if len(routes) > 0: @@ -262,10 +271,7 @@ class Renderer(renderer.Renderer): else: del eth['match'] del eth['set-name'] - if 'mtu' in ifcfg: - eth['mtu'] = ifcfg.get('mtu') - - _extract_addresses(ifcfg, eth) + _extract_addresses(ifcfg, eth, ifname) ethernets.update({ifname: eth}) elif if_type == 'bond': @@ -288,7 +294,7 @@ class Renderer(renderer.Renderer): slave_interfaces = ifcfg.get('bond-slaves') if slave_interfaces == 'none': _extract_bond_slaves_by_name(interfaces, bond, ifname) - _extract_addresses(ifcfg, bond) + _extract_addresses(ifcfg, bond, ifname) bonds.update({ifname: bond}) elif if_type == 'bridge': @@ -321,7 +327,7 @@ class Renderer(renderer.Renderer): if len(br_config) > 0: bridge.update({'parameters': br_config}) - _extract_addresses(ifcfg, bridge) + _extract_addresses(ifcfg, bridge, ifname) bridges.update({ifname: bridge}) elif if_type == 'vlan': @@ -333,7 +339,7 @@ class Renderer(renderer.Renderer): macaddr = ifcfg.get('mac_address', None) if macaddr is not None: vlan['macaddress'] = macaddr.lower() - _extract_addresses(ifcfg, vlan) + _extract_addresses(ifcfg, vlan, ifname) vlans.update({ifname: vlan}) # inject global nameserver values under each all interface which diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index e53b9f1b..3d719238 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -304,6 +304,13 @@ class Renderer(renderer.Renderer): mtu_key = 'IPV6_MTU' iface_cfg['IPV6INIT'] = True if 'mtu' in subnet: + mtu_mismatch = bool(mtu_key in iface_cfg and + subnet['mtu'] != iface_cfg[mtu_key]) + if mtu_mismatch: + LOG.warning( + 'Network config: ignoring %s device-level mtu:%s' + ' because ipv4 subnet-level mtu:%s provided.', + iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) iface_cfg[mtu_key] = subnet['mtu'] elif subnet_type == 'manual': # If the subnet has an MTU setting, then ONBOOT=True diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst index 2f8ab54c..3b0148ca 100644 --- a/doc/rtd/topics/network-config-format-v1.rst +++ b/doc/rtd/topics/network-config-format-v1.rst @@ -130,6 +130,18 @@ the bond interfaces. The ``bond_interfaces`` key accepts a list of network device ``name`` values from the configuration. This list may be empty. +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + +.. note:: + + The possible supported values of a device's MTU is not available at + configuration time. It's possible to specify a value too large or to + small for a device and may be ignored by the device. + **params**: ** The ``params`` key in a bond holds a dictionary of bonding parameters. @@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys: - ``vlan_link``: Specify the underlying link via its ``name``. - ``vlan_id``: Specify the VLAN numeric id. 
+The following optional keys are supported: + +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + +.. note:: + + The possible supported values of a device's MTU is not available at + configuration time. It's possible to specify a value too large or to + small for a device and may be ignored by the device. + + **VLAN Example**:: network: diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index 335d236a..ea370ef5 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -174,6 +174,12 @@ recognized by ``inet_pton(3)`` Example for IPv4: ``gateway4: 172.16.0.1`` Example for IPv6: ``gateway6: 2001:4::1`` +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + **nameservers**: *<(mapping)>* Set DNS servers and search domains, for manual address configuration. There diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e13ca3ce..5ab61cf2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -525,6 +525,7 @@ NETWORK_CONFIGS = { config: - type: 'physical' name: 'iface0' + mtu: 8999 subnets: - type: static address: 192.168.14.2/24 @@ -660,8 +661,8 @@ iface eth0.101 inet static dns-nameservers 192.168.0.10 10.23.23.134 dns-search barley.maas sacchromyces.maas brettanomyces.maas gateway 192.168.0.1 - hwaddress aa:bb:cc:dd:ee:11 mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 vlan-raw-device eth0 vlan_id 101 @@ -757,6 +758,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true id: 101 link: eth0 macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 nameservers: addresses: - 192.168.0.10 @@ -920,6 +922,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true mtu: 1500 subnets: - type: static + # When 'mtu' matches device-level mtu, no warnings + mtu: 1500 address: 192.168.0.2/24 gateway: 192.168.0.1 dns_nameservers: @@ -1028,6 +1032,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - type: bond name: bond0 mac_address: "aa:bb:cc:dd:e8:ff" + mtu: 9000 bond_interfaces: - bond0s0 - bond0s1 @@ -1070,6 +1075,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - bond0s0 - bond0s1 + mtu: 9000 parameters: mii-monitor-interval: 100 mode: active-backup @@ -1157,6 +1163,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPADDR1=192.168.1.2 IPV6ADDR=2001:1::1/92 IPV6INIT=yes + MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no @@ -1203,6 +1210,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true name: en0 mac_address: "aa:bb:cc:dd:e8:00" - type: vlan + mtu: 2222 name: en0.99 vlan_link: en0 vlan_id: 99 @@ -1238,6 +1246,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes IPV6_DEFAULTGW=2001:1::1 + MTU=2222 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no @@ -1669,6 +1678,8 @@ iface eth1 inet dhcp class TestSysConfigRendering(CiTestCase): + with_logs = True + scripts_dir = '/etc/sysconfig/network-scripts' header = ('# 
Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') @@ -1917,6 +1928,9 @@ USERCTL=no found = self._render_and_read(network_config=yaml.load(entry['yaml'])) self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + self.assertNotIn( + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) def test_small_config(self): entry = NETWORK_CONFIGS['small'] @@ -1929,6 +1943,10 @@ USERCTL=no found = self._render_and_read(network_config=yaml.load(entry['yaml'])) self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + expected_msg = ( + 'WARNING: Network config: ignoring iface0 device-level mtu:8999' + ' because ipv4 subnet-level mtu:9000 provided.') + self.assertIn(expected_msg, self.logs.getvalue()) def test_dhcpv6_only_config(self): entry = NETWORK_CONFIGS['dhcpv6_only'] @@ -2410,6 +2428,7 @@ class TestNetplanRoundTrip(CiTestCase): class TestEniRoundTrip(CiTestCase): + def _render_and_read(self, network_config=None, state=None, eni_path=None, netrules_path=None, dir=None): if dir is None: -- cgit v1.2.3 From 171378613ef4de3c1bb4170a10ec1748ac62f39f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 13 Jun 2018 12:42:43 -0400 Subject: Fix get_proc_env for pids that have non-utf8 content in environment. There is no requirement that the environment of a process contains only utf-8 data. This modifies get_proc_env to support it reading data as binary and decoding if provided with an encoding. The default case is now that we now do: contents.decode('utf-8', 'replace') rather than contents.decode('utf-8', 'strict') LP: #1775371 --- cloudinit/util.py | 35 +++++++++++++++++---------- tests/unittests/test_util.py | 56 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 13 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/util.py b/cloudinit/util.py index 0017de72..26a41122 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2131,24 +2131,33 @@ def is_container(): return False -def get_proc_env(pid): +def get_proc_env(pid, encoding='utf-8', errors='replace'): """ Return the environment in a dict that a given process id was started with. - """ - env = {} - fn = os.path.join("/proc/", str(pid), "environ") + @param encoding: if true, then decoding will be done with + .decode(encoding, errors) and text will be returned. + if false then binary will be returned. 
+    @param errors: only used if encoding is true."""
+    fn = os.path.join("/proc", str(pid), "environ")
+
     try:
-        contents = load_file(fn)
-        toks = contents.split("\x00")
-        for tok in toks:
-            if tok == "":
-                continue
-            (name, val) = tok.split("=", 1)
-            if name:
-                env[name] = val
+        contents = load_file(fn, decode=False)
     except (IOError, OSError):
-        pass
+        return {}
+
+    env = {}
+    null, equal = (b"\x00", b"=")
+    if encoding:
+        null, equal = ("\x00", "=")
+        contents = contents.decode(encoding, errors)
+
+    for tok in contents.split(null):
+        if not tok:
+            continue
+        (name, val) = tok.split(equal, 1)
+        if name:
+            env[name] = val
     return env
 
 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 2db8e3fa..20479f66 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1089,4 +1089,60 @@ class TestLoadShellContent(helpers.TestCase):
             ''])))
 
 
+class TestGetProcEnv(helpers.TestCase):
+    """test get_proc_env."""
+    null = b'\x00'
+    simple1 = b'HOME=/'
+    simple2 = b'PATH=/bin:/sbin'
+    bootflag = b'BOOTABLE_FLAG=\x80'  # from LP: #1775371
+    mixed = b'MIXED=' + b'ab\xccde'
+
+    def _val_decoded(self, blob, encoding='utf-8', errors='replace'):
+        # return the value portion of key=val decoded.
+        return blob.split(b'=', 1)[1].decode(encoding, errors)
+
+    @mock.patch("cloudinit.util.load_file")
+    def test_non_utf8_in_environment(self, m_load_file):
+        """env may have non utf-8 decodable content."""
+        content = self.null.join(
+            (self.bootflag, self.simple1, self.simple2, self.mixed))
+        m_load_file.return_value = content
+
+        self.assertEqual(
+            {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
+             'HOME': '/', 'PATH': '/bin:/sbin',
+             'MIXED': self._val_decoded(self.mixed)},
+            util.get_proc_env(1))
+        self.assertEqual(1, m_load_file.call_count)
+
+    @mock.patch("cloudinit.util.load_file")
+    def test_encoding_none_returns_bytes(self, m_load_file):
+        """encoding none returns bytes."""
+        lines = (self.bootflag, self.simple1, self.simple2, self.mixed)
+        content = self.null.join(lines)
+        m_load_file.return_value = content
+
+        self.assertEqual(
+            dict([t.split(b'=') for t in lines]),
+            util.get_proc_env(1, encoding=None))
+        self.assertEqual(1, m_load_file.call_count)
+
+    @mock.patch("cloudinit.util.load_file")
+    def test_all_utf8_encoded(self, m_load_file):
+        """common path where only utf-8 decodable content."""
+        content = self.null.join((self.simple1, self.simple2))
+        m_load_file.return_value = content
+        self.assertEqual(
+            {'HOME': '/', 'PATH': '/bin:/sbin'},
+            util.get_proc_env(1))
+        self.assertEqual(1, m_load_file.call_count)
+
+    @mock.patch("cloudinit.util.load_file")
+    def test_non_existing_file_returns_empty_dict(self, m_load_file):
+        """as implemented, a non-existing pid returns empty dict.
+        This is how it was originally implemented."""
+        m_load_file.side_effect = OSError("File does not exist.")
+        self.assertEqual({}, util.get_proc_env(1))
+        self.assertEqual(1, m_load_file.call_count)
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From faa6f07e9de4058083a5f69ed508b6e24bd53b23 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 13 Jun 2018 12:54:43 -0400
Subject: Be more safe on string/bytes when writing multipart user-data to disk.

When creating the multipart mime message that is written as
user-data.txt.i, cloud-init was losing data when converting certain
payloads to a string.
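A minimal sketch of the behavior this change preserves, mirroring the
unit test added below (the payload is illustrative only):

    from cloudinit import user_data as ud

    blob = b'#!/bin/bash\necho \xc3\x84\n'  # bytes, not necessarily utf-8
    msg = ud.convert_string(blob)
    # The original bytes survive the round trip through the mime message.
    assert msg.get_payload(decode=True) == blob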
LP: #1768600

Author: Scott Moser
Co-Authored-By: Chad Smith
---
 cloudinit/user_data.py | 22 +++++++++++++---------
 tests/unittests/test_data.py | 11 ++++++++++-
 2 files changed, 23 insertions(+), 10 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 8f6aba1e..ed83d2d8 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -337,8 +337,10 @@ def is_skippable(part):
 
 # Coverts a raw string into a mime message
 def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
+    """convert a string (more likely bytes) or a message into
+    a mime message."""
     if not raw_data:
-        raw_data = ''
+        raw_data = b''
 
     def create_binmsg(data, content_type):
         maintype, subtype = content_type.split("/", 1)
@@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
         msg.set_payload(data)
         return msg
 
-    try:
-        data = util.decode_binary(util.decomp_gzip(raw_data))
-        if "mime-version:" in data[0:4096].lower():
-            msg = util.message_from_string(data)
-        else:
-            msg = create_binmsg(data, content_type)
-    except UnicodeDecodeError:
-        msg = create_binmsg(raw_data, content_type)
+    if isinstance(raw_data, six.text_type):
+        bdata = raw_data.encode('utf-8')
+    else:
+        bdata = raw_data
+    bdata = util.decomp_gzip(bdata, decode=False)
+    if b"mime-version:" in bdata[0:4096].lower():
+        msg = util.message_from_string(bdata.decode('utf-8'))
+    else:
+        msg = create_binmsg(bdata, content_type)
     return msg
+
 # vi: ts=4 expandtab

diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 91d35cb8..3efe7adf 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -606,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
 
 
 class TestConvertString(helpers.TestCase):
+
     def test_handles_binary_non_utf8_decodable(self):
-        blob = b'\x32\x99'
+        """Printable unicode (not utf8-decodable) is safely converted."""
+        blob = b'#!/bin/bash\necho \xc3\x84\n'
         msg = ud.convert_string(blob)
         self.assertEqual(blob, msg.get_payload(decode=True))
@@ -621,6 +623,13 @@
         msg = ud.convert_string(text)
         self.assertEqual(text, msg.get_payload(decode=False))
 
+    def test_handle_mime_parts(self):
+        """Mime parts are properly returned as a mime message."""
+        message = MIMEBase("text", "plain")
+        message.set_payload("Just text")
+        msg = ud.convert_string(str(message))
+        self.assertEqual("Just text", msg.get_payload(decode=False))
+
 
 class TestFetchBaseConfig(helpers.TestCase):
     def test_only_builtin_gets_builtin(self):
-- 
cgit v1.2.3


From fef2616b9876d3d354b0de1a8e753361e52e77b0 Mon Sep 17 00:00:00 2001
From: Robert Schweikert
Date: Fri, 15 Jun 2018 13:41:21 -0600
Subject: stages: fix tracebacks if a module stage is undefined or empty

In /etc/cloud/cloud.cfg, users and image builders can configure which
modules run during a specific cloud-init stage by modifying one of the
following lists: cloud_init_modules, cloud_config_modules,
cloud_final_modules.
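For example, a cloud.cfg carrying a null or empty stage list, as in the
illustrative excerpt below, would previously cause a traceback at boot:

    # /etc/cloud/cloud.cfg (excerpt, illustrative only)
    cloud_init_modules:        # present but null
    cloud_config_modules: []   # present but empty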
If any of the configured module lists are absent or empty, cloud-init
will emit the same message it already does for existing lists that only
contain modules which are unsupported on that platform:

   No 'config' modules to run under section 'cloud_config_modules'

LP: #1770462
---
 cloudinit/stages.py | 4 +++-
 tests/unittests/test_runs/test_simple_run.py | 32 ++++++++++++++++++++++++++--
 2 files changed, 33 insertions(+), 3 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 3998cf68..286607bf 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -697,7 +697,9 @@ class Modules(object):
         module_list = []
         if name not in self.cfg:
             return module_list
-        cfg_mods = self.cfg[name]
+        cfg_mods = self.cfg.get(name)
+        if not cfg_mods:
+            return module_list
         # Create 'module_list', an array of hashes
         # Where hash['mod'] = module name
         #       hash['freq'] = frequency
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index 762974e9..d67c422c 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import copy
 import os
 
 
@@ -127,8 +128,9 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
         """run_section forced skipped modules by using unverified_modules."""
 
         # re-write cloud.cfg with unverified_modules override
-        self.cfg['unverified_modules'] = ['spacewalk']  # Would have skipped
-        cloud_cfg = util.yaml_dumps(self.cfg)
+        cfg = copy.deepcopy(self.cfg)
+        cfg['unverified_modules'] = ['spacewalk']  # Would have skipped
+        cloud_cfg = util.yaml_dumps(cfg)
         util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
         util.write_file(os.path.join(self.new_root, 'etc',
                                      'cloud', 'cloud.cfg'), cloud_cfg)
@@ -150,4 +152,30 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
             "running unverified_modules: 'spacewalk'",
             self.logs.getvalue())
 
+    def test_none_ds_run_with_no_config_modules(self):
+        """run_section will report no modules run when none are configured."""
+
+        # re-write cloud.cfg with unverified_modules override
+        cfg = copy.deepcopy(self.cfg)
+        # Represent empty configuration in /etc/cloud/cloud.cfg
+        cfg['cloud_init_modules'] = None
+        cloud_cfg = util.yaml_dumps(cfg)
+        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
+        util.write_file(os.path.join(self.new_root, 'etc',
+                                     'cloud', 'cloud.cfg'), cloud_cfg)
+
+        initer = stages.Init()
+        initer.read_cfg()
+        initer.initialize()
+        initer.fetch()
+        initer.instancify()
+        initer.update()
+        initer.cloudify().run('consume_data', initer.consume_data,
+                              args=[PER_INSTANCE], freq=PER_INSTANCE)
+
+        mods = stages.Modules(initer)
+        (which_ran, failures) = mods.run_section('cloud_init_modules')
+        self.assertTrue(len(failures) == 0)
+        self.assertEqual([], which_ran)
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 1efa8a0a030794cec68197100f31a856d0d264ab Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 15 Jun 2018 19:33:30 -0600
Subject: openstack: avoid unneeded metadata probe on non-openstack platforms

OpenStack datasource is now discovered in init-local stage. In order to
probe whether OpenStack metadata is present, it performs a costly
sandboxed dhclient setup and metadata probe against
http://169.254.169.254 for openstack data.

Cloud-init properly detects non-OpenStack on EC2, but it spends precious
time probing the metadata service, also resulting in a confusing WARNING
log about 'metadata not present'.
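A rough sketch of the kind of short-circuit this change adds (the
constants and helpers mirror the diff below; this is not a drop-in
implementation):

    from cloudinit import util

    def looks_like_openstack():
        if not util.is_x86():
            return True  # DMI data is unreliable on non-x86 arches
        if util.read_dmi_data('system-product-name') in (
                'OpenStack Nova', 'OpenStack Compute'):
            return True
        if util.read_dmi_data('chassis-asset-tag') == 'OpenTelekomCloud':
            return True
        return util.get_proc_env(1).get('product_name') == 'OpenStack Nova'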
To avoid the wasted cycles, and confusing warning, get_data will call a detect_openstack function to quickly determine whether the platform looks like OpenStack before trying to setup network to probe and crawl the metadata service. LP: #1776701 --- cloudinit/sources/DataSourceOpenStack.py | 23 ++++ cloudinit/util.py | 13 ++- doc/rtd/topics/datasources/openstack.rst | 15 +++ tests/unittests/test_datasource/test_openstack.py | 124 +++++++++++++++++++--- tests/unittests/test_util.py | 23 ++++ 5 files changed, 182 insertions(+), 16 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 1a12a3f1..365af96a 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -23,6 +23,13 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } +# OpenStack DMI constants +DMI_PRODUCT_NOVA = 'OpenStack Nova' +DMI_PRODUCT_COMPUTE = 'OpenStack Compute' +VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] +DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] + class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): @@ -114,6 +121,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): False when unable to contact metadata service or when metadata format is invalid or disabled. """ + if not detect_openstack(): + return False if self.perform_dhcp_setup: # Setup networking in init-local stage. try: with EphemeralDHCPv4(self.fallback_interface): @@ -205,6 +214,20 @@ def read_metadata_service(base_url, ssl_details=None, return reader.read_v2() +def detect_openstack(): + """Return True when a potential OpenStack platform is detected.""" + if not util.is_x86(): + return True # Non-Intel cpus don't properly report dmi product names + product_name = util.read_dmi_data('system-product-name') + if product_name in VALID_DMI_PRODUCT_NAMES: + return True + elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: + return True + elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: + return True + return False + + # Used to match classes to dependencies datasources = [ (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), diff --git a/cloudinit/util.py b/cloudinit/util.py index 26a41122..6da95113 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2629,6 +2629,16 @@ def _call_dmidecode(key, dmidecode_path): return None +def is_x86(uname_arch=None): + """Return True if platform is x86-based""" + if uname_arch is None: + uname_arch = os.uname()[4] + x86_arch_match = ( + uname_arch == 'x86_64' or + (uname_arch[0] == 'i' and uname_arch[2:] == '86')) + return x86_arch_match + + def read_dmi_data(key): """ Wrapper for reading DMI data. @@ -2656,8 +2666,7 @@ def read_dmi_data(key): # running dmidecode can be problematic on some arches (LP: #1243287) uname_arch = os.uname()[4] - if not (uname_arch == "x86_64" or - (uname_arch.startswith("i") and uname_arch[2:] == "86") or + if not (is_x86(uname_arch) or uname_arch == 'aarch64' or uname_arch == 'amd64'): LOG.debug("dmidata is not supported on %s", uname_arch) diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 0ea89943..421da08f 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -7,6 +7,21 @@ This datasource supports reading data from the `OpenStack Metadata Service `_. 
+Discovery +------------- +To determine whether a platform looks like it may be OpenStack, cloud-init +checks the following environment attributes as a potential OpenStack platform: + + * Maybe OpenStack if + + * **non-x86 cpu architecture**: because DMI data is buggy on some arches + * Is OpenStack **if x86 architecture and ANY** of the following + + * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* + * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* + * **DMI chassis_asset_tag** is *OpenTelekomCloud* + + Configuration ------------- The following configuration can be set for the datasource in system diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index fad73b21..585acc33 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -69,6 +69,8 @@ EC2_VERSIONS = [ 'latest', ] +MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' + # TODO _register_uris should leverage test_ec2.register_mock_metaserver. def _register_uris(version, ec2_files, ec2_meta, os_files): @@ -231,7 +233,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertTrue(found) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) @@ -260,7 +265,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'broadcast-address': '192.168.2.255'}] self.assertIsNone(ds_os_local.version) - found = ds_os_local.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os_local.get_data() self.assertTrue(found) self.assertEqual(2, ds_os_local.version) md = dict(ds_os_local.metadata) @@ -284,7 +292,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) self.assertIn( @@ -306,15 +317,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) def test_network_config_disabled_by_datasource_config(self): """The network_config can be disabled from datasource config.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' 
- 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = {'apply_network_config': False} @@ -327,9 +339,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_network_config_from_network_json(self): """The datasource gets network_config from network_data.json.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' - 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' example_cfg = {'version': 1, 'config': []} ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) @@ -345,9 +355,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_network_config_cached(self): """The datasource caches the network_config property.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' - 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' example_cfg = {'version': 1, 'config': []} ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) @@ -374,7 +382,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -438,4 +449,89 @@ class TestVendorDataLoading(test_helpers.TestCase): data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} self.assertEqual(self.cvj(data), data['cloud-init']) + +@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') +class TestDetectOpenStack(test_helpers.CiTestCase): + + def test_detect_openstack_non_intel_x86(self, m_is_x86): + """Return True on non-intel platforms because dmi isn't conclusive.""" + m_is_x86.return_value = False + self.assertTrue( + ds.detect_openstack(), 'Expected detect_openstack == True') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, + m_is_x86): + """Return False on EC2 platforms.""" + m_is_x86.return_value = True + # No product_name in proc/1/environ + m_proc_env.return_value = {'HOME': '/'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on EC2 + if dmi_key == 'chassis-asset-tag': + return '' # Empty string on EC2 + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertFalse( + ds.detect_openstack(), 'Expected detect_openstack == False on EC2') + m_proc_env.assert_called_with(1) + + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_intel_product_name_compute(self, m_dmi, + m_is_x86): + """Return True on OpenStack compute and nova instances.""" + m_is_x86.return_value = True + openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] + + for product_name in openstack_product_names: + m_dmi.return_value = product_name + self.assertTrue( + ds.detect_openstack(), 'Failed to detect_openstack') + + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" + 
m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud + if dmi_key == 'chassis-asset-tag': + return 'OpenTelekomCloud' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, + m_is_x86): + """Return True when nova product_name specified in /proc/1/environ.""" + m_is_x86.return_value = True + # Nova product_name in proc/1/environ + m_proc_env.return_value = { + 'HOME': '/', 'product_name': 'OpenStack Nova'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' + if dmi_key == 'chassis-asset-tag': + return '' # Nothin 'openstackish' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + m_proc_env.assert_called_with(1) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 20479f66..7a203ce2 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -468,6 +468,29 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): self.assertIsNone(ret) +class TestIsX86(helpers.CiTestCase): + + def test_is_x86_matches_x86_types(self): + """is_x86 returns True if CPU architecture matches.""" + matched_arches = ['x86_64', 'i386', 'i586', 'i686'] + for arch in matched_arches: + self.assertTrue( + util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch) + + def test_is_x86_unmatched_types(self): + """is_x86 returns Fale on non-intel x86 architectures.""" + unmatched_arches = ['ia64', '9000/800', 'arm64v71'] + for arch in unmatched_arches: + self.assertFalse( + util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch) + + @mock.patch('cloudinit.util.os.uname') + def test_is_x86_calls_uname_for_architecture(self, m_uname): + """is_x86 returns True if platform from uname matches.""" + m_uname.return_value = [0, 1, 2, 3, 'x86_64'] + self.assertTrue(util.is_x86()) + + class TestReadDMIData(helpers.FilesystemMockingTestCase): def setUp(self): -- cgit v1.2.3 From 4ce6720104ec92d8d7c5aa993bf7ec405a2f53db Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 15 Jun 2018 20:00:15 -0600 Subject: lxd: Delete default network and detach device if lxd-init created them. Newer versions (3.0.1+) of lxd create the 'lxdbr0' network when 'lxd init --auto' is invoked. When cloud-init is given a network configuration to pass on to lxc and that config had no name specified or 'lxdbr0', then cloud-init would fail to create the network as it already exists. Similarly, we need to remove the device from the default profile so that the attach code can work. Also, add a _lxc method and use it to make sure we're getting the --force-local flag everywhere. 
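A hedged sketch of the resulting flow in handle() (names mirror this
patch; the bridge configuration is whatever the user supplied):

    cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
    maybe_cleanup_default(
        net_name=bridge_cfg.get('name', _DEFAULT_NETWORK_NAME),
        did_init=bool(init_cfg),
        create=bool(cmd_create), attach=bool(cmd_attach))
    if cmd_create:
        _lxc(cmd_create)   # e.g. lxc network create lxdbr0 ... --force-local
    if cmd_attach:
        _lxc(cmd_attach)   # e.g. lxc network attach-profile ... --force-local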
LP: #1776958 --- cloudinit/config/cc_lxd.py | 64 ++++++++++++++++--- tests/unittests/test_handler/test_handler_lxd.py | 80 +++++++++++++++++++----- 2 files changed, 120 insertions(+), 24 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 09374d2e..ac72ac4a 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. domain: """ +from cloudinit import log as logging from cloudinit import util import os distros = ['ubuntu'] +LOG = logging.getLogger(__name__) + +_DEFAULT_NETWORK_NAME = "lxdbr0" + def handle(name, cfg, cloud, log, args): # Get config @@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args): # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: + net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) if os.path.exists("/etc/default/lxd-bridge") \ and util.which(dconf_comm): # Bridge configured through packaging @@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args): else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) + maybe_cleanup_default( + net_name=net_name, did_init=bool(init_cfg), + create=bool(cmd_create), attach=bool(cmd_attach)) if cmd_create: log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_create) + _lxc(cmd_create) if cmd_attach: log.debug("Setting up default lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_attach) + _lxc(cmd_attach) elif bridge_cfg: raise RuntimeError( @@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") == "none": return None, None - bridge_name = bridge_cfg.get("name", "lxdbr0") + bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["lxc", "network", "attach-profile", bridge_name, - "default", "eth0", "--force-local"] + cmd_attach = ["network", "attach-profile", bridge_name, + "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach @@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") != "new": raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) - cmd_create = ["lxc", "network", "create", bridge_name] + cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): cmd_create.append("ipv4.address=%s/%s" % @@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("domain"): cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) - cmd_create.append("--force-local") - return cmd_create, cmd_attach + +def _lxc(cmd): + env = {'LC_ALL': 'C'} + util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + + +def maybe_cleanup_default(net_name, did_init, create, attach, + profile="default", nic_name="eth0"): + """Newer versions of lxc (3.0.1+) create a lxdbr0 network when + 'lxd init --auto' is run. Older versions did not. + + By removing ay that lxd-init created, we simply leave the add/attach + code in-tact. + + https://github.com/lxc/lxd/issues/4649""" + if net_name != _DEFAULT_NETWORK_NAME or not did_init: + return + + fail_assume_enoent = " failed. Assuming it did not exist." + succeeded = " succeeded." 
+ if create: + msg = "Deletion of lxd network '%s'" % net_name + try: + _lxc(["network", "delete", net_name]) + LOG.debug(msg + succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg + fail_assume_enoent) + + if attach: + msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) + try: + _lxc(["profile", "device", "remove", profile, nic_name]) + LOG.debug(msg + succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg + fail_assume_enoent) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index a2054980..4dd7e09f 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -33,12 +33,16 @@ class TestLxd(t_help.CiTestCase): cc = cloud.Cloud(ds, paths, {}, d, None) return cc + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_lxd_init(self, mock_util): + def test_lxd_init(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') mock_util.which.return_value = True + m_maybe_clean.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertTrue(mock_util.which.called) + # no bridge config, so maybe_cleanup should not be called. + self.assertFalse(m_maybe_clean.called) init_call = mock_util.subp.call_args_list[0][0][0] self.assertEqual(init_call, ['lxd', 'init', '--auto', @@ -46,32 +50,39 @@ class TestLxd(t_help.CiTestCase): '--storage-backend=zfs', '--storage-pool=poolname']) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_lxd_install(self, mock_util): + def test_lxd_install(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() mock_util.which.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertNotIn('WARN', self.logs.getvalue()) self.assertTrue(cc.distro.install_packages.called) + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_no_init_does_nothing(self, mock_util): + def test_no_init_does_nothing(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) + self.assertFalse(m_maybe_clean.called) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_no_lxd_does_nothing(self, mock_util): + def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) + self.assertFalse(m_maybe_clean.called) def test_lxd_debconf_new_full(self): data = {"mode": "new", @@ -147,14 +158,13 @@ class TestLxd(t_help.CiTestCase): "domain": "lxd"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["lxc", "network", "create", "testbr0", + (["network", "create", "testbr0", 
"ipv4.address=10.0.8.1/24", "ipv4.nat=true", "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", "ipv6.address=fd98:9e0:3744::1/64", - "ipv6.nat=true", "dns.domain=lxd", - "--force-local"], - ["lxc", "network", "attach-profile", - "testbr0", "default", "eth0", "--force-local"])) + "ipv6.nat=true", "dns.domain=lxd"], + ["network", "attach-profile", + "testbr0", "default", "eth0"])) def test_lxd_cmd_new_partial(self): data = {"mode": "new", @@ -163,19 +173,18 @@ class TestLxd(t_help.CiTestCase): "ipv6_nat": "true"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["lxc", "network", "create", "lxdbr0", "ipv4.address=none", - "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true", - "--force-local"], - ["lxc", "network", "attach-profile", - "lxdbr0", "default", "eth0", "--force-local"])) + (["network", "create", "lxdbr0", "ipv4.address=none", + "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"], + ["network", "attach-profile", + "lxdbr0", "default", "eth0"])) def test_lxd_cmd_existing(self): data = {"mode": "existing", "name": "testbr0"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (None, ["lxc", "network", "attach-profile", - "testbr0", "default", "eth0", "--force-local"])) + (None, ["network", "attach-profile", + "testbr0", "default", "eth0"])) def test_lxd_cmd_none(self): data = {"mode": "none"} @@ -183,4 +192,43 @@ class TestLxd(t_help.CiTestCase): cc_lxd.bridge_to_cmd(data), (None, None)) + +class TestLxdMaybeCleanupDefault(t_help.CiTestCase): + """Test the implementation of maybe_cleanup_default.""" + + defnet = cc_lxd._DEFAULT_NETWORK_NAME + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_network_other_than_default_not_deleted(self, m_lxc): + """deletion or removal should only occur if bridge is default.""" + cc_lxd.maybe_cleanup_default( + net_name="lxdbr1", did_init=True, create=True, attach=True) + m_lxc.assert_not_called() + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_did_init_false_does_not_delete(self, m_lxc): + """deletion or removal should only occur if did_init is True.""" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=False, create=True, attach=True) + m_lxc.assert_not_called() + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_network_deleted_if_create_true(self, m_lxc): + """deletion of network should occur if create is True.""" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=True, create=True, attach=False) + m_lxc.assert_called_once_with(["network", "delete", self.defnet]) + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_device_removed_if_attach_true(self, m_lxc): + """deletion of network should occur if create is True.""" + nic_name = "my_nic" + profile = "my_profile" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=True, create=False, attach=True, + profile=profile, nic_name=nic_name) + m_lxc.assert_called_once_with( + ["profile", "device", "remove", profile, nic_name]) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 4d69fb44a5607e16843537be26758893f2dd79be Mon Sep 17 00:00:00 2001 From: Jacob Bednarz Date: Tue, 19 Jun 2018 16:04:17 -0600 Subject: Explicitly prevent `sudo` access for user module To deny a user elevated access, you can omit the `sudo` key from the `users` dictionary. This works fine however it's implicitly defined based on defaults of `cloud-init`. If the project moves to have `sudo` access allowed for all by default (quite unlikely but still possible) this will catch a few people out. 
This introduces the ability to define an explicit `sudo: False` in the `users` dictionary and it will prevent `sudo` access. The behaviour is identical to omitting the key. LP: #1771468 --- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/freebsd.py | 2 +- doc/examples/cloud-config-user-groups.txt | 27 +++++++++++++++++------ tests/unittests/test_distros/test_create_users.py | 8 +++++++ 4 files changed, 30 insertions(+), 9 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6c22b07f..ab0b0776 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -531,7 +531,7 @@ class Distro(object): self.lock_passwd(name) # Configure sudo access - if 'sudo' in kwargs: + if 'sudo' in kwargs and kwargs['sudo'] is not False: self.write_sudo_rules(name, kwargs['sudo']) # Import SSH keys diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 5b1718a4..ff22d568 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -266,7 +266,7 @@ class Distro(distros.Distro): self.lock_passwd(name) # Configure sudo access - if 'sudo' in kwargs: + if 'sudo' in kwargs and kwargs['sudo'] is not False: self.write_sudo_rules(name, kwargs['sudo']) # Import SSH keys diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 7bca24a3..01ecad7b 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -30,6 +30,11 @@ users: gecos: Magic Cloud App Daemon User inactive: true system: true + - name: fizzbuzz + sudo: False + ssh_authorized_keys: + - + - - snapuser: joe@joeuser.io # Valid Values: @@ -71,13 +76,21 @@ users: # no_log_init: When set to true, do not initialize lastlog and faillog database. # ssh_import_id: Optional. Import SSH ids # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file -# sudo: Defaults to none. Set to the sudo string you want to use, i.e. -# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following -# format. -# sudo: -# - ALL=(ALL) NOPASSWD:/bin/mysql -# - ALL=(ALL) ALL -# Note: Please double check your syntax and make sure it is valid. +# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule +# strings or False to explicitly deny sudo usage. Examples: +# +# Allow a user unrestricted sudo access. +# sudo: ALL=(ALL) NOPASSWD:ALL +# +# Adding multiple sudo rule strings. +# sudo: +# - ALL=(ALL) NOPASSWD:/bin/mysql +# - ALL=(ALL) ALL +# +# Prevent sudo access for a user. +# sudo: False +# +# Note: Please double check your syntax and make sure it is valid. # cloud-init does not parse/check the syntax of the sudo # directive. # system: Create the user as a system user. This means no home directory. 
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 5670904a..07176caa 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -145,4 +145,12 @@ class TestCreateUser(TestCase): mock.call(['passwd', '-l', user])] self.assertEqual(m_subp.call_args_list, expected) + def test_explicit_sudo_false(self, m_subp, m_is_snappy): + user = 'foouser' + self.dist.create_user(user, sudo=False) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + # vi: ts=4 expandtab -- cgit v1.2.3 From c1a75a697d7cb2e6c97ad90d64c9b2b88db2034a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 9 Jul 2018 19:59:01 +0000 Subject: ubuntu,centos,debian: get_linux_distro to align with platform.dist A recent commit added get_linux_distro to replace the deprecated python platform.dist module behavior before it is dropped from python. It added behavior that was compliant on OpenSuSE and SLES, by returning (, , ). Fix get_linux_distro to behave more like the specific distribution's platform.dist on ubuntu, centos and debian, which will return the distribution release codename as the third element instead of . SLES and OpenSUSE will retain their current behavior. Examples follow: ('sles', '15', 'x86_64') ('opensuse', '42.3', 'x86_64') ('debian', '9', 'stretch') ('ubuntu', '16.04', 'xenial') ('centos', '7', 'Core') LP: #1780481 --- cloudinit/tests/test_util.py | 69 +++++++++++++++++++++- cloudinit/util.py | 28 +++++---- .../unittests/test_datasource/test_azure_helper.py | 4 +- 3 files changed, 89 insertions(+), 12 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 17853fc7..6a31e505 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -26,8 +26,51 @@ OS_RELEASE_SLES = dedent("""\ CPE_NAME="cpe:/o:suse:sles:12:sp3"\n """) +OS_RELEASE_OPENSUSE = dedent("""\ +NAME="openSUSE Leap" +VERSION="42.3" +ID=opensuse +ID_LIKE="suse" +VERSION_ID="42.3" +PRETTY_NAME="openSUSE Leap 42.3" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:opensuse:leap:42.3" +BUG_REPORT_URL="https://bugs.opensuse.org" +HOME_URL="https://www.opensuse.org/" +""") + +OS_RELEASE_CENTOS = dedent("""\ + NAME="CentOS Linux" + VERSION="7 (Core)" + ID="centos" + ID_LIKE="rhel fedora" + VERSION_ID="7" + PRETTY_NAME="CentOS Linux 7 (Core)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:centos:centos:7" + HOME_URL="https://www.centos.org/" + BUG_REPORT_URL="https://bugs.centos.org/" + + CENTOS_MANTISBT_PROJECT="CentOS-7" + CENTOS_MANTISBT_PROJECT_VERSION="7" + REDHAT_SUPPORT_PRODUCT="centos" + REDHAT_SUPPORT_PRODUCT_VERSION="7" +""") + +OS_RELEASE_DEBIAN = dedent("""\ + PRETTY_NAME="Debian GNU/Linux 9 (stretch)" + NAME="Debian GNU/Linux" + VERSION_ID="9" + VERSION="9 (stretch)" + ID=debian + HOME_URL="https://www.debian.org/" + SUPPORT_URL="https://www.debian.org/support" + BUG_REPORT_URL="https://bugs.debian.org/" +""") + OS_RELEASE_UBUNTU = dedent("""\ NAME="Ubuntu"\n + # comment test VERSION="16.04.3 LTS (Xenial Xerus)"\n ID=ubuntu\n ID_LIKE=debian\n @@ -310,7 +353,31 @@ class TestGetLinuxDistro(CiTestCase): m_os_release.return_value = OS_RELEASE_UBUNTU m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(('ubuntu', '16.04', platform.machine()), dist) + self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) + + 
@mock.patch('cloudinit.util.load_file')
+    def test_get_linux_centos(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on CentOS."""
+        m_os_release.return_value = OS_RELEASE_CENTOS
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('centos', '7', 'Core'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_debian(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on Debian."""
+        m_os_release.return_value = OS_RELEASE_DEBIAN
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('debian', '9', 'stretch'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on OpenSUSE."""
+        m_os_release.return_value = OS_RELEASE_OPENSUSE
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
 
     @mock.patch('platform.dist')
     def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6da95113..d0b0e90a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -579,16 +579,24 @@ def get_cfg_option_int(yobj, key, default=0):
 def get_linux_distro():
     distro_name = ''
     distro_version = ''
+    flavor = ''
     if os.path.exists('/etc/os-release'):
-        os_release = load_file('/etc/os-release')
-        for line in os_release.splitlines():
-            if line.strip().startswith('ID='):
-                distro_name = line.split('=')[-1]
-                distro_name = distro_name.replace('"', '')
-            if line.strip().startswith('VERSION_ID='):
-                # Lets hope for the best that distros stay consistent ;)
-                distro_version = line.split('=')[-1]
-                distro_version = distro_version.replace('"', '')
+        os_release = load_shell_content(load_file('/etc/os-release'))
+        distro_name = os_release.get('ID', '')
+        distro_version = os_release.get('VERSION_ID', '')
+        if 'sles' in distro_name or 'suse' in distro_name:
+            # RELEASE_BLOCKER: We will drop this sles divergent behavior
+            # before 18.4 so that get_linux_distro returns a named tuple
+            # which will include both version codename and architecture
+            # on all distributions.
+ flavor = platform.machine() + else: + flavor = os_release.get('VERSION_CODENAME', '') + if not flavor: + match = re.match(r'[^ ]+ \((?P[^)]+)\)', + os_release.get('VERSION')) + if match: + flavor = match.groupdict()['codename'] else: dist = ('', '', '') try: @@ -606,7 +614,7 @@ def get_linux_distro(): 'expansion may have unexpected results') return dist - return (distro_name, distro_version, platform.machine()) + return (distro_name, distro_version, flavor) def system_info(): diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index af9d3e1a..26b2b93d 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -85,7 +85,9 @@ class TestFindEndpoint(CiTestCase): self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) - def test_latest_lease_used(self): + @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD') + def test_latest_lease_used(self, m_is_freebsd): + m_is_freebsd.return_value = False # To avoid hitting load_file encoded_addresses = ['5:4:3:2', '4:3:2:1'] file_content = '\n'.join([self._build_lease_content(encoded_address) for encoded_address in encoded_addresses]) -- cgit v1.2.3 From 2a9d6203d946484cbe24297fe0e48a42a800756a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 21 Jul 2018 15:30:58 +0000 Subject: pylint: Fix pylint warnings reported in pylint 2.0.0. Pylint 2.0.0 was recently released and complains more about logging-not-lazy than it used to. I've fixed those warnings, here. The changes in rh_subscription are more extensive. pylint may be complaining incorrectly there, but the tests were not correctly un-doing all of their mock/patching. This cleans those up and makes pylint happy. --- cloudinit/config/cc_lxd.py | 16 +-- cloudinit/config/cc_rh_subscription.py | 43 ++++---- cloudinit/sources/DataSourceSmartOS.py | 2 +- cloudinit/warnings.py | 2 +- tests/unittests/test_rh_subscription.py | 185 ++++++++++++++++---------------- 5 files changed, 124 insertions(+), 124 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index ac72ac4a..a604825a 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -276,27 +276,27 @@ def maybe_cleanup_default(net_name, did_init, create, attach, if net_name != _DEFAULT_NETWORK_NAME or not did_init: return - fail_assume_enoent = " failed. Assuming it did not exist." - succeeded = " succeeded." + fail_assume_enoent = "failed. Assuming it did not exist." + succeeded = "succeeded." 
if create: - msg = "Deletion of lxd network '%s'" % net_name + msg = "Deletion of lxd network '%s' %s" try: _lxc(["network", "delete", net_name]) - LOG.debug(msg + succeeded) + LOG.debug(msg, net_name, succeeded) except util.ProcessExecutionError as e: if e.exit_code != 1: raise e - LOG.debug(msg + fail_assume_enoent) + LOG.debug(msg, net_name, fail_assume_enoent) if attach: - msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) + msg = "Removal of device '%s' from profile '%s' %s" try: _lxc(["profile", "device", "remove", profile, nic_name]) - LOG.debug(msg + succeeded) + LOG.debug(msg, nic_name, profile, succeeded) except util.ProcessExecutionError as e: if e.exit_code != 1: raise e - LOG.debug(msg + fail_assume_enoent) + LOG.debug(msg, nic_name, profile, fail_assume_enoent) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 1c679430..edee01e5 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -126,7 +126,6 @@ class SubscriptionManager(object): self.enable_repo = self.rhel_cfg.get('enable-repo') self.disable_repo = self.rhel_cfg.get('disable-repo') self.servicelevel = self.rhel_cfg.get('service-level') - self.subman = ['subscription-manager'] def log_success(self, msg): '''Simple wrapper for logging info messages. Useful for unittests''' @@ -173,21 +172,12 @@ class SubscriptionManager(object): cmd = ['identity'] try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) except util.ProcessExecutionError: return False return True - def _sub_man_cli(self, cmd, logstring_val=False): - ''' - Uses the prefered cloud-init subprocess def of util.subp - and runs subscription-manager. Breaking this to a - separate function for later use in mocking and unittests - ''' - cmd = self.subman + cmd - return util.subp(cmd, logstring=logstring_val) - def rhn_register(self): ''' Registers the system by userid and password or activation key @@ -209,7 +199,7 @@ class SubscriptionManager(object): cmd.append("--serverurl={0}".format(self.server_hostname)) try: - return_out = self._sub_man_cli(cmd, logstring_val=True)[0] + return_out = _sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -232,7 +222,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: - return_out = self._sub_man_cli(cmd, logstring_val=True)[0] + return_out = _sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -255,7 +245,7 @@ class SubscriptionManager(object): .format(self.servicelevel)] try: - return_out = self._sub_man_cli(cmd)[0] + return_out = _sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): @@ -273,7 +263,7 @@ class SubscriptionManager(object): def _set_auto_attach(self): cmd = ['attach', '--auto'] try: - return_out = self._sub_man_cli(cmd)[0] + return_out = _sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False @@ -292,12 +282,12 @@ class SubscriptionManager(object): # Get all available pools cmd = ['list', '--available', '--pool-only'] - results = self._sub_man_cli(cmd)[0] + results = _sub_man_cli(cmd)[0] available = (results.rstrip()).split("\n") # Get all consumed pools cmd = ['list', '--consumed', '--pool-only'] - results = self._sub_man_cli(cmd)[0] 
+ results = _sub_man_cli(cmd)[0] consumed = (results.rstrip()).split("\n") return available, consumed @@ -309,14 +299,14 @@ class SubscriptionManager(object): ''' cmd = ['repos', '--list-enabled'] - return_out = self._sub_man_cli(cmd)[0] + return_out = _sub_man_cli(cmd)[0] active_repos = [] for repo in return_out.split("\n"): if "Repo ID:" in repo: active_repos.append((repo.split(':')[1]).strip()) cmd = ['repos', '--list-disabled'] - return_out = self._sub_man_cli(cmd)[0] + return_out = _sub_man_cli(cmd)[0] inactive_repos = [] for repo in return_out.split("\n"): @@ -346,7 +336,7 @@ class SubscriptionManager(object): if len(pool_list) > 0: cmd.extend(pool_list) try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) self.log.debug("Attached the following pools to your " "system: %s", (", ".join(pool_list)) .replace('--pool=', '')) @@ -423,7 +413,7 @@ class SubscriptionManager(object): cmd.extend(enable_list) try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) except util.ProcessExecutionError as e: self.log_warn("Unable to alter repos due to {0}".format(e)) return False @@ -439,4 +429,15 @@ class SubscriptionManager(object): def is_configured(self): return bool((self.userid and self.password) or self.activation_key) + +def _sub_man_cli(cmd, logstring_val=False): + ''' + Uses the prefered cloud-init subprocess def of util.subp + and runs subscription-manager. Breaking this to a + separate function for later use in mocking and unittests + ''' + return util.subp(['subscription-manager'] + cmd, + logstring=logstring_val) + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index f92e8b5c..ad8cfb9c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -564,7 +564,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): continue LOG.warning('Unexpected response "%s" during flush', response) except JoyentMetadataTimeoutException: - LOG.warning('Timeout while initializing metadata client. ' + + LOG.warning('Timeout while initializing metadata client. ' 'Is the host metadata service running?') LOG.debug('Got "invalid command". 
Flush complete.') self.fp.timeout = timeout diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py index f9f7a63c..1da90c40 100644 --- a/cloudinit/warnings.py +++ b/cloudinit/warnings.py @@ -130,7 +130,7 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs): os.path.join(_get_warn_dir(cfg), name), topline + "\n".join(fmtlines) + "\n" + topline) - LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline) + LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline) if sleep: LOG.debug("sleeping %d seconds for warning '%s'", sleep, name) diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py index 22718108..4cd27eed 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/test_rh_subscription.py @@ -8,10 +8,16 @@ import logging from cloudinit.config import cc_rh_subscription from cloudinit import util -from cloudinit.tests.helpers import TestCase, mock +from cloudinit.tests.helpers import CiTestCase, mock +SUBMGR = cc_rh_subscription.SubscriptionManager +SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' + + +@mock.patch(SUB_MAN_CLI) +class GoodTests(CiTestCase): + with_logs = True -class GoodTests(TestCase): def setUp(self): super(GoodTests, self).setUp() self.name = "cc_rh_subscription" @@ -19,7 +25,6 @@ class GoodTests(TestCase): self.log = logging.getLogger("good_tests") self.args = [] self.handle = cc_rh_subscription.handle - self.SM = cc_rh_subscription.SubscriptionManager self.config = {'rh_subscription': {'username': 'scooby@do.com', @@ -35,55 +40,47 @@ class GoodTests(TestCase): 'disable-repo': ['repo4', 'repo5'] }} - def test_already_registered(self): + def test_already_registered(self, m_sman_cli): ''' Emulates a system that is already registered. 
Ensure it gets a non-ProcessExecution error from is_registered() ''' - with mock.patch.object(cc_rh_subscription.SubscriptionManager, - '_sub_man_cli') as mockobj: - self.SM.log_success = mock.MagicMock() - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(mockobj.call_count, 1) - - def test_simple_registration(self): + self.handle(self.name, self.config, self.cloud_init, + self.log, self.args) + self.assertEqual(m_sman_cli.call_count, 1) + self.assertIn('System is already registered', self.logs.getvalue()) + + def test_simple_registration(self, m_sman_cli): ''' Simple registration with username and password ''' - self.SM.log_success = mock.MagicMock() reg = "The system has been registered with ID:" \ " 12345678-abde-abcde-1234-1234567890abc" - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')] self.handle(self.name, self.config, self.cloud_init, self.log, self.args) - self.assertIn(mock.call(['identity']), - self.SM._sub_man_cli.call_args_list) + self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list) self.assertIn(mock.call(['register', '--username=scooby@do.com', '--password=scooby-snacks'], logstring_val=True), - self.SM._sub_man_cli.call_args_list) - - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(self.SM._sub_man_cli.call_count, 2) + m_sman_cli.call_args_list) + self.assertIn('rh_subscription plugin completed successfully', + self.logs.getvalue()) + self.assertEqual(m_sman_cli.call_count, 2) @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") - @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_sub_man_cli") - def test_update_repos_disable_with_none(self, m_sub_man_cli, m_get_repos): + def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): cfg = copy.deepcopy(self.config) m_get_repos.return_value = ([], ['repo1']) - m_sub_man_cli.return_value = (b'', b'') cfg['rh_subscription'].update( {'enable-repo': ['repo1'], 'disable-repo': None}) mysm = cc_rh_subscription.SubscriptionManager(cfg) self.assertEqual(True, mysm.update_repos()) m_get_repos.assert_called_with() - self.assertEqual(m_sub_man_cli.call_args_list, + self.assertEqual(m_sman_cli.call_args_list, [mock.call(['repos', '--enable=repo1'])]) - def test_full_registration(self): + def test_full_registration(self, m_sman_cli): ''' Registration with auto-attach, service-level, adding pools, and enabling and disabling yum repos @@ -93,26 +90,28 @@ class GoodTests(TestCase): call_lists.append(['repos', '--disable=repo5', '--enable=repo2', '--enable=repo3']) call_lists.append(['attach', '--auto', '--servicelevel=self-support']) - self.SM.log_success = mock.MagicMock() reg = "The system has been registered with ID:" \ " 12345678-abde-abcde-1234-1234567890abc" - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (reg, 'bar'), - ('Service level set to: self-support', ''), - ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), - ('Repo ID: repo1\nRepo ID: repo5\n', ''), - ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: ' - 'repo4', ''), - ('', '')]) + m_sman_cli.side_effect = [ + util.ProcessExecutionError, + (reg, 'bar'), + ('Service level set to: self-support', ''), + ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), + ('Repo ID: repo1\nRepo ID: repo5\n', ''), + ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''), + ('', 
'')] self.handle(self.name, self.config_full, self.cloud_init, self.log, self.args) + self.assertEqual(m_sman_cli.call_count, 9) for call in call_lists: - self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list) - self.assertEqual(self.SM.log_success.call_count, 1) - self.assertEqual(self.SM._sub_man_cli.call_count, 9) + self.assertIn(mock.call(call), m_sman_cli.call_args_list) + self.assertIn("rh_subscription plugin completed successfully", + self.logs.getvalue()) -class TestBadInput(TestCase): +@mock.patch(SUB_MAN_CLI) +class TestBadInput(CiTestCase): + with_logs = True name = "cc_rh_subscription" cloud_init = None log = logging.getLogger("bad_tests") @@ -155,81 +154,81 @@ class TestBadInput(TestCase): super(TestBadInput, self).setUp() self.handle = cc_rh_subscription.handle - def test_no_password(self): - ''' - Attempt to register without the password key/value - ''' - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + def assert_logged_warnings(self, warnings): + logs = self.logs.getvalue() + missing = [w for w in warnings if "WARNING: " + w not in logs] + self.assertEqual([], missing, "Missing expected warnings.") + + def test_no_password(self, m_sman_cli): + '''Attempt to register without the password key/value.''' + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_no_password, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM._sub_man_cli.call_count, 0) + self.assertEqual(m_sman_cli.call_count, 0) - def test_no_org(self): - ''' - Attempt to register without the org key/value - ''' - self.input_is_missing_data(self.config_no_key) - - def test_service_level_without_auto(self): - ''' - Attempt to register using service-level without the auto-attach key - ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + def test_no_org(self, m_sman_cli): + '''Attempt to register without the org key/value.''' + m_sman_cli.side_effect = [util.ProcessExecutionError] + self.handle(self.name, self.config_no_key, self.cloud_init, + self.log, self.args) + m_sman_cli.assert_called_with(['identity']) + self.assertEqual(m_sman_cli.call_count, 1) + self.assert_logged_warnings(( + 'Unable to register system due to incomplete information.', + 'Use either activationkey and org *or* userid and password', + 'Registration failed or did not run completely', + 'rh_subscription plugin did not complete successfully')) + + def test_service_level_without_auto(self, m_sman_cli): + '''Attempt to register using service-level without auto-attach key.''' + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, self.config_service, self.cloud_init, self.log, self.args) - self.assertEqual(self.SM._sub_man_cli.call_count, 1) - self.assertEqual(self.SM.log_warn.call_count, 2) + self.assertEqual(m_sman_cli.call_count, 1) + self.assert_logged_warnings(( + 'The service-level key must be used in conjunction with ', + 'rh_subscription plugin did not complete successfully')) - def test_pool_not_a_list(self): + def test_pool_not_a_list(self, m_sman_cli): ''' Register with pools that are not in the format of a list ''' - self.SM.log_warn = mock.MagicMock() - self.SM._sub_man_cli = mock.MagicMock( - side_effect=[util.ProcessExecutionError, (self.reg, 'bar')]) + m_sman_cli.side_effect = [util.ProcessExecutionError, + (self.reg, 'bar')] self.handle(self.name, 
self.config_badpool, self.cloud_init,
                     self.log, self.args)
-        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
-        self.assertEqual(self.SM.log_warn.call_count, 2)
+        self.assertEqual(m_sman_cli.call_count, 2)
+        self.assert_logged_warnings((
+            'Pools must in the format of a list',
+            'rh_subscription plugin did not complete successfully'))
 
-    def test_repo_not_a_list(self):
+    def test_repo_not_a_list(self, m_sman_cli):
         '''
         Register with repos that are not in the format of a list
         '''
-        self.SM.log_warn = mock.MagicMock()
-        self.SM._sub_man_cli = mock.MagicMock(
-            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        m_sman_cli.side_effect = [util.ProcessExecutionError,
+                                  (self.reg, 'bar')]
         self.handle(self.name, self.config_badrepo, self.cloud_init,
                     self.log, self.args)
-        self.assertEqual(self.SM.log_warn.call_count, 3)
-        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+        self.assertEqual(m_sman_cli.call_count, 2)
+        self.assert_logged_warnings((
+            'Repo IDs must in the format of a list.',
+            'Unable to add or remove repos',
+            'rh_subscription plugin did not complete successfully'))
 
-    def test_bad_key_value(self):
+    def test_bad_key_value(self, m_sman_cli):
         '''
         Attempt to register with a key that we don't know
         '''
-        self.SM.log_warn = mock.MagicMock()
-        self.SM._sub_man_cli = mock.MagicMock(
-            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        m_sman_cli.side_effect = [util.ProcessExecutionError,
+                                  (self.reg, 'bar')]
         self.handle(self.name, self.config_badkey, self.cloud_init,
                     self.log, self.args)
-        self.assertEqual(self.SM.log_warn.call_count, 2)
-        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-
-    def input_is_missing_data(self, config):
-        '''
-        Helper def for tests that having missing information
-        '''
-        self.SM.log_warn = mock.MagicMock()
-        self.SM._sub_man_cli = mock.MagicMock(
-            side_effect=[util.ProcessExecutionError])
-        self.handle(self.name, config, self.cloud_init,
-                    self.log, self.args)
-        self.SM._sub_man_cli.assert_called_with(['identity'])
-        self.assertEqual(self.SM.log_warn.call_count, 4)
-        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+        self.assertEqual(m_sman_cli.call_count, 1)
+        self.assert_logged_warnings((
+            'fookey is not a valid key for rh_subscription. Valid keys are:',
+            'rh_subscription plugin did not complete successfully'))
 
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 3cee0bf85fbf12d272422c8eeed63bf06e64570b Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 31 Jul 2018 18:44:12 +0000
Subject: oracle: fix detect_openstack to report True on OracleCloud.com DMI data

The OpenStack datasource in 18.3 changed to detect data in the
init-local stage instead of init-network and attempted to redetect
OpenStackLocal datasource on Oracle across reboots. The function
detect_openstack was added to quickly detect whether a platform is
OpenStack based on dmi product_name or chassis_asset_tag and it was a
bit too strict for Oracle in checking for 'OpenStack Nova'/'Compute'
DMI product_name.

Oracle's DMI product_name reports 'Standard PC (i440FX + PIIX, 1996)'
and DMI chassis_asset_tag is 'OracleCloud.com'.

The detect_openstack function now adds 'OracleCloud.com' as a supported
value to the valid chassis-asset-tags for the OpenStack datasource.
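For illustration, the detection now falls through from product-name to
asset-tag roughly as follows (a simplified sketch; the full function in
DataSourceOpenStack.py also gates on x86 hardware and the proc-1
environment, as the surrounding tests show):

    def detect_openstack():
        # Stock OpenStack guests (Nova/Compute) match on DMI product-name.
        if util.read_dmi_data('system-product-name') in VALID_DMI_PRODUCT_NAMES:
            return True
        # OpenTelekomCloud and Oracle guests match on the chassis asset tag.
        if util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
            return True
        return False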
LP: #1784685 --- cloudinit/sources/DataSourceOpenStack.py | 3 ++- tests/unittests/test_datasource/test_openstack.py | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 365af96a..b9ade90d 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -28,7 +28,8 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova' DMI_PRODUCT_COMPUTE = 'OpenStack Compute' VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' -VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] +DMI_ASSET_TAG_ORACLE_CLOUD = 'OracleCloud.com' +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_ORACLE_CLOUD] class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 585acc33..d862f4bc 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -510,6 +510,24 @@ class TestDetectOpenStack(test_helpers.CiTestCase): ds.detect_openstack(), 'Expected detect_openstack == True on OpenTelekomCloud') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting Oracle cloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Standard PC (i440FX + PIIX, 1996)' # No match + if dmi_key == 'chassis-asset-tag': + return 'OracleCloud.com' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OracleCloud.com') + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, -- cgit v1.2.3 From 01cfa711bcf31c6c0019d1e936b5b07aa6abc2bc Mon Sep 17 00:00:00 2001 From: Akihiko Ota Date: Fri, 3 Aug 2018 20:44:59 +0000 Subject: OpenNebula: Fix null gateway6 The OpenNebula data source generates an invalid netplan yaml file if the IPv6 gateway is not defined in context.sh. 
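For illustration, the symptom this fixes (a sketch built from the updated
unit tests and their MACADDR/IP_BY_MACADDR fixtures) was a null IPv6
gateway leaking into the rendered netplan v2 dict whenever only an IPv4
gateway was defined:

    # Before the fix, gen_conf() guarded the gateway6 assignment with the
    # IPv4 gateway, so an unset IPv6 gateway could still be emitted:
    {'version': 2,
     'ethernets': {
         'eth0': {'match': {'macaddress': MACADDR},
                  'addresses': [IP_BY_MACADDR + '/16'],
                  'gateway4': '1.2.3.5',
                  'gateway6': None}}}  # a null gateway6 is invalid netplan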
LP: #1768547 --- cloudinit/sources/DataSourceOpenNebula.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 408 ++++++++++++++++++++- 2 files changed, 407 insertions(+), 3 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 16c10785..77ccd128 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -232,7 +232,7 @@ class OpenNebulaNetwork(object): # Set IPv6 default gateway gateway6 = self.get_gateway6(c_dev) - if gateway: + if gateway6: devconf['gateway6'] = gateway6 # Set DNS servers and search domains diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index ab42f344..36b4d771 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -354,6 +354,412 @@ class TestOpenNebulaNetwork(unittest.TestCase): system_nics = ('eth0', 'ens3') + def test_context_devname(self): + """Verify context_devname correctly returns mac and name.""" + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH1_MAC': '02:00:0a:12:0f:0f', } + expected = { + '02:00:0a:12:01:01': 'ETH0', + '02:00:0a:12:0f:0f': 'ETH1', } + net = ds.OpenNebulaNetwork(context) + self.assertEqual(expected, net.context_devname) + + def test_get_nameservers(self): + """ + Verify get_nameservers('device') correctly returns DNS server addresses + and search domains. + """ + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + expected = { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']} + net = ds.OpenNebulaNetwork(context) + val = net.get_nameservers('eth0') + self.assertEqual(expected, val) + + def test_get_mtu(self): + """Verify get_mtu('device') correctly returns MTU size.""" + context = {'ETH0_MTU': '1280'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mtu('eth0') + self.assertEqual('1280', val) + + def test_get_ip(self): + """Verify get_ip('device') correctly returns IPv4 address.""" + context = {'ETH0_IP': PUBLIC_IP} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(PUBLIC_IP, val) + + def test_get_ip_emptystring(self): + """ + Verify get_ip('device') correctly returns IPv4 address. + It returns IP address created by MAC address if ETH0_IP has empty + string. + """ + context = {'ETH0_IP': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(IP_BY_MACADDR, val) + + def test_get_ip6(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': '', } + expected = [IP6_GLOBAL] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_ula(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 address is Given by ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_dual(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. 
+ """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_GLOBAL, IP6_ULA] + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_prefix(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual(IP6_PREFIX, val) + + def test_get_ip6_prefix_emptystring(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty + string. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_ip6_prefix('eth0') + self.assertEqual('64', val) + + def test_get_gateway(self): + """ + Verify get_gateway('device') correctly returns IPv4 default gateway + address. + """ + context = {'ETH0_GATEWAY': '1.2.3.5'} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway('eth0') + self.assertEqual('1.2.3.5', val) + + def test_get_gateway6(self): + """ + Verify get_gateway6('device') correctly returns IPv6 default gateway + address. + """ + context = {'ETH0_GATEWAY6': IP6_GW} + net = ds.OpenNebulaNetwork(context) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) + + def test_get_mask(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + """ + context = {'ETH0_MASK': '255.255.0.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.0.0', val) + + def test_get_mask_emptystring(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + It returns default value '255.255.255.0' if ETH0_MASK has empty string. + """ + context = {'ETH0_MASK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_mask('eth0') + self.assertEqual('255.255.255.0', val) + + def test_get_network(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + """ + context = {'ETH0_NETWORK': '1.2.3.0'} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('1.2.3.0', val) + + def test_get_network_emptystring(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + It returns network address created by MAC address if ETH0_NETWORK has + empty string. + """ + context = {'ETH0_NETWORK': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_network('eth0', MACADDR) + self.assertEqual('10.18.1.0', val) + + def test_get_field(self): + """ + Verify get_field('device', 'name') returns *context* value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue(self): + """ + Verify get_field('device', 'name', 'default value') returns *context* + value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue_emptycontext(self): + """ + Verify get_field('device', 'name', 'default value') returns *default* + value if context value is empty string. 
+ """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DEFAULT_VALUE', val) + + def test_get_field_emptycontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + def test_get_field_nonecontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + None. + """ + context = {'ETH9_DUMMY': None} + net = ds.OpenNebulaNetwork(context) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway(self, m_get_phys_by_mac): + """Test rendering with/without IPv4 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '1.2.3.5', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway4': '1.2.3.5', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway6(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': IP6_GW, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway6': IP6_GW, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_ipv6address(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 address""" + self.maxDiff = None + # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': '', + 'ETH0_IP6_PREFIX_LENGTH': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 
'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/' + IP4_PREFIX, + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_dns(self, m_get_phys_by_mac): + """Test rendering with/without DNS server, search domain""" + self.maxDiff = None + # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '', + 'ETH0_DNS': '', + 'ETH0_SEARCH_DOMAIN': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_mtu(self, m_get_phys_by_mac): + """Test rendering with/without MTU""" + self.maxDiff = None + # empty ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '1280', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'mtu': '1280', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context) + self.assertEqual(net.gen_conf(), expected) + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): for nic in self.system_nics: @@ -395,7 +801,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MACADDR}, 'addresses': [IP_BY_MACADDR + '/16'], 'gateway4': '1.2.3.5', - 'gateway6': None, 'nameservers': { 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} @@ -494,7 +899,6 @@ class TestOpenNebulaNetwork(unittest.TestCase): 'match': {'macaddress': MAC_1}, 'addresses': ['10.3.1.3/16'], 'gateway4': '10.3.0.1', - 'gateway6': None, 'nameservers': { 'addresses': ['10.3.1.2', '1.2.3.8'], 'search': [ -- cgit v1.2.3 From a6f95c72259f2890e4a9f9f11166310812173c68 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 6 Aug 2018 16:50:51 +0000 Subject: tools: Add 'net-convert' subcommand command to 'cloud-init 
devel'. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the tools/net-convert.py to be exposed as part of 'cloud-init devel' subcommands. It can now be called like: $ cloud-init devel net-convert Or, if you just have checked out source (and no cli executable):   $ python3 -m cloudinit.cmd.devel.net_convert or   $ python3 -m cloudinit.cmd.main devel net-convert --- bash_completion/cloud-init | 7 ++- cloudinit/cmd/devel/net_convert.py | 115 +++++++++++++++++++++++++++++++++++++ cloudinit/cmd/devel/parser.py | 20 ++++--- tests/unittests/test_cli.py | 3 +- tools/net-convert.py | 104 --------------------------------- 5 files changed, 134 insertions(+), 115 deletions(-) create mode 100755 cloudinit/cmd/devel/net_convert.py delete mode 100755 tools/net-convert.py (limited to 'tests/unittests') diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init index 581432c8..f38164b0 100644 --- a/bash_completion/cloud-init +++ b/bash_completion/cloud-init @@ -28,7 +28,7 @@ _cloudinit_complete() COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word)) ;; devel) - COMPREPLY=($(compgen -W "--help schema" -- $cur_word)) + COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word)) ;; dhclient-hook|features) COMPREPLY=($(compgen -W "--help" -- $cur_word)) @@ -59,6 +59,9 @@ _cloudinit_complete() --frequency) COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word)) ;; + net-convert) + COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word)) + ;; schema) COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word)) ;; @@ -74,4 +77,4 @@ _cloudinit_complete() } complete -F _cloudinit_complete cloud-init -# vi: syntax=bash expandtab +# vi: syntax=sh expandtab diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py new file mode 100755 index 00000000..1ec08a3c --- /dev/null +++ b/cloudinit/cmd/devel/net_convert.py @@ -0,0 +1,115 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Debug network config format conversions.""" +import argparse +import json +import os +import sys +import yaml + +from cloudinit.sources.helpers import openstack + +from cloudinit.net import eni, netplan, network_state, sysconfig +from cloudinit import log + +NAME = 'net-convert' + + +def get_parser(parser=None): + """Build or extend and arg parser for net-convert utility. + + @param parser: Optional existing ArgumentParser instance representing the + subcommand which will be extended to support the args of this utility. + + @returns: ArgumentParser with proper argument configuration. 
+ """ + if not parser: + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) + parser.add_argument("-p", "--network-data", type=open, + metavar="PATH", required=True) + parser.add_argument("-k", "--kind", + choices=['eni', 'network_data.json', 'yaml'], + required=True) + parser.add_argument("-d", "--directory", + metavar="PATH", + help="directory to place output in", + required=True) + parser.add_argument("-m", "--mac", + metavar="name,mac", + action='append', + help="interface name to mac mapping") + parser.add_argument("--debug", action='store_true', + help='enable debug logging to stderr.') + parser.add_argument("-O", "--output-kind", + choices=['eni', 'netplan', 'sysconfig'], + required=True) + return parser + + +def handle_args(name, args): + if not args.directory.endswith("/"): + args.directory += "/" + + if not os.path.isdir(args.directory): + os.makedirs(args.directory) + + if args.debug: + log.setupBasicLogging(level=log.DEBUG) + else: + log.setupBasicLogging(level=log.WARN) + if args.mac: + known_macs = {} + for item in args.mac: + iface_name, iface_mac = item.split(",", 1) + known_macs[iface_mac] = iface_name + else: + known_macs = None + + net_data = args.network_data.read() + if args.kind == "eni": + pre_ns = eni.convert_eni_data(net_data) + ns = network_state.parse_net_config_data(pre_ns) + elif args.kind == "yaml": + pre_ns = yaml.load(net_data) + if 'network' in pre_ns: + pre_ns = pre_ns.get('network') + if args.debug: + sys.stderr.write('\n'.join( + ["Input YAML", + yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) + ns = network_state.parse_net_config_data(pre_ns) + else: + pre_ns = openstack.convert_net_json( + json.loads(net_data), known_macs=known_macs) + ns = network_state.parse_net_config_data(pre_ns) + + if not ns: + raise RuntimeError("No valid network_state object created from" + "input data") + + if args.debug: + sys.stderr.write('\n'.join([ + "", "Internal State", + yaml.dump(ns, default_flow_style=False, indent=4), ""])) + if args.output_kind == "eni": + r_cls = eni.Renderer + elif args.output_kind == "netplan": + r_cls = netplan.Renderer + else: + r_cls = sysconfig.Renderer + + r = r_cls() + sys.stderr.write(''.join([ + "Read input format '%s' from '%s'.\n" % ( + args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" % ( + args.output_kind, args.directory)]) + "\n") + r.render_network_state(network_state=ns, target=args.directory) + + +if __name__ == '__main__': + args = get_parser().parse_args() + handle_args(NAME, args) + + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index acacc4ed..40a4b019 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,8 +5,9 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse -from cloudinit.config.schema import ( - get_parser as schema_parser, handle_schema_args) +from cloudinit.config import schema + +from . 
import net_convert def get_parser(parser=None): @@ -17,10 +18,15 @@ def get_parser(parser=None): subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') subparsers.required = True - parser_schema = subparsers.add_parser( - 'schema', help='Validate cloud-config files or document schema') - # Construct schema subcommand parser - schema_parser(parser_schema) - parser_schema.set_defaults(action=('schema', handle_schema_args)) + subcmds = [ + ('schema', 'Validate cloud-config files for document schema', + schema.get_parser, schema.handle_schema_args), + (net_convert.NAME, net_convert.__doc__, + net_convert.get_parser, net_convert.handle_args) + ] + for (subcmd, helpmsg, get_parser, handler) in subcmds: + parser = subparsers.add_parser(subcmd, help=helpmsg) + get_parser(parser) + parser.set_defaults(action=(subcmd, handler)) return parser diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 0c0f427a..199d69b0 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -208,8 +208,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): for subcommand in expected_subcommands: self.assertIn(subcommand, error) - @mock.patch('cloudinit.config.schema.handle_schema_args') - def test_wb_devel_schema_subcommand_parser(self, m_schema): + def test_wb_devel_schema_subcommand_parser(self): """The subcommand cloud-init schema calls the correct subparser.""" exit_code = self._call_main(['cloud-init', 'devel', 'schema']) self.assertEqual(1, exit_code) diff --git a/tools/net-convert.py b/tools/net-convert.py deleted file mode 100755 index d1a4a646..00000000 --- a/tools/net-convert.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python3 -# This file is part of cloud-init. See LICENSE file for license information. 
- -import argparse -import json -import os -import sys -import yaml - -from cloudinit.sources.helpers import openstack - -from cloudinit.net import eni -from cloudinit import log -from cloudinit.net import netplan -from cloudinit.net import network_state -from cloudinit.net import sysconfig - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--network-data", "-p", type=open, - metavar="PATH", required=True) - parser.add_argument("--kind", "-k", - choices=['eni', 'network_data.json', 'yaml'], - required=True) - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("--output-kind", "-ok", - choices=['eni', 'netplan', 'sysconfig'], - required=True) - args = parser.parse_args() - - if not args.directory.endswith("/"): - args.directory += "/" - - if not os.path.isdir(args.directory): - os.makedirs(args.directory) - - if args.debug: - log.setupBasicLogging(level=log.DEBUG) - else: - log.setupBasicLogging(level=log.WARN) - if args.mac: - known_macs = {} - for item in args.mac: - iface_name, iface_mac = item.split(",", 1) - known_macs[iface_mac] = iface_name - else: - known_macs = None - - net_data = args.network_data.read() - if args.kind == "eni": - pre_ns = eni.convert_eni_data(net_data) - ns = network_state.parse_net_config_data(pre_ns) - elif args.kind == "yaml": - pre_ns = yaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') - if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", - yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) - ns = network_state.parse_net_config_data(pre_ns) - else: - pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - ns = network_state.parse_net_config_data(pre_ns) - - if not ns: - raise RuntimeError("No valid network_state object created from" - "input data") - - if args.debug: - sys.stderr.write('\n'.join([ - "", "Internal State", - yaml.dump(ns, default_flow_style=False, indent=4), ""])) - if args.output_kind == "eni": - r_cls = eni.Renderer - elif args.output_kind == "netplan": - r_cls = netplan.Renderer - else: - r_cls = sysconfig.Renderer - - r = r_cls() - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") - r.render_network_state(network_state=ns, target=args.directory) - - -if __name__ == '__main__': - main() - -# vi: ts=4 expandtab -- cgit v1.2.3 From 41f508da4f1e421f454136f3c84527a9911c63ff Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 6 Aug 2018 19:33:54 +0000 Subject: netplan: Correctly render macaddress on a bonds and bridges when provided. When converting network config v1 to netplan, we were not correctly rendering the 'macaddress' key on a bond. Not that the difference in spelling between v1 'mac_address' and v2 'macaddress' is intentional. Also fixed here is rendering of the macaddress for bridges. 
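For illustration, the spelling difference the renderer must translate (a
sketch using the bond values from the test fixtures in this change):

    # network config v1 (input) spells it 'mac_address'
    v1_bond = {'type': 'bond', 'name': 'bond0',
               'mac_address': 'aa:bb:cc:dd:e8:ff'}
    # rendered netplan v2 (output) spells it 'macaddress', lower-cased
    v2_bonds = {'bonds': {'bond0': {'macaddress': 'aa:bb:cc:dd:e8:ff'}}}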
LP: #1784699 --- cloudinit/net/eni.py | 11 +++++++++-- cloudinit/net/netplan.py | 4 ++++ tests/unittests/test_net.py | 6 ++++++ 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index bd20a361..80be2429 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -247,8 +247,15 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path): ifaces[currif]['bridge']['ports'] = [] for iface in split[1:]: ifaces[currif]['bridge']['ports'].append(iface) - elif option == "bridge_hw" and split[1].lower() == "mac": - ifaces[currif]['bridge']['mac'] = split[2] + elif option == "bridge_hw": + # doc is confusing and thus some may put literal 'MAC' + # bridge_hw MAC
+            # but correct is:
+            #    bridge_hw <address>
+ if split[1].lower() == "mac": + ifaces[currif]['bridge']['mac'] = split[2] + else: + ifaces[currif]['bridge']['mac'] = split[1] elif option == "bridge_pathcost": if 'pathcost' not in ifaces[currif]['bridge']: ifaces[currif]['bridge']['pathcost'] = {} diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 40143634..6352e78c 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -291,6 +291,8 @@ class Renderer(renderer.Renderer): if len(bond_config) > 0: bond.update({'parameters': bond_config}) + if ifcfg.get('mac_address'): + bond['macaddress'] = ifcfg.get('mac_address').lower() slave_interfaces = ifcfg.get('bond-slaves') if slave_interfaces == 'none': _extract_bond_slaves_by_name(interfaces, bond, ifname) @@ -327,6 +329,8 @@ class Renderer(renderer.Renderer): if len(br_config) > 0: bridge.update({'parameters': br_config}) + if ifcfg.get('mac_address'): + bridge['macaddress'] = ifcfg.get('mac_address').lower() _extract_addresses(ifcfg, bridge, ifname) bridges.update({ifname: bridge}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5ab61cf2..58e5ea14 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -643,6 +643,7 @@ iface br0 inet static bridge_stp off bridge_waitport 1 eth3 bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa # control-alias br0 iface br0 inet6 static @@ -708,6 +709,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - eth1 - eth2 + macaddress: aa:bb:cc:dd:ee:ff parameters: mii-monitor-interval: 100 mode: active-backup @@ -720,6 +722,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - eth3 - eth4 + macaddress: bb:bb:bb:bb:bb:aa nameservers: addresses: - 8.8.8.8 @@ -803,6 +806,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6ADDR=2001:1::1/64 IPV6INIT=yes IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes @@ -973,6 +977,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true use_tempaddr: 1 forwarding: 1 # basically anything in /proc/sys/net/ipv6/conf/.../ + mac_address: bb:bb:bb:bb:bb:aa params: bridge_ageing: 250 bridge_bridgeprio: 22 @@ -1075,6 +1080,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - bond0s0 - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff mtu: 9000 parameters: mii-monitor-interval: 100 -- cgit v1.2.3 From 51f49dc1da0c045d01ddc977874c9bed70cb510f Mon Sep 17 00:00:00 2001 From: Louis Bouchard Date: Fri, 17 Aug 2018 18:43:44 +0000 Subject: Scaleway: Add network configuration to the DataSource DEP_NETWORK is removed since the network_config must run at each boot. New EventType.BOOT event is used for that. Network is brought up early to fetch the metadata which is required to configure the network (ipv4 and/or v6). Adds unittests for the following and fixes test_common for LOCAL and NETWORK sets. 
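For illustration, the generated configuration (a sketch mirroring the new
unit tests, where 'ens2' is the mocked fallback NIC) is a v1 network dict
with a DHCPv4 subnet, plus a static subnet only when the metadata carries
ipv6:

    {'version': 1,
     'config': [{'type': 'physical', 'name': 'ens2',
                 'subnets': [{'type': 'dhcp4'},
                             {'type': 'static',
                              'address': '2000:abc:4444:9876::42:999',
                              'gateway': '2000:abc:4444:9876::42:000',
                              'netmask': '127'}]}]}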
--- cloudinit/sources/DataSourceScaleway.py | 54 +++++++++++++--- tests/unittests/test_datasource/test_common.py | 2 +- tests/unittests/test_datasource/test_scaleway.py | 79 +++++++++++++++++++++++- 3 files changed, 124 insertions(+), 11 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index e2502b02..9dc4ab23 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -29,7 +29,9 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper from cloudinit import util - +from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): - dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) @@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) + self._fallback_interface = None + self._network_config = None - def _get_data(self): - if not on_scaleway(): - return False - + def _crawl_metadata(self): resp = url_helper.readurl(self.metadata_address, timeout=self.timeout, retries=self.retries) @@ -203,8 +204,47 @@ class DataSourceScaleway(sources.DataSource): 'vendor-data', self.vendordata_address, self.retries, self.timeout ) + + def _get_data(self): + if not on_scaleway(): + return False + + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + try: + with EphemeralDHCPv4(self._fallback_interface): + util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except (NoDHCPLeaseError) as e: + util.logexc(LOG, str(e)) + return False return True + @property + def network_config(self): + """ + Configure networking according to data received from the + metadata API. 
+ """ + if self._network_config: + return self._network_config + + if self._fallback_interface is None: + self._fallback_interface = net.find_fallback_nic() + + netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface} + subnets = [{'type': 'dhcp4'}] + if self.metadata['ipv6']: + subnets += [{'type': 'static', + 'address': '%s' % self.metadata['ipv6']['address'], + 'gateway': '%s' % self.metadata['ipv6']['gateway'], + 'netmask': '%s' % self.metadata['ipv6']['netmask'], + }] + netcfg['subnets'] = subnets + self._network_config = {'version': 1, 'config': [netcfg]} + return self._network_config + @property def launch_index(self): return None @@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource): datasources = [ - (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceScaleway, (sources.DEP_FILESYSTEM,)), ] diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 0d35dc29..1a5a3db2 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -41,6 +41,7 @@ DEFAULT_LOCAL = [ SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, + Scaleway.DataSourceScaleway, ] DEFAULT_NETWORK = [ @@ -55,7 +56,6 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, - Scaleway.DataSourceScaleway, ] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index e4e9bb20..c2bc7a00 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -176,11 +176,18 @@ class TestDataSourceScaleway(HttprettyTestCase): self.vendordata_url = \ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_ok(self, sleep, m_get_cmdline): + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, user data and vendor data. """ @@ -211,11 +218,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_404(self, sleep, m_get_cmdline): + def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, but no user data nor vendor data. 
""" @@ -234,11 +242,12 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @mock.patch('time.sleep', return_value=None) - def test_metadata_rate_limit(self, sleep, m_get_cmdline): + def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): """ get_data() is rate limited two times by the metadata API when fetching user data. @@ -262,3 +271,67 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA) self.assertEqual(sleep.call_count, 2) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4 config if no ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4/v6 configs if ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = { + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + + netcfg = self.datasource.network_config + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}, + {'type': 'static', + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', } + ] + + }] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_existing(self, m_get_cmdline, fallback_nic): + """ + network_config() should return the same data if a network config + already exists + """ + m_get_cmdline.return_value = 'scaleway' + self.datasource._network_config = '0xdeadbeef' + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, '0xdeadbeef') -- cgit v1.2.3 From 47548df9ded4ad4088d3d846f1876b29b16aa7d1 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 17 Aug 2018 20:24:58 +0000 Subject: azure: allow azure to generate network configuration from IMDS per boot. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Azure datasource now queries IMDS metadata service for network configuration at link local address http://169.254.169.254/metadata/instance?api-version=2017-12-01. The azure metadata service presents a list of macs and allocated ip addresses associated with this instance. Azure will now also regenerate network configuration on every boot because it subscribes to EventType.BOOT maintenance events as well as the 'first boot' EventType.BOOT_NEW_INSTANCE. 
For testing, add an azure-imds --kind to the cloud-init devel net_convert
tool for debugging IMDS metadata.

Also refactor _get_data into 3 discrete methods:
  - is_platform_viable: check quickly whether the datasource is
    potentially compatible with the platform on which it is running
  - crawl_metadata: walk all potential metadata candidates, returning a
    structured dict of all metadata and userdata. Raise InvalidMetaData on
    error.
  - _get_data: call crawl_metadata and process results or error. Cache
    instance data on class attributes: metadata, userdata_raw etc.
---
 cloudinit/cmd/devel/net_convert.py            |   9 +-
 cloudinit/sources/DataSourceAzure.py          | 256 +++++++++++++++--
 tests/unittests/test_datasource/test_azure.py | 399 ++++++++++++++++++++++++--
 3 files changed, 605 insertions(+), 59 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 1ec08a3c..271dc5ed 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -8,6 +8,7 @@ import sys
 import yaml

 from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceAzure as azure

 from cloudinit.net import eni, netplan, network_state, sysconfig
 from cloudinit import log
@@ -28,7 +29,8 @@ def get_parser(parser=None):
     parser.add_argument("-p", "--network-data", type=open,
                         metavar="PATH", required=True)
     parser.add_argument("-k", "--kind",
-                        choices=['eni', 'network_data.json', 'yaml'],
+                        choices=['eni', 'network_data.json', 'yaml',
+                                 'azure-imds'],
                         required=True)
     parser.add_argument("-d", "--directory",
                         metavar="PATH",
@@ -78,10 +80,13 @@ def handle_args(name, args):
                 ["Input YAML",
                  yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
         ns = network_state.parse_net_config_data(pre_ns)
-    else:
+    elif args.kind == 'network_data.json':
         pre_ns = openstack.convert_net_json(
             json.loads(net_data), known_macs=known_macs)
         ns = network_state.parse_net_config_data(pre_ns)
+    elif args.kind == 'azure-imds':
+        pre_ns = azure.parse_network_config(json.loads(net_data))
+        ns = network_state.parse_net_config_data(pre_ns)

     if not ns:
         raise RuntimeError("No valid network_state object created from"
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 7007d9ea..783445e1 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,6 +8,7 @@ import base64
 import contextlib
 import crypt
 from functools import partial
+import json
 import os
 import os.path
 import re
@@ -17,6 +18,7 @@ import xml.etree.ElementTree as ET

 from cloudinit import log as logging
 from cloudinit import net
+from cloudinit.event import EventType
 from cloudinit.net.dhcp import EphemeralDHCPv4
 from cloudinit import sources
 from cloudinit.sources.helpers.azure import get_metadata_from_fabric
@@ -49,7 +51,17 @@ DEFAULT_FS = 'ext4'
 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
 REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
+AGENT_SEED_DIR = '/var/lib/waagent'
+IMDS_URL = "http://169.254.169.254/metadata/"
+
+# List of static scripts and network config artifacts created by
+# stock ubuntu supported images.
+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ + '/etc/netplan/90-azure-hotplug.yaml', + '/usr/local/sbin/ephemeral_eth.sh', + '/etc/udev/rules.d/10-net-device-added.rules', + '/run/network/interfaces.ephemeral.d', +] def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -185,7 +197,7 @@ if util.is_FreeBSD(): BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, - 'data_dir': "/var/lib/waagent", + 'data_dir': AGENT_SEED_DIR, 'set_hostname': True, 'hostname_bounce': { 'interface': DEFAULT_PRIMARY_NIC, @@ -252,6 +264,7 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False + _metadata_imds = sources.UNSET def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -263,6 +276,8 @@ class DataSourceAzure(sources.DataSource): BUILTIN_DS_CONFIG]) self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') self._network_config = None + # Regenerate network config new_instance boot and every boot + self.update_events['network'].add(EventType.BOOT) def __str__(self): root = sources.DataSource.__str__(self) @@ -336,15 +351,17 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata - def _get_data(self): + def crawl_metadata(self): + """Walk all instance metadata sources returning a dict on success. + + @return: A dictionary of any metadata content for this instance. + @raise: InvalidMetaDataException when the expected metadata service is + unavailable, broken or disabled. + """ + crawled_data = {} # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we # need to look in the datadir and consider that valid - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag != AZURE_CHASSIS_ASSET_TAG: - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - return False - ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] @@ -373,41 +390,84 @@ class DataSourceAzure(sources.DataSource): except NonAzureDataSource: continue except BrokenAzureDataSource as exc: - raise exc + msg = 'BrokenAzureDataSource: %s' % exc + raise sources.InvalidMetaDataException(msg) except util.MountFailedError: LOG.warning("%s was not mountable", cdev) continue if reprovision or self._should_reprovision(ret): ret = self._reprovision() - (md, self.userdata_raw, cfg, files) = ret + imds_md = get_metadata_from_imds( + self.fallback_interface, retries=3) + (md, userdata_raw, cfg, files) = ret self.seed = cdev - self.metadata = util.mergemanydict([md, DEFAULT_METADATA]) - self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG]) + crawled_data.update({ + 'cfg': cfg, + 'files': files, + 'metadata': util.mergemanydict( + [md, {'imds': imds_md}]), + 'userdata_raw': userdata_raw}) found = cdev LOG.debug("found datasource in %s", cdev) break if not found: - return False + raise sources.InvalidMetaDataException('No Azure metadata found') if found == ddir: LOG.debug("using files cached in %s", ddir) seed = _get_random_seed() if seed: - self.metadata['random_seed'] = seed + crawled_data['metadata']['random_seed'] = seed + crawled_data['metadata']['instance-id'] = util.read_dmi_data( + 'system-uuid') + return crawled_data + + def _is_platform_viable(self): + """Check platform environment to report if this datasource may run.""" + return _is_platform_viable(self.seed_dir) + + def clear_cached_attrs(self, attr_defaults=()): + """Reset any cached class attributes to defaults.""" + 
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) + self._metadata_imds = sources.UNSET + + def _get_data(self): + """Crawl and process datasource metadata caching metadata as attrs. + + @return: True on success, False on error, invalid or disabled + datasource. + """ + if not self._is_platform_viable(): + return False + try: + crawled_data = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self.crawl_metadata) + except sources.InvalidMetaDataException as e: + LOG.warning('Could not crawl Azure metadata: %s', e) + return False + if self.distro and self.distro.name == 'ubuntu': + maybe_remove_ubuntu_network_config_scripts() + + # Process crawled data and augment with various config defaults + self.cfg = util.mergemanydict( + [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG]) + self._metadata_imds = crawled_data['metadata']['imds'] + self.metadata = util.mergemanydict( + [crawled_data['metadata'], DEFAULT_METADATA]) + self.userdata_raw = crawled_data['userdata_raw'] user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) # walinux agent writes files world readable, but expects # the directory to be protected. - write_files(ddir, files, dirmode=0o700) - - self.metadata['instance-id'] = util.read_dmi_data('system-uuid') - + write_files( + self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700) return True def device_name_to_device(self, name): @@ -436,7 +496,7 @@ class DataSourceAzure(sources.DataSource): def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" - url = IMDS_URL + "?api-version=2017-04-02" + url = IMDS_URL + "reprovisiondata?api-version=2017-04-02" headers = {"Metadata": "true"} report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) LOG.debug("Start polling IMDS") @@ -487,7 +547,7 @@ class DataSourceAzure(sources.DataSource): jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False - (_md, self.userdata_raw, cfg, _files) = ret + (_md, _userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): @@ -543,22 +603,15 @@ class DataSourceAzure(sources.DataSource): @property def network_config(self): """Generate a network config like net.generate_fallback_network() with - the following execptions. + the following exceptions. 1. Probe the drivers of the net-devices present and inject them in the network configuration under params: driver: value 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - blacklist = ['mlx4_core'] if not self._network_config: - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking any mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - - self._network_config = netconfig - + self._network_config = parse_network_config(self._metadata_imds) return self._network_config @@ -1025,6 +1078,151 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) +def parse_network_config(imds_metadata): + """Convert imds_metadata dictionary to network v2 configuration. + + Parses network configuration from imds metadata if present or generate + fallback network config excluding mlx4_core devices. + + @param: imds_metadata: Dict of content read from IMDS network service. 
+ @return: Dictionary containing network version 2 standard configuration. + """ + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + return netconfig + + +def get_metadata_from_imds(fallback_nic, retries): + """Query Azure's network metadata service, returning a dictionary. + + If network is not up, setup ephemeral dhcp on fallback_nic to talk to the + IMDS. For more info on IMDS: + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + + @param fallback_nic: String. The name of the nic which requires active + network in order to query IMDS. + @param retries: The number of retries of the IMDS_URL. + + @return: A dict of instance metadata containing compute and network + info. + """ + kwargs = {'logfunc': LOG.debug, + 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', + 'func': _get_metadata_from_imds, 'args': (retries,)} + if net.is_up(fallback_nic): + return util.log_time(**kwargs) + else: + with EphemeralDHCPv4(fallback_nic): + return util.log_time(**kwargs) + + +def _get_metadata_from_imds(retries): + + def retry_on_url_error(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return True # Continue retries + return False # Stop retries on all other exceptions + + url = IMDS_URL + "instance?api-version=2017-12-01" + headers = {"Metadata": "true"} + try: + response = readurl( + url, timeout=1, headers=headers, retries=retries, + exception_cb=retry_on_url_error) + except Exception as e: + LOG.debug('Ignoring IMDS instance metadata: %s', e) + return {} + try: + return util.load_json(str(response)) + except json.decoder.JSONDecodeError: + LOG.warning( + 'Ignoring non-json IMDS instance metadata: %s', str(response)) + return {} + + +def maybe_remove_ubuntu_network_config_scripts(paths=None): + """Remove Azure-specific ubuntu network config for non-primary nics. + + @param paths: List of networking scripts or directories to remove when + present. + + In certain supported ubuntu images, static udev rules or netplan yaml + config is delivered in the base ubuntu image to support dhcp on any + additional interfaces which get attached by a customer at some point + after initial boot. 
Since the Azure datasource can now regenerate + network configuration as metadata reports these new devices, we no longer + want the udev rules or netplan's 90-azure-hotplug.yaml to configure + networking on eth1 or greater as it might collide with cloud-init's + configuration. + + Remove the any existing extended network scripts if the datasource is + enabled to write network per-boot. + """ + if not paths: + paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS + logged = False + for path in paths: + if os.path.exists(path): + if not logged: + LOG.info( + 'Removing Ubuntu extended network scripts because' + ' cloud-init updates Azure network configuration on the' + ' following event: %s.', + EventType.BOOT) + logged = True + if os.path.isdir(path): + util.del_dir(path) + else: + util.del_file(path) + + +def _is_platform_viable(seed_dir): + """Check platform environment to report if this datasource may run.""" + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False + + class BrokenAzureDataSource(Exception): pass diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e82716eb..4e428b71 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,15 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import distros from cloudinit import helpers -from cloudinit.sources import DataSourceAzure as dsaz +from cloudinit import url_helper +from cloudinit.sources import ( + UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, find_freebsd_part, get_path_dev_freebsd, MountFailedError) from cloudinit.version import version_string as vs -from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, - ExitStack, PY26, SkipTest) +from cloudinit.tests.helpers import ( + HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, + ExitStack, PY26, SkipTest) import crypt +import httpretty +import json import os import stat import xml.etree.ElementTree as ET @@ -77,6 +83,106 @@ def construct_valid_ovf_env(data=None, pubkeys=None, return content +NETWORK_METADATA = { + "network": { + "interface": [ + { + "macAddress": "000D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.0.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81" + } + ] + } + } + ] + } +} + + +class TestGetMetadataFromIMDS(HttprettyTestCase): + + with_logs = True + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() + self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01" + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_does_not_dhcp_if_network_is_up( + self, m_net_is_up, m_dhcp, m_readurl): + """Do not perform DHCP setup when nic is already up.""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=3)) + + 
m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_not_called() + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_performs_dhcp_when_network_is_down( + self, m_net_is_up, m_dhcp, m_readurl): + """Perform DHCP setup when nic is not up.""" + m_net_is_up.return_value = False + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_called_with('eth9') + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, timeout=1) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up') + def test_get_metadata_from_imds_empty_when_no_imds_present( + self, m_net_is_up, m_sleep): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + class TestAzureDataSource(CiTestCase): with_logs = True @@ -95,8 +201,19 @@ class TestAzureDataSource(CiTestCase): self.patches = ExitStack() self.addCleanup(self.patches.close) - self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) - + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + self.m_get_metadata_from_imds = self.patches.enter_context( + mock.patch.object( + dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value=NETWORK_METADATA))) + self.m_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.sources.net.find_fallback_nic', + return_value='eth9')) + self.m_remove_ubuntu_network_scripts = self.patches.enter_context( + mock.patch.object( + dsaz, 'maybe_remove_ubuntu_network_config_scripts', + mock.MagicMock())) super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -137,7 +254,7 @@ scbus-1 on xpt0 bus 0 ]) return dsaz - def _get_ds(self, data, agent_command=None): + def _get_ds(self, data, agent_command=None, distro=None): def dsdevs(): return data.get('dsdevs', []) @@ -186,8 +303,11 @@ scbus-1 on xpt0 bus 0 side_effect=_wait_for_files)), ]) + if distro is not None: + distro_cls = distros.fetch(distro) + distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=None, paths=self.paths) + data.get('sys_cfg', {}), distro=distro, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -260,29 +380,20 @@ fdescfs /dev/fd fdescfs rw 0 0 res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) - @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') - def 
test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): - """Report non-azure when DMI's chassis asset tag doesn't match. - - Return False when the asset tag doesn't match Azure's static - AZURE_CHASSIS_ASSET_TAG. - """ + @mock.patch('cloudinit.sources.DataSourceAzure._is_platform_viable') + def test_call_is_platform_viable_seed(self, m_is_platform_viable): + """Check seed_dir using _is_platform_viable and return False.""" # Return a non-matching asset tag value - nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' - m_read_dmi_data.return_value = nonazure_tag + m_is_platform_viable.return_value = False dsrc = dsaz.DataSourceAzure( {}, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) - self.assertEqual( - "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( - nonazure_tag), - self.logs.getvalue()) + m_is_platform_viable.assert_called_with(dsrc.seed_dir) def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} - dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) @@ -291,6 +402,82 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(os.path.isfile( os.path.join(self.waagent_d, 'ovf-env.xml'))) + def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): + """get_data on non-Ubuntu will not remove ubuntu net scripts.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='debian') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_not_called() + + def test_get_data_on_ubuntu_will_remove_network_scripts(self): + """get_data will remove ubuntu net scripts on Ubuntu distro.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data, distro='ubuntu') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_called_once_with() + + def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): + """Return all structured metadata and cache no class attributes.""" + yaml_cfg = "{agent_command: my_command}\n" + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, + 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + dsrc = self._get_ds(data) + expected_cfg = { + 'PreprovisionedVm': False, + 'datasource': {'Azure': {'agent_command': 'my_command'}}, + 'system_info': {'default_user': {'name': u'myuser'}}} + expected_metadata = { + 'azure_data': { + 'configurationsettype': 'LinuxProvisioningConfiguration'}, + 'imds': {'network': {'interface': [{ + 'ipv4': {'ipAddress': [ + {'privateIpAddress': '10.0.0.4', + 'publicIpAddress': '104.46.124.81'}], + 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, + 'ipv6': {'ipAddress': []}, + 'macAddress': '000D3A047598'}]}}, + 'instance-id': 'test-instance-id', + 'local-hostname': u'myhost', + 'random_seed': 'wild'} + + crawled_metadata = dsrc.crawl_metadata() + + self.assertItemsEqual( + crawled_metadata.keys(), + ['cfg', 'files', 'metadata', 'userdata_raw']) + self.assertEqual(crawled_metadata['cfg'], expected_cfg) + self.assertEqual( + list(crawled_metadata['files'].keys()), ['ovf-env.xml']) + self.assertIn( + b'myhost', + crawled_metadata['files']['ovf-env.xml']) + self.assertEqual(crawled_metadata['metadata'], expected_metadata) + 
self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') + self.assertEqual(dsrc.userdata_raw, None) + self.assertEqual(dsrc.metadata, {}) + self.assertEqual(dsrc._metadata_imds, UNSET) + self.assertFalse(os.path.isfile( + os.path.join(self.waagent_d, 'ovf-env.xml'))) + + def test_crawl_metadata_raises_invalid_metadata_on_error(self): + """crawl_metadata raises an exception on invalid ovf-env.xml.""" + data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} + dsrc = self._get_ds(data) + error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' + ' syntax error: line 1, column 0') + with self.assertRaises(InvalidMetaDataException) as cm: + dsrc.crawl_metadata() + self.assertEqual(str(cm.exception), error_msg) + def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -314,6 +501,20 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual(data['agent_invoked'], cfg['agent_command']) + def test_network_config_set_from_imds(self): + """Datasource.network_config returns IMDS network data.""" + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp4': True}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} @@ -579,12 +780,34 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual( [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_config(self, mock_fallback): + """Network config is generated from IMDS network data when present.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + expected_cfg = { + 'ethernets': { + 'eth0': {'dhcp4': True, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, + 'version': 2} + + self.assertEqual(expected_cfg, dsrc.network_config) + mock_fallback.assert_not_called() + @mock.patch('cloudinit.net.get_interface_mac') @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent IMDS network data, generate network fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -605,6 +828,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -617,8 +842,9 @@ fdescfs /dev/fd fdescfs rw 0 0 @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') @mock.patch('cloudinit.net.generate_fallback_config') - def test_network_config_blacklist(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + def test_fallback_network_config_blacklist(self, mock_fallback, mock_dd, + 
mock_devlist, mock_get_mac): + """On absent network metadata, blacklist mlx from fallback config.""" odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} @@ -649,6 +875,8 @@ fdescfs /dev/fd fdescfs rw 0 0 mock_get_mac.return_value = '00:11:22:33:44:55' dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} ret = dsrc.get_data() self.assertTrue(ret) @@ -689,9 +917,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz.util, 'which', lambda x: True)) + mock.patch.object(dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(dsaz, '_get_random_seed')) + mock.patch.object(dsaz.util, 'which', lambda x: True)) + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) def _dmi_mocks(key): if key == 'system-uuid': @@ -719,9 +950,12 @@ class TestAzureBounce(CiTestCase): mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) + self.find_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) def tearDown(self): self.patches.close() + super(TestAzureBounce, self).tearDown() def _get_ds(self, ovfcontent=None, agent_command=None): if ovfcontent is not None: @@ -927,7 +1161,7 @@ class TestLoadAzureDsDir(CiTestCase): str(context_manager.exception)) -class TestReadAzureOvf(TestCase): +class TestReadAzureOvf(CiTestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) @@ -1188,6 +1422,25 @@ class TestCanDevBeReformatted(CiTestCase): "(datasource.Azure.never_destroy_ntfs)", msg) +class TestClearCachedData(CiTestCase): + + def test_clear_cached_attrs_clears_imds(self): + """All class attributes are reset to defaults, including imds data.""" + tmp = self.tmp_dir() + paths = helpers.Paths( + {'cloud_dir': tmp, 'run_dir': tmp}) + dsrc = dsaz.DataSourceAzure({}, distro=None, paths=paths) + clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] + dsrc.metadata = 'md' + dsrc.userdata = 'ud' + dsrc._metadata_imds = 'imds' + dsrc._dirty_cache = True + dsrc.clear_cached_attrs() + self.assertEqual( + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], + clean_values) + + class TestAzureNetExists(CiTestCase): def test_azure_net_must_exist_for_legacy_objpkl(self): @@ -1398,4 +1651,94 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.assertEqual(m_net.call_count, 1) +class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() + self.tmp = self.tmp_dir() + + def test_remove_network_scripts_removes_both_files_and_directories(self): + """Any files or directories in paths are removed when present.""" + file1 = self.tmp_path('file1', dir=self.tmp) + subdir = self.tmp_path('sub1', dir=self.tmp) + subfile = self.tmp_path('leaf1', dir=subdir) + write_file(file1, 'file1content') + write_file(subfile, 'leafcontent') + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) + + for path in (file1, subdir, subfile): + self.assertFalse(os.path.exists(path), + 'Found unremoved: %s' % path) + + expected_logs = [ + 'INFO: Removing Ubuntu extended 
network scripts because cloud-init' + ' updates Azure network configuration on the following event:' + ' System boot.', + 'Recursively deleting %s' % subdir, + 'Attempting to remove %s' % file1] + for log in expected_logs: + self.assertIn(log, self.logs.getvalue()) + + def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): + """Any files or directories absent are skipped without error.""" + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ + self.tmp_path('nodirhere/', dir=self.tmp), + self.tmp_path('notfilehere', dir=self.tmp)]) + self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + def test_remove_network_scripts_default_removes_stock_scripts(self, + m_exists): + """Azure's stock ubuntu image scripts and artifacts are removed.""" + # Report path absent on all to avoid delete operation + m_exists.return_value = False + dsaz.maybe_remove_ubuntu_network_config_scripts() + calls = m_exists.call_args_list + for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: + self.assertIn(mock.call(path), calls) + + +class TestWBIsPlatformViable(CiTestCase): + """White box tests for _is_platform_viable.""" + with_logs = True + + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_non_azure_chassis(self, m_read_dmi_data): + """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) + + @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists') + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): + """Return True if ovf-env.xml exists in known seed dirs.""" + # Non-matching Azure chassis-asset-tag + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + + m_exist.return_value = True + self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) + m_exist.called_once_with('/other/seed/dir') + + def test_false_on_no_matching_azure_criteria(self): + """Report non-azure on unmatched asset tag, ovf-env absent and no dev. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs + and no devices have a label starting with prefix 'rd_rdfe_'. + """ + self.assertFalse(wrap_and_call( + 'cloudinit.sources.DataSourceAzure', + {'os.path.exists': False, + # Non-matching Azure chassis-asset-tag + 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', + 'util.which': None}, + dsaz._is_platform_viable, 'doesnotmatter')) + self.assertIn( + "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( + dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), + self.logs.getvalue()) + + # vi: ts=4 expandtab -- cgit v1.2.3 From aaffd59431fe05932a66016db941fe197c4e7620 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Aug 2018 20:25:31 +0000 Subject: Add datasource Oracle Compute Infrastructure (OCI). This adds a Oracle specific datasource that functions with OCI. It is a simplified version of the OpenStack metadata server with support for vendor-data. It does not support the OCI-C (classic) platform. Also here is a move of BrokenMetadata to common 'sources' as this was the third occurrence of that class. 
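
To make the metadata flow concrete: the service exposes a version index at a
link-local URL with individual entries beneath it. A minimal sketch of that
walk (not part of this patch; it assumes the plain newline-delimited index,
while the real read_metadata()/_load_index() below also cope with Oracle's
html-formatted index) might look like:

    # Illustrative sketch only: fetch each entry listed in the OCI
    # OpenStack-style metadata index, decoding *.json entries in place.
    import json
    import urllib.request

    METADATA_ENDPOINT = "http://169.254.169.254/openstack/"

    def read_oci_metadata(base=METADATA_ENDPOINT, version="2013-10-17"):
        """Return a dict mapping each index entry to its content."""
        index_url = base + version + "/"
        with urllib.request.urlopen(index_url, timeout=5) as response:
            entries = response.read().decode("utf-8").splitlines()
        result = {}
        for path in entries:
            with urllib.request.urlopen(index_url + path, timeout=5) as resp:
                blob = resp.read()
            if path.endswith(".json"):
                result[path.rpartition(".")[0]] = json.loads(blob.decode("utf-8"))
            else:
                result[path] = blob
        return result

On a real OCI instance this only works once networking is up, which is why
the datasource below brings up an ephemeral DHCP lease when the root device
is not iSCSI-configured from the kernel command line.
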
--- .pylintrc | 3 +- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceIBMCloud.py | 13 +- cloudinit/sources/DataSourceOpenStack.py | 12 +- cloudinit/sources/DataSourceOracle.py | 233 +++++++++++++++ cloudinit/sources/__init__.py | 4 + cloudinit/sources/helpers/openstack.py | 6 +- cloudinit/sources/tests/test_oracle.py | 331 ++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/oracle.rst | 26 ++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_openstack.py | 13 +- tests/unittests/test_ds_identify.py | 19 ++ tools/ds-identify | 8 +- 15 files changed, 650 insertions(+), 23 deletions(-) create mode 100644 cloudinit/sources/DataSourceOracle.py create mode 100644 cloudinit/sources/tests/test_oracle.py create mode 100644 doc/rtd/topics/datasources/oracle.rst (limited to 'tests/unittests') diff --git a/.pylintrc b/.pylintrc index 3bfa0c81..e376b48b 100644 --- a/.pylintrc +++ b/.pylintrc @@ -61,7 +61,8 @@ ignored-modules= # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names. -ignored-classes=optparse.Values,thread._local +# argparse.Namespace from https://github.com/PyCQA/pylint/issues/2413 +ignored-classes=argparse.Namespace,optparse.Values,thread._local # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 130ff269..22cb7fde 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -30,6 +30,7 @@ KNOWN_CLOUD_NAMES = [ 'NoCloud', 'OpenNebula', 'OpenStack', + 'Oracle', 'OVF', 'OpenTelekomCloud', 'Scaleway', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dde5749d..ea367cb7 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -38,6 +38,7 @@ CFG_BUILTIN = { 'Scaleway', 'Hetzner', 'IBMCloud', + 'Oracle', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 01106ec0..a5358148 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -295,7 +295,7 @@ def read_md(): results = metadata_from_dir(path) else: results = util.mount_cb(path, metadata_from_dir) - except BrokenMetadata as e: + except sources.BrokenMetadata as e: raise RuntimeError( "Failed reading IBM config disk (platform=%s path=%s): %s" % (platform, path, e)) @@ -304,10 +304,6 @@ def read_md(): return ret -class BrokenMetadata(IOError): - pass - - def metadata_from_dir(source_dir): """Walk source_dir extracting standardized metadata. 
@@ -352,12 +348,13 @@ def metadata_from_dir(source_dir): try: data = transl(raw) except Exception as e: - raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + raise sources.BrokenMetadata( + "Failed decoding %s: %s" % (path, e)) results[name] = data if results.get('metadata_raw') is None: - raise BrokenMetadata( + raise sources.BrokenMetadata( "%s missing required file 'meta_data.json'" % source_dir) results['metadata'] = {} @@ -368,7 +365,7 @@ def metadata_from_dir(source_dir): try: md['random_seed'] = base64.b64decode(md_raw['random_seed']) except (ValueError, TypeError) as e: - raise BrokenMetadata( + raise sources.BrokenMetadata( "Badly formatted metadata random_seed entry: %s" % e) renames = ( diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b9ade90d..4a015240 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -13,6 +13,7 @@ from cloudinit import url_helper from cloudinit import util from cloudinit.sources.helpers import openstack +from cloudinit.sources import DataSourceOracle as oracle LOG = logging.getLogger(__name__) @@ -28,8 +29,7 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova' DMI_PRODUCT_COMPUTE = 'OpenStack Compute' VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' -DMI_ASSET_TAG_ORACLE_CLOUD = 'OracleCloud.com' -VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_ORACLE_CLOUD] +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): @@ -122,8 +122,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): False when unable to contact metadata service or when metadata format is invalid or disabled. """ - if not detect_openstack(): + oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list') + if not detect_openstack(accept_oracle=not oracle_considered): return False + if self.perform_dhcp_setup: # Setup networking in init-local stage. try: with EphemeralDHCPv4(self.fallback_interface): @@ -215,7 +217,7 @@ def read_metadata_service(base_url, ssl_details=None, return reader.read_v2() -def detect_openstack(): +def detect_openstack(accept_oracle=False): """Return True when a potential OpenStack platform is detected.""" if not util.is_x86(): return True # Non-Intel cpus don't properly report dmi product names @@ -224,6 +226,8 @@ def detect_openstack(): return True elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: return True + elif accept_oracle and oracle._is_platform_viable(): + return True elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: return True return False diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py new file mode 100644 index 00000000..fab39af3 --- /dev/null +++ b/cloudinit/sources/DataSourceOracle.py @@ -0,0 +1,233 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure) + +OCI provides a OpenStack like metadata service which provides only +'2013-10-17' and 'latest' versions.. + +Notes: + * This datasource does not support the OCI-Classic. OCI-Classic + provides an EC2 lookalike metadata service. + * The uuid provided in DMI data is not the same as the meta-data provided + instance-id, but has an equivalent lifespan. + * We do need to support upgrade from an instance that cloud-init + identified as OpenStack. 
+ * Both bare-metal and vms use iscsi root + * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com +""" + +from cloudinit.url_helper import combine_url, readurl, UrlError +from cloudinit.net import dhcp +from cloudinit import net +from cloudinit import sources +from cloudinit import util +from cloudinit.net import cmdline +from cloudinit import log as logging + +import json +import re + +LOG = logging.getLogger(__name__) + +CHASSIS_ASSET_TAG = "OracleCloud.com" +METADATA_ENDPOINT = "http://169.254.169.254/openstack/" + + +class DataSourceOracle(sources.DataSource): + + dsname = 'Oracle' + system_uuid = None + vendordata_pure = None + _network_config = sources.UNSET + + def _is_platform_viable(self): + """Check platform environment to report if this datasource may run.""" + return _is_platform_viable() + + def _get_data(self): + if not self._is_platform_viable(): + return False + + # network may be configured if iscsi root. If that is the case + # then read_kernel_cmdline_config will return non-None. + if _is_iscsi_root(): + data = self.crawl_metadata() + else: + with dhcp.EphemeralDHCPv4(net.find_fallback_nic()): + data = self.crawl_metadata() + + self._crawled_metadata = data + vdata = data['2013-10-17'] + + self.userdata_raw = vdata.get('user_data') + self.system_uuid = vdata['system_uuid'] + + vd = vdata.get('vendor_data') + if vd: + self.vendordata_pure = vd + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warning("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + + mdcopies = ('public_keys',) + md = dict([(k, vdata['meta_data'].get(k)) + for k in mdcopies if k in vdata['meta_data']]) + + mdtrans = ( + # oracle meta_data.json name, cloudinit.datasource.metadata name + ('availability_zone', 'availability-zone'), + ('hostname', 'local-hostname'), + ('launch_index', 'launch-index'), + ('uuid', 'instance-id'), + ) + for dsname, ciname in mdtrans: + if dsname in vdata['meta_data']: + md[ciname] = vdata['meta_data'][dsname] + + self.metadata = md + return True + + def crawl_metadata(self): + return read_metadata() + + def check_instance_id(self, sys_cfg): + """quickly check (local only) if self.instance_id is still valid + + On Oracle, the dmi-provided system uuid differs from the instance-id + but has the same life-span.""" + return sources.instance_id_matches_system_uuid(self.system_uuid) + + def get_public_ssh_keys(self): + return sources.normalize_pubkey_data(self.metadata.get('public_keys')) + + @property + def network_config(self): + """Network config is read from initramfs provided files + If none is present, then we fall back to fallback configuration. + + One thing to note here is that this method is not currently + considered at all if there is is kernel/initramfs provided + data. In that case, stages considers that the cmdline data + overrides datasource provided data and does not consult here. 
+
+        We nonetheless return cmdline provided config if present
+        and fallback to generate fallback."""
+        if self._network_config == sources.UNSET:
+            cmdline_cfg = cmdline.read_kernel_cmdline_config()
+            if cmdline_cfg:
+                self._network_config = cmdline_cfg
+            else:
+                self._network_config = self.distro.generate_fallback_config()
+        return self._network_config
+
+
+def _read_system_uuid():
+    sys_uuid = util.read_dmi_data('system-uuid')
+    return None if sys_uuid is None else sys_uuid.lower()
+
+
+def _is_platform_viable():
+    asset_tag = util.read_dmi_data('chassis-asset-tag')
+    return asset_tag == CHASSIS_ASSET_TAG
+
+
+def _is_iscsi_root():
+    return bool(cmdline.read_kernel_cmdline_config())
+
+
+def _load_index(content):
+    """Return a list of entries parsed from content.
+
+    OpenStack's metadata service returns a newline delimited list
+    of items.  Oracle's implementation has html formatted list of links.
+    The parser here just grabs targets from <a href="target">
+    and throws away "../".
+
+    Oracle has accepted that to be buggy and may fix in the future
+    to instead return a '\n' delimited plain text list.  This function
+    will continue to work if that change is made."""
+    if not content.lower().startswith("<html>"):
+        return content.splitlines()
+    items = re.findall(
+        r'href="(?P<link>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
+    return [i for i in items if not i.startswith(".")]
+
+
+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
+                  version='2013-10-17'):
+    """Read metadata, return a dictionary.
+
+    Each path listed in the index will be represented in the dictionary.
+    If the path ends in .json, then the content will be decoded and
+    populated into the dictionary.
+
+    The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
+    Example: given paths = ('user_data', 'meta_data.json')
+    This would return:
+    {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
+     'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
+    """
+    endpoint = combine_url(endpoint_base, version) + "/"
+    if sys_uuid is None:
+        sys_uuid = _read_system_uuid()
+    if not sys_uuid:
+        raise sources.BrokenMetadata("Failed to read system uuid.")
+
+    try:
+        resp = readurl(endpoint)
+        if not resp.ok():
+            raise sources.BrokenMetadata(
+                "Bad response from %s: %s" % (endpoint, resp.code))
+    except UrlError as e:
+        raise sources.BrokenMetadata(
+            "Failed to read index at %s: %s" % (endpoint, e))
+
+    entries = _load_index(resp.contents.decode('utf-8'))
+    LOG.debug("index url %s contained: %s", endpoint, entries)
+
+    # meta_data.json is required.
+ mdj = 'meta_data.json' + if mdj not in entries: + raise sources.BrokenMetadata( + "Required field '%s' missing in index at %s" % (mdj, endpoint)) + + ret = {'system_uuid': sys_uuid} + for path in entries: + response = readurl(combine_url(endpoint, path)) + if path.endswith(".json"): + ret[path.rpartition(".")[0]] = ( + json.loads(response.contents.decode('utf-8'))) + else: + ret[path] = response.contents + + return {version: ret} + + +# Used to match classes to dependencies +datasources = [ + (DataSourceOracle, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + import os + + parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata') + parser.add_argument("--endpoint", metavar="URL", + help="The url of the metadata service.", + default=METADATA_ENDPOINT) + args = parser.parse_args() + sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None + + data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid) + data['is_platform_viable'] = _is_platform_viable() + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 06e613f8..41fde9ba 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -671,6 +671,10 @@ def convert_vendordata(data, recurse=True): raise ValueError("Unknown data type for vendordata: %s" % type(data)) +class BrokenMetadata(IOError): + pass + + # 'depends' is a list of dependencies (DEP_FILESYSTEM) # ds_list is a list of 2 item lists # ds_list = [ diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index a4cf0667..8f9c1441 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,6 +21,8 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util +from cloudinit.sources import BrokenMetadata + # See https://docs.openstack.org/user-guide/cli-config-drive.html LOG = logging.getLogger(__name__) @@ -68,10 +70,6 @@ class NonReadable(IOError): pass -class BrokenMetadata(IOError): - pass - - class SourceMixin(object): def _ec2_name_to_device(self, name): if not self.ec2_metadata: diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py new file mode 100644 index 00000000..7599126c --- /dev/null +++ b/cloudinit/sources/tests/test_oracle.py @@ -0,0 +1,331 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.sources import DataSourceOracle as oracle +from cloudinit.sources import BrokenMetadata +from cloudinit import helpers + +from cloudinit.tests import helpers as test_helpers + +from textwrap import dedent +import argparse +import httpretty +import json +import mock +import os +import six +import uuid + +DS_PATH = "cloudinit.sources.DataSourceOracle" +MD_VER = "2013-10-17" + + +class TestDataSourceOracle(test_helpers.CiTestCase): + """Test datasource DataSourceOracle.""" + + ds_class = oracle.DataSourceOracle + + my_uuid = str(uuid.uuid4()) + my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj", + "name": "ci-vm1", "availability_zone": "phx-ad-3", + "hostname": "ci-vm1hostname", + "launch_index": 0, "files": [], + "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"}, + "meta": {}} + + def _patch_instance(self, inst, patches): + """Patch an instance of a class 'inst'. + for each name, kwargs in patches: + inst.name = mock.Mock(**kwargs) + returns a namespace object that has + namespace.name = mock.Mock(**kwargs) + Do not bother with cleanup as instance is assumed transient.""" + mocks = argparse.Namespace() + for name, kwargs in patches.items(): + imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs) + setattr(mocks, name, imock) + setattr(inst, name, imock) + return mocks + + def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None, + patches=None): + if sys_cfg is None: + sys_cfg = {} + if patches is None: + patches = {} + if paths is None: + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = helpers.Paths(dirs) + + ds = self.ds_class(sys_cfg=sys_cfg, distro=distro, + paths=paths, ud_proc=ud_proc) + + return ds, self._patch_instance(ds, patches) + + def test_platform_not_viable_returns_false(self): + ds, mocks = self._get_ds( + patches={'_is_platform_viable': {'return_value': False}}) + self.assertFalse(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_without_userdata(self, m_is_iscsi_root): + """If no user-data is provided, it should not be in return dict.""" + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(self.my_uuid, ds.system_uuid) + self.assertEqual(self.my_md['availability_zone'], ds.availability_zone) + self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys()) + self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) + self.assertIsNone(ds.userdata_raw) + + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_with_vendordata(self, m_is_iscsi_root): + """Test with vendor data.""" + vd = {'cloud-init': '#cloud-config\nkey: value'} + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md, + 'vendor_data': vd}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(vd, ds.vendordata_pure) + self.assertEqual(vd['cloud-init'], ds.vendordata_raw) + + 
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_with_userdata(self, m_is_iscsi_root): + """Ensure user-data is populated if present and is binary.""" + my_userdata = b'abcdefg' + ds, mocks = self._get_ds(patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md, + 'user_data': my_userdata}}}}) + self.assertTrue(ds._get_data()) + mocks._is_platform_viable.assert_called_once_with() + mocks.crawl_metadata.assert_called_once_with() + self.assertEqual(self.my_uuid, ds.system_uuid) + self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys()) + self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) + self.assertEqual(my_userdata, ds.userdata_raw) + + @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config): + """network_config should read kernel cmdline.""" + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ncfg = {'version': 1, 'config': [{'a': 'b'}]} + m_cmdline_config.return_value = ncfg + self.assertTrue(ds._get_data()) + self.assertEqual(ncfg, ds.network_config) + m_cmdline_config.assert_called_once_with() + self.assertFalse(distro.generate_fallback_config.called) + + @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config): + """test that fallback network is generated if no kernel cmdline.""" + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ncfg = {'version': 1, 'config': [{'a': 'b'}]} + m_cmdline_config.return_value = None + self.assertTrue(ds._get_data()) + ncfg = {'version': 1, 'config': [{'distro1': 'value'}]} + distro.generate_fallback_config.return_value = ncfg + self.assertEqual(ncfg, ds.network_config) + m_cmdline_config.assert_called_once_with() + distro.generate_fallback_config.assert_called_once_with() + self.assertEqual(1, m_cmdline_config.call_count) + + # test that the result got cached, and the methods not re-called. + self.assertEqual(ncfg, ds.network_config) + self.assertEqual(1, m_cmdline_config.call_count) + + +@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) +class TestReadMetaData(test_helpers.HttprettyTestCase): + """Test the read_metadata which interacts with http metadata service.""" + + mdurl = oracle.METADATA_ENDPOINT + my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj", + "name": "ci-vm1", "availability_zone": "phx-ad-3", + "hostname": "ci-vm1hostname", + "launch_index": 0, "files": [], + "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"}, + "meta": {}} + + def populate_md(self, data): + """call httppretty.register_url for each item dict 'data', + including valid indexes. 
Text values converted to bytes."""
+        httpretty.register_uri(
+            httpretty.GET, self.mdurl + MD_VER + "/",
+            '\n'.join(data.keys()).encode('utf-8'))
+        for k, v in data.items():
+            httpretty.register_uri(
+                httpretty.GET, self.mdurl + MD_VER + "/" + k,
+                v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+
+    def test_broken_no_sys_uuid(self, m_read_system_uuid):
+        """Datasource requires ability to read system_uuid and true return."""
+        m_read_system_uuid.return_value = None
+        self.assertRaises(BrokenMetadata, oracle.read_metadata)
+
+    def test_broken_no_metadata_json(self, m_read_system_uuid):
+        """Datasource requires meta_data.json."""
+        httpretty.register_uri(
+            httpretty.GET, self.mdurl + MD_VER + "/",
+            '\n'.join(['user_data']).encode('utf-8'))
+        with self.assertRaises(BrokenMetadata) as cm:
+            oracle.read_metadata()
+        self.assertIn("Required field 'meta_data.json' missing",
+                      str(cm.exception))
+
+    def test_with_userdata(self, m_read_system_uuid):
+        data = {'user_data': b'#!/bin/sh\necho hi world\n',
+                'meta_data.json': json.dumps(self.my_md)}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertEqual(data['user_data'], result['user_data'])
+        self.assertEqual(self.my_md, result['meta_data'])
+
+    def test_without_userdata(self, m_read_system_uuid):
+        data = {'meta_data.json': json.dumps(self.my_md)}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertNotIn('user_data', result)
+        self.assertEqual(self.my_md, result['meta_data'])
+
+    def test_unknown_fields_included(self, m_read_system_uuid):
+        """Unknown fields listed in index should be included.
+        And those ending in .json should be decoded."""
+        some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
+        some_vendor_data = {'cloud-init': 'foo'}
+        data = {'meta_data.json': json.dumps(self.my_md),
+                'some_data.json': json.dumps(some_data),
+                'vendor_data.json': json.dumps(some_vendor_data),
+                'other_blob': b'this is blob'}
+        self.populate_md(data)
+        result = oracle.read_metadata()[MD_VER]
+        self.assertNotIn('user_data', result)
+        self.assertEqual(self.my_md, result['meta_data'])
+        self.assertEqual(some_data, result['some_data'])
+        self.assertEqual(some_vendor_data, result['vendor_data'])
+        self.assertEqual(data['other_blob'], result['other_blob'])
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+    @mock.patch(DS_PATH + ".util.read_dmi_data",
+                return_value=oracle.CHASSIS_ASSET_TAG)
+    def test_expected_viable(self, m_read_dmi_data):
+        """System with known chassis tag is viable."""
+        self.assertTrue(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".util.read_dmi_data", return_value=None)
+    def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+        """System without known chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+    @mock.patch(DS_PATH + ".util.read_dmi_data", return_value="LetsGoCubs")
+    def test_expected_not_viable_other(self, m_read_dmi_data):
+        """System with unknown chassis tag is not viable."""
+        self.assertFalse(oracle._is_platform_viable())
+        m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+
+class TestLoadIndex(test_helpers.CiTestCase):
+    """_load_index handles parsing of an index into a proper list.
+    The tests here guarantee correct parsing of html version or
+    See the function docstring for more doc."""
+
+    _known_html_api_versions = dedent("""\
+        <html>
+        <head><title>Index of /openstack/</title></head>
+        <body bgcolor="white">
+        <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
+        <a href="2013-10-17/">2013-10-17/</a>   27-Jun-2018 12:22  -
+        <a href="latest/">latest/</a>           27-Jun-2018 12:22  -
+        </pre><hr></body>
+        </html>""")
+
+    _known_html_contents = dedent("""\
+        <html>
+        <head><title>Index of /openstack/2013-10-17/</title></head>
+        <body bgcolor="white">
+        <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
+        <a href="meta_data.json">meta_data.json</a>  27-Jun-2018 12:22  679
+        <a href="user_data">user_data</a>            27-Jun-2018 12:22  146
+        </pre><hr></body>
+        </html>""")
+
+    def test_parse_html(self):
+        """Test parsing of lower case html."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(self._known_html_api_versions))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(self._known_html_contents))
+
+    def test_parse_html_upper(self):
+        """Test parsing of upper case html, although known content is lower."""
+        def _toupper(data):
+            return data.replace("<html>", "<HTML>")
+
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index(_toupper(self._known_html_api_versions)))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index(_toupper(self._known_html_contents)))
+
+    def test_parse_newline_list_with_endl(self):
+        """Test parsing of newline separated list with ending newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
+
+    def test_parse_newline_list_without_endl(self):
+        """Test parsing of newline separated list with no ending newline.
+
+        Actual openstack implementation does not include trailing newline."""
+        self.assertEqual(
+            ['2013-10-17/', 'latest/'],
+            oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
+        self.assertEqual(
+            ['meta_data.json', 'user_data'],
+            oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+
+
+# vi: ts=4 expandtab
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 30e57d85..83034589 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -189,6 +189,7 @@ Follow for more information.
    datasources/nocloud.rst
    datasources/opennebula.rst
    datasources/openstack.rst
+   datasources/oracle.rst
    datasources/ovf.rst
    datasources/smartos.rst
    datasources/fallback.rst
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
new file mode 100644
index 00000000..f2383cee
--- /dev/null
+++ b/doc/rtd/topics/datasources/oracle.rst
@@ -0,0 +1,26 @@
+.. _datasource_oracle:
+
+Oracle
+======
+
+This datasource reads metadata, vendor-data and user-data from
+`Oracle Compute Infrastructure`_ (OCI).
+
+Oracle Platform
+---------------
+OCI provides bare metal and virtual machines. In both cases,
+the platform identifies itself via DMI data in the chassis asset tag
+with the string 'OracleCloud.com'.
+
+Oracle's platform provides a metadata service that mimics the 2013-10-17
+version of OpenStack metadata service. Initially support for Oracle
+was done via the OpenStack datasource.
+
+Cloud-init has a specific datasource for Oracle in order to:
+ a. allow and support future growth of the OCI platform.
+ b. address small differences between OpenStack and Oracle metadata
+    implementation.
+
+
+.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
+.. 
vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 1a5a3db2..6b01a4ea 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -20,6 +20,7 @@ from cloudinit.sources import ( DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, DataSourceOpenStack as OpenStack, + DataSourceOracle as Oracle, DataSourceOVF as OVF, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, @@ -37,6 +38,7 @@ DEFAULT_LOCAL = [ IBMCloud.DataSourceIBMCloud, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, + Oracle.DataSourceOracle, OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index d862f4bc..6e1e971b 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -16,7 +16,7 @@ from six import StringIO from cloudinit import helpers from cloudinit import settings -from cloudinit.sources import convert_vendordata, UNSET +from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -186,7 +186,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = json.dumps(os_meta) _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) @@ -217,7 +217,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('vendor_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) @@ -225,7 +225,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): if k.endswith('meta_data.json'): os_files[k] = '{' # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) + self.assertRaises(BrokenMetadata, _read_metadata_service) @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_datasource(self, m_dhcp): @@ -525,8 +525,11 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - ds.detect_openstack(), + ds.detect_openstack(accept_oracle=True), 'Expected detect_openstack == True on OracleCloud.com') + self.assertFalse( + ds.detect_openstack(accept_oracle=False), + 'Expected detect_openstack == False.') @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 64d9f9f8..e08e7908 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -12,6 +12,7 @@ from cloudinit.tests.helpers import ( from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceSmartOS as ds_smartos +from cloudinit.sources import DataSourceOracle as ds_oracle UNAME_MYSYS = ("Linux bart 
4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -598,6 +599,18 @@ class TestIsIBMProvisioning(DsIdentifyBase): self.assertIn("from current boot", ret.stderr) +class TestOracle(DsIdentifyBase): + def test_found_by_chassis(self): + """Simple positive test of Oracle by chassis id.""" + self._test_ds_found('Oracle') + + def test_not_found(self): + """Simple negative test of Oracle.""" + mycfg = copy.deepcopy(VALID_CFG['Oracle']) + mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle" + self._check_via_dict(mycfg, rc=RC_NOT_FOUND) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -838,6 +851,12 @@ VALID_CFG = { }, ], }, + 'Oracle': { + 'ds': 'Oracle', + 'files': { + P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n', + } + }, 'SmartOS-bhyve': { 'ds': 'SmartOS', 'mocks': [ diff --git a/tools/ds-identify b/tools/ds-identify index ce0477a5..fcc60149 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -116,7 +116,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -1036,6 +1036,12 @@ dscheck_Hetzner() { return ${DS_NOT_FOUND} } +dscheck_Oracle() { + local asset_tag="OracleCloud.com" + dmi_chassis_asset_tag_matches "${asset_tag}" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + is_ibm_provisioning() { local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg" local logf="${PATH_ROOT}/root/swinstall.log" -- cgit v1.2.3 From 2320c3de2712c2f320b0d8af4aa129219cc2ad04 Mon Sep 17 00:00:00 2001 From: Andy Liu Date: Fri, 24 Aug 2018 22:25:37 +0000 Subject: logging: Add logging config type hyperv for reporting via Azure KVP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Linux guests can provide information to Hyper-V hosts via KVP. KVP allows the guests to provide any string key-value-pairs back to the host's registry. On linux, kvp communication pools are presented as pool files in /var/lib/hyperv/.kvp_pool_#. 
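For illustration, each record in a kvp pool file is a fixed-size struct:
a 512-byte null-padded UTF-8 key followed by a 2048-byte null-padded
UTF-8 value. A minimal, self-contained sketch of that record layout
follows; the sizes and the CLOUD_INIT key prefix come from the handler
added below, while the helper names and the example key are hypothetical:

    import struct

    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)

    def encode_kvp_record(key, value):
        # Pack key and value into fixed-width, null-padded fields.
        return struct.pack(
            "%ds%ds" % (HV_KVP_EXCHANGE_MAX_KEY_SIZE,
                        HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
            key.encode('utf-8'), value.encode('utf-8'))

    def decode_kvp_record(record):
        # Split the fixed-width record and strip the null padding.
        key = record[:HV_KVP_EXCHANGE_MAX_KEY_SIZE]
        value = record[HV_KVP_EXCHANGE_MAX_KEY_SIZE:]
        return (key.decode('utf-8').strip('\x00'),
                value.decode('utf-8').strip('\x00'))

    record = encode_kvp_record('CLOUD_INIT|0|boot', '{"msg": "example"}')
    assert len(record) == HV_KVP_RECORD_SIZE
    assert decode_kvp_record(record) == ('CLOUD_INIT|0|boot',
                                         '{"msg": "example"}')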
The following reporting configuration can enable this kvp reporting in
addition to default logging if the pool files exist:

reporting:
    logging:
        type: log
    telemetry:
        type: hyperv
---
 cloudinit/cloud.py                       |   4 +-
 cloudinit/cmd/main.py                    |   4 +-
 cloudinit/distros/__init__.py            |   2 +-
 cloudinit/reporting/__init__.py          |   2 +-
 cloudinit/reporting/handlers.py          | 255 +++++++++++++++++++++++++++++++
 cloudinit/stages.py                      |   2 +-
 tests/unittests/test_reporting_hyperv.py | 134 ++++++++++++++++
 7 files changed, 396 insertions(+), 7 deletions(-)
 create mode 100644 tests/unittests/test_reporting_hyperv.py

(limited to 'tests/unittests')

diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 6d12c437..7ae98e1c 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
 
     @property
     def cfg(self):
-        # Ensure that not indirectly modified
+        # Ensure that cfg is not indirectly modified
         return copy.deepcopy(self._cfg)
 
     def run(self, name, functor, args, freq=None, clear_on_fail=False):
@@ -61,7 +61,7 @@ class Cloud(object):
             return None
         return fn
 
-    # The rest of thes are just useful proxies
+    # The rest of these are just useful proxies
     def get_userdata(self, apply_filter=True):
         return self.datasource.get_userdata(apply_filter)
 
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index d6ba90f4..c0edee18 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -315,7 +315,7 @@ def main_init(name, args):
             existing = "trust"
 
     init.purge_cache()
-    # Delete the non-net file as well
+    # Delete the no-net file as well
     util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
 
     # Stage 5
@@ -339,7 +339,7 @@ def main_init(name, args):
                          " Likely bad things to come!"))
         if not args.force:
             init.apply_network_config(bring_up=not args.local)
-        LOG.debug("[%s] Exiting without datasource in local mode", mode)
+        LOG.debug("[%s] Exiting without datasource", mode)
         if mode == sources.DSMODE_LOCAL:
             return (None, [])
         else:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab0b0776..fde054e9 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -157,7 +157,7 @@ class Distro(object):
                                                  distro)
         header = '\n'.join([
             "# Converted from network_config for distro %s" % distro,
-            "# Implmentation of _write_network_config is needed."
+            "# Implementation of _write_network_config is needed."
         ])
         ns = network_state.parse_net_config_data(netconfig)
         contents = eni.network_state_to_eni(
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 1ed2b487..e047767e 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
 
 
 def update_configuration(config):
-    """Update the instanciated_handler_registry.
+    """Update the instantiated_handler_registry.
 
     :param config: The dictionary containing changes to apply.
                    If a key is given
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 4066076c..4b4bb396 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,17 +1,34 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import abc
+import fcntl
 import json
 import six
+import os
+import re
+import struct
+import threading
+import time
 
 from cloudinit import log as logging
 from cloudinit.registry import DictRegistry
 from cloudinit import (url_helper, util)
+from datetime import datetime
 
+if six.PY2:
+    import multiprocessing.queues as queue
+    from multiprocessing.queues import JoinableQueue as JQueue
+else:
+    import queue
+    from queue import Queue as JQueue
 
 LOG = logging.getLogger(__name__)
 
 
+class ReportException(Exception):
+    pass
+
+
 @six.add_metaclass(abc.ABCMeta)
 class ReportingHandler(object):
     """Base class for report handlers.
@@ -85,9 +102,247 @@ class WebHookHandler(ReportingHandler):
             LOG.warning("failed posting event: %s", event.as_string())
 
 
+class HyperVKvpReportingHandler(ReportingHandler):
+    """
+    Reports events to a Hyper-V host using the Key-Value-Pair exchange
+    protocol, which the host can use to obtain high-level diagnostic
+    information about the guest.
+
+    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
+    running. It reads the kvp_file when the host requests the guest to
+    enumerate the KVPs.
+
+    This reporter collates all events for a module (origin|name) in a single
+    json string in the dictionary.
+
+    For more information, see
+    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
+    """
+    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
+    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
+                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
+    EVENT_PREFIX = 'CLOUD_INIT'
+    MSG_KEY = 'msg'
+    RESULT_KEY = 'result'
+    DESC_IDX_KEY = 'msg_i'
+    JSON_SEPARATORS = (',', ':')
+    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+
+    def __init__(self,
+                 kvp_file_path=KVP_POOL_FILE_GUEST,
+                 event_types=None):
+        super(HyperVKvpReportingHandler, self).__init__()
+        self._kvp_file_path = kvp_file_path
+        self._event_types = event_types
+        self.running = False
+        self.queue_lock = threading.Lock()
+        self.running_lock = threading.Lock()
+        self.q = JQueue()
+        self.kvp_file = None
+        self.incarnation_no = self._get_incarnation_no()
+        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+                                                  self.incarnation_no)
+        self._current_offset = 0
+
+    def _get_incarnation_no(self):
+        """
+        Use the system boot time (as a unix timestamp) as the incarnation
+        number; it is used to distinguish old data already stored in kvp
+        from new data.
+        """
+        uptime_str = util.uptime()
+        try:
+            return int(time.time() - float(uptime_str))
+        except ValueError:
+            LOG.warning("uptime '%s' not in correct format.", uptime_str)
+            return 0
+
+    def _iterate_kvps(self, offset):
+        """iterate the kvp file from the current offset."""
+        try:
+            with open(self._kvp_file_path, 'rb+') as f:
+                self.kvp_file = f
+                fcntl.flock(f, fcntl.LOCK_EX)
+                f.seek(offset)
+                record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                while len(record_data) == self.HV_KVP_RECORD_SIZE:
+                    self._current_offset += self.HV_KVP_RECORD_SIZE
+                    kvp_item = self._decode_kvp_item(record_data)
+                    yield kvp_item
+                    record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                fcntl.flock(f, fcntl.LOCK_UN)
+        finally:
+            self.kvp_file = None
+
+    def _event_key(self, event):
+        """
+        the event key format is:
+        CLOUD_INIT|<incarnation number>|<event_type>|<event name>
+        """
+        return u"{0}|{1}|{2}".format(self.event_key_prefix,
+                                     event.event_type, event.name)
+
+    def _encode_kvp_item(self, key, value):
+        data = (struct.pack("%ds%ds" % (
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
+                key.encode('utf-8'), value.encode('utf-8')))
+        return data
+
+    def _decode_kvp_item(self, record_data):
+        record_data_len = len(record_data)
+        if record_data_len != self.HV_KVP_RECORD_SIZE:
+            raise ReportException(
+                "record_data len not correct {0} {1}."
+                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
+        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
+             .strip('\x00'))
+        v = (
+            record_data[
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
+            ].decode('utf-8').strip('\x00'))
+
+        return {'key': k, 'value': v}
+
+    def _update_kvp_item(self, record_data):
+        if self.kvp_file is None:
+            raise ReportException(
+                "kvp file '{0}' not opened."
+                .format(self._kvp_file_path))
+        self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+        self.kvp_file.write(record_data)
+
+    def _append_kvp_item(self, record_data):
+        with open(self._kvp_file_path, 'rb+') as f:
+            fcntl.flock(f, fcntl.LOCK_EX)
+            # seek to end of the file
+            f.seek(0, 2)
+            f.write(record_data)
+            f.flush()
+            fcntl.flock(f, fcntl.LOCK_UN)
+            self._current_offset = f.tell()
+
+    def _break_down(self, key, meta_data, description):
+        del meta_data[self.MSG_KEY]
+        des_in_json = json.dumps(description)
+        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+        i = 0
+        result_array = []
+        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+        while True:
+            meta_data[self.DESC_IDX_KEY] = i
+            meta_data[self.MSG_KEY] = ''
+            data_without_desc = json.dumps(meta_data,
+                                           separators=self.JSON_SEPARATORS)
+            room_for_desc = (
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+                len(data_without_desc) - 8)
+            value = data_without_desc.replace(
+                message_place_holder,
+                '"{key}":"{desc}"'.format(
+                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+            result_array.append(self._encode_kvp_item(key, value))
+            i += 1
+            des_in_json = des_in_json[room_for_desc:]
+            if len(des_in_json) == 0:
+                break
+        return result_array
+
+    def _encode_event(self, event):
+        """
+        Encode the event into kvp data bytes; if the event content exceeds
+        the maximum kvp value length, it is split into multiple slices.
+ """ + key = self._event_key(event) + meta_data = { + "name": event.name, + "type": event.event_type, + "ts": (datetime.utcfromtimestamp(event.timestamp) + .isoformat() + 'Z'), + } + if hasattr(event, self.RESULT_KEY): + meta_data[self.RESULT_KEY] = event.result + meta_data[self.MSG_KEY] = event.description + value = json.dumps(meta_data, separators=self.JSON_SEPARATORS) + # if it reaches the maximum length of kvp value, + # break it down to slices. + # this should be very corner case. + if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE: + return self._break_down(key, meta_data, event.description) + else: + data = self._encode_kvp_item(key, value) + return [data] + + def _publish_event_routine(self): + while True: + event = None + try: + # acquire the lock. + event = self.q.get_nowait() + need_append = True + try: + if not os.path.exists(self._kvp_file_path): + LOG.warning( + "skip writing events %s to %s. file not present.", + event.as_string(), + self._kvp_file_path) + encoded_event = self._encode_event(event) + # for each encoded_event + for encoded_data in (encoded_event): + for kvp in self._iterate_kvps(self._current_offset): + match = ( + re.match( + r"^{0}\|(\d+)\|.+" + .format(self.EVENT_PREFIX), + kvp['key'] + )) + if match: + match_groups = match.groups(0) + if int(match_groups[0]) < self.incarnation_no: + need_append = False + self._update_kvp_item(encoded_data) + break + if need_append: + self._append_kvp_item(encoded_data) + except IOError as e: + LOG.warning( + "failed posting event to kvp: %s e:%s", + event.as_string(), e) + self.running = False + break + finally: + self.q.task_done() + except queue.Empty: + with self.queue_lock: + # double check the queue is empty + if self.q.empty(): + self.running = False + break + + def trigger_publish_event(self): + if not self.running: + with self.running_lock: + if not self.running: + self.running = True + thread = threading.Thread( + target=self._publish_event_routine) + thread.start() + + # since the saving to the kvp pool can be a time costing task + # if the kvp pool already contains a chunk of data, + # so defer it to another thread. + def publish_event(self, event): + if (not self._event_types or event.event_type in self._event_types): + with self.queue_lock: + self.q.put(event) + self.trigger_publish_event() + + available_handlers = DictRegistry() available_handlers.register_item('log', LogHandler) available_handlers.register_item('print', PrintHandler) available_handlers.register_item('webhook', WebHookHandler) +available_handlers.register_item('hyperv', HyperVKvpReportingHandler) # vi: ts=4 expandtab diff --git a/cloudinit/stages.py b/cloudinit/stages.py index c132b57d..8874d405 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -510,7 +510,7 @@ class Init(object): # The default frequency if handlers don't have one 'frequency': frequency, # This will be used when new handlers are found - # to help write there contents to files with numbered + # to help write their contents to files with numbered # names... 'handlercount': 0, 'excluded': excluded, diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py new file mode 100644 index 00000000..2e64c6c7 --- /dev/null +++ b/tests/unittests/test_reporting_hyperv.py @@ -0,0 +1,134 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.reporting import events +from cloudinit.reporting import handlers + +import json +import os + +from cloudinit import util +from cloudinit.tests.helpers import CiTestCase + + +class TestKvpEncoding(CiTestCase): + def test_encode_decode(self): + kvp = {'key': 'key1', 'value': 'value1'} + kvp_reporting = handlers.HyperVKvpReportingHandler() + data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) + self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) + decoded_kvp = kvp_reporting._decode_kvp_item(data) + self.assertEqual(kvp, decoded_kvp) + + +class TextKvpReporter(CiTestCase): + def setUp(self): + super(TextKvpReporter, self).setUp() + self.tmp_file_path = self.tmp_path('kvp_pool_file') + util.ensure_file(self.tmp_file_path) + + def test_event_type_can_be_filtered(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path, + event_types=['foo', 'bar']) + + reporter.publish_event( + events.ReportingEvent('foo', 'name', 'description')) + reporter.publish_event( + events.ReportingEvent('some_other', 'name', 'description3')) + reporter.q.join() + + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + reporter.publish_event( + events.ReportingEvent('bar', 'name', 'description2')) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(2, len(kvps)) + + self.assertIn('foo', kvps[0]['key']) + self.assertIn('bar', kvps[1]['key']) + self.assertNotIn('some_other', kvps[0]['key']) + self.assertNotIn('some_other', kvps[1]['key']) + + def test_events_are_over_written(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + + self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + + reporter.publish_event( + events.ReportingEvent('foo', 'name1', 'description')) + reporter.publish_event( + events.ReportingEvent('foo', 'name2', 'description')) + reporter.q.join() + self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + + reporter2 = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter2.incarnation_no = reporter.incarnation_no + 1 + reporter2.publish_event( + events.ReportingEvent('foo', 'name3', 'description')) + reporter2.q.join() + + self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) + + def test_events_with_higher_incarnation_not_over_written(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + + self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + + reporter.publish_event( + events.ReportingEvent('foo', 'name1', 'description')) + reporter.publish_event( + events.ReportingEvent('foo', 'name2', 'description')) + reporter.q.join() + self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + + reporter3 = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter3.incarnation_no = reporter.incarnation_no - 1 + reporter3.publish_event( + events.ReportingEvent('foo', 'name3', 'description')) + reporter3.q.join() + self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) + + def test_finish_event_result_is_logged(self): + reporter = handlers.HyperVKvpReportingHandler( + kvp_file_path=self.tmp_file_path) + reporter.publish_event( + events.FinishReportingEvent('name2', 'description1', + result=events.status.FAIL)) + reporter.q.join() + self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value']) + + def test_file_operation_issue(self): + os.remove(self.tmp_file_path) + reporter = handlers.HyperVKvpReportingHandler( + 
kvp_file_path=self.tmp_file_path)
+        reporter.publish_event(
+            events.FinishReportingEvent('name2', 'description1',
+                                        result=events.status.FAIL))
+        reporter.q.join()
+
+    def test_event_very_long(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+        long_event = events.FinishReportingEvent(
+            'event_name',
+            description,
+            result=events.status.FAIL)
+        reporter.publish_event(long_event)
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(3, len(kvps))
+
+        # restore from the kvp to see the content are all there
+        full_description = ''
+        for i in range(len(kvps)):
+            msg_slice = json.loads(kvps[i]['value'])
+            self.assertEqual(msg_slice['msg_i'], i)
+            full_description += msg_slice['msg']
+        self.assertEqual(description, full_description)
--
cgit v1.2.3


From 9c35f9762028b8bf15cdcd6b42c0fafc233ddda3 Mon Sep 17 00:00:00 2001
From: Pengpeng Sun
Date: Tue, 28 Aug 2018 22:52:28 +0000
Subject: VMWare: Fix a network config bug in vm with static IPv4 and no
 gateway.

The issue is that when customizing a VM with a static IPv4 address and
no gateway, cloud-init would still extend the route list and then
iterate over a gateways list that is None. This fix makes sure that
when no gateway is present, the route list is not extended.

LP: #1766538
---
 cloudinit/sources/helpers/vmware/imc/config_nic.py |   2 +-
 tests/unittests/test_vmware_config_file.py         | 115 +++++++++++++++++++++
 2 files changed, 116 insertions(+), 1 deletion(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 3ef8c624..e1890e23 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -164,7 +164,7 @@ class NicConfigurator(object):
             return ([subnet], route_list)
 
         # Add routes if there is no primary nic
-        if not self._primaryNic:
+        if not self._primaryNic and v4.gateways:
             route_list.extend(self.gen_ipv4_route(nic,
                                                   v4.gateways,
                                                   v4.netmask))
 
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 036f6879..602dedb0 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -2,11 +2,15 @@
 # Copyright (C) 2016 VMware INC.
 #
 # Author: Sankar Tanguturi
+#         Pengpeng Sun
 #
 # This file is part of cloud-init. See LICENSE file for license information.
import logging +import os import sys +import tempfile +import textwrap from cloudinit.sources.DataSourceOVF import get_network_config_from_conf from cloudinit.sources.DataSourceOVF import read_vmware_imc @@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) self.assertEqual("test-script", conf.custom_script_name) + +class TestVmwareNetConfig(CiTestCase): + """Test conversion of vmware config to cloud-init config.""" + + def _get_NicConfigurator(self, text): + fp = None + try: + with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), + delete=False) as fp: + fp.write(text) + fp.close() + cfg = Config(ConfigFile(fp.name)) + return NicConfigurator(cfg.nics, use_system_devices=False) + finally: + if fp: + os.unlink(fp.name) + + def test_non_primary_nic_without_gateway(self): + """A non primary nic set is not required to have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], + nc.generate()) + + def test_non_primary_nic_with_gateway(self): + """A non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}, + {'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}], + nc.generate()) + + def test_a_primary_nic_with_gateway(self): + """A primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + PRIMARY = true + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'gateway': '10.20.87.253'}]}], + nc.generate()) + + # vi: ts=4 expandtab -- cgit v1.2.3 From db50bc0d999e3a90136864a774f85e4e15b144e8 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 5 Sep 2018 14:17:16 +0000 Subject: sysconfig: refactor sysconfig to accept distro specific templates paths Multiple distros use sysconfig format but have different content and paths to certain files. Update distros to specify these template paths in their renderer_configs dictionary. 
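To make the template path shape concrete, here is a minimal sketch of how
the sysconfig renderer expands these per-distro paths (the dictionary
layout is copied from the rhel entry in this patch; the '/etc/sysconfig'
base directory and 'eth0' device name are hypothetical example values):

    # Per-distro sysconfig template paths, as a Red Hat style distro
    # declares them in its renderer_configs dictionary.
    rhel_templates = {
        'control': 'etc/sysconfig/network',
        'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
        'route_templates': {
            'ipv4': '%(base)s/network-scripts/route-%(name)s',
            'ipv6': '%(base)s/network-scripts/route6-%(name)s',
        },
    }

    # The renderer fills in the base directory and interface name.
    base = '/etc/sysconfig'
    print(rhel_templates['iface_templates'] % {'base': base, 'name': 'eth0'})
    # -> /etc/sysconfig/network-scripts/ifcfg-eth0
    print(rhel_templates['route_templates']['ipv6']
          % {'base': base, 'name': 'eth0'})
    # -> /etc/sysconfig/network-scripts/route6-eth0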
--- cloudinit/cmd/devel/net_convert.py | 14 +- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/opensuse.py | 15 +- cloudinit/distros/rhel.py | 10 + cloudinit/net/eni.py | 2 +- cloudinit/net/netplan.py | 2 +- cloudinit/net/renderer.py | 9 +- cloudinit/net/sysconfig.py | 78 +- cloudinit/tests/helpers.py | 11 +- .../unittests/test_datasource/test_configdrive.py | 6 +- tests/unittests/test_distros/test_netconfig.py | 971 +++++++++------------ tests/unittests/test_net.py | 544 +++++++++++- 12 files changed, 1029 insertions(+), 635 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 271dc5ed..a0f58a0a 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -10,6 +10,7 @@ import yaml from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure +from cloudinit import distros from cloudinit.net import eni, netplan, network_state, sysconfig from cloudinit import log @@ -36,6 +37,11 @@ def get_parser(parser=None): metavar="PATH", help="directory to place output in", required=True) + parser.add_argument("-D", "--distro", + choices=[item for sublist in + distros.OSFAMILIES.values() + for item in sublist], + required=True) parser.add_argument("-m", "--mac", metavar="name,mac", action='append', @@ -96,14 +102,20 @@ def handle_args(name, args): sys.stderr.write('\n'.join([ "", "Internal State", yaml.dump(ns, default_flow_style=False, indent=4), ""])) + distro_cls = distros.fetch(args.distro) + distro = distro_cls(args.distro, {}, None) + config = {} if args.output_kind == "eni": r_cls = eni.Renderer + config = distro.renderer_configs.get('eni') elif args.output_kind == "netplan": r_cls = netplan.Renderer + config = distro.renderer_configs.get('netplan') else: r_cls = sysconfig.Renderer + config = distro.renderer_configs.get('sysconfig') - r = r_cls() + r = r_cls(config=config) sys.stderr.write(''.join([ "Read input format '%s' from '%s'.\n" % ( args.kind, args.network_data.name), diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index fde054e9..d9101ce6 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -91,7 +91,7 @@ class Distro(object): LOG.debug("Selected renderer '%s' from priority list: %s", name, priority) renderer = render_cls(config=self.renderer_configs.get(name)) - renderer.render_network_config(network_config=network_config) + renderer.render_network_config(network_config) return [] def _find_tz_file(self, tz): diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 9f90e95e..1fe896aa 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -28,13 +28,23 @@ class Distro(distros.Distro): hostname_conf_fn = '/etc/HOSTNAME' init_cmd = ['service'] locale_conf_fn = '/etc/sysconfig/language' - network_conf_fn = '/etc/sysconfig/network' + network_conf_fn = '/etc/sysconfig/network/config' network_script_tpl = '/etc/sysconfig/network/ifcfg-%s' resolve_conf_fn = '/etc/resolv.conf' route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' systemd_hostname_conf_fn = '/etc/hostname' systemd_locale_conf_fn = '/etc/locale.conf' tz_local_fn = '/etc/localtime' + renderer_configs = { + 'sysconfig': { + 'control': 'etc/sysconfig/network/config', + 'iface_templates': '%(base)s/network/ifcfg-%(name)s', + 'route_templates': { + 'ipv4': '%(base)s/network/ifroute-%(name)s', + 'ipv6': '%(base)s/network/ifroute-%(name)s', + } + } + } def __init__(self, 
name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -208,6 +218,9 @@ class Distro(distros.Distro): nameservers, searchservers) return dev_names + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + @property def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 1fecb619..ff513438 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -39,6 +39,16 @@ class Distro(distros.Distro): resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" usr_lib_exec = "/usr/libexec" + renderer_configs = { + 'sysconfig': { + 'control': 'etc/sysconfig/network', + 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s', + 'route_templates': { + 'ipv4': '%(base)s/network-scripts/route-%(name)s', + 'ipv6': '%(base)s/network-scripts/route6-%(name)s' + } + } + } def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 80be2429..c6f631a9 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -480,7 +480,7 @@ class Renderer(renderer.Renderer): return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n" - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, target=None): fpeni = util.target_path(target, self.eni_path) util.ensure_dir(os.path.dirname(fpeni)) header = self.eni_header if self.eni_header else "" diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 6352e78c..bc1087f9 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -189,7 +189,7 @@ class Renderer(renderer.Renderer): self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) - def render_network_state(self, network_state, target): + def render_network_state(self, network_state, templates=None, target=None): # check network state for version # if v2, then extract network_state.config # else render_v2_from_state diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 57652e27..5f32e90f 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -45,11 +45,14 @@ class Renderer(object): return content.getvalue() @abc.abstractmethod - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, + target=None): """Render network state.""" - def render_network_config(self, network_config, target=None): + def render_network_config(self, network_config, templates=None, + target=None): return self.render_network_state( - network_state=parse_net_config_data(network_config), target=target) + network_state=parse_net_config_data(network_config), + templates=templates, target=target) # vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 3d719238..66e970e0 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -91,19 +91,20 @@ class ConfigMap(object): class Route(ConfigMap): """Represents a route configuration.""" - route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' - route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' - - def __init__(self, route_name, base_sysconf_dir): + def __init__(self, route_name, base_sysconf_dir, + ipv4_tpl, ipv6_tpl): super(Route, self).__init__() self.last_idx = 1 self.has_set_default_ipv4 = False 
self.has_set_default_ipv6 = False self._route_name = route_name self._base_sysconf_dir = base_sysconf_dir + self.route_fn_tpl_ipv4 = ipv4_tpl + self.route_fn_tpl_ipv6 = ipv6_tpl def copy(self): - r = Route(self._route_name, self._base_sysconf_dir) + r = Route(self._route_name, self._base_sysconf_dir, + self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6) r._conf = self._conf.copy() r.last_idx = self.last_idx r.has_set_default_ipv4 = self.has_set_default_ipv4 @@ -169,18 +170,22 @@ class Route(ConfigMap): class NetInterface(ConfigMap): """Represents a sysconfig/networking-script (and its config + children).""" - iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s' - iface_types = { 'ethernet': 'Ethernet', 'bond': 'Bond', 'bridge': 'Bridge', } - def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'): + def __init__(self, iface_name, base_sysconf_dir, templates, + kind='ethernet'): super(NetInterface, self).__init__() self.children = [] - self.routes = Route(iface_name, base_sysconf_dir) + self.templates = templates + route_tpl = self.templates.get('route_templates') + self.routes = Route(iface_name, base_sysconf_dir, + ipv4_tpl=route_tpl.get('ipv4'), + ipv6_tpl=route_tpl.get('ipv6')) + self.iface_fn_tpl = self.templates.get('iface_templates') self.kind = kind self._iface_name = iface_name @@ -213,7 +218,8 @@ class NetInterface(ConfigMap): 'name': self.name}) def copy(self, copy_children=False, copy_routes=False): - c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind) + c = NetInterface(self.name, self._base_sysconf_dir, + self.templates, kind=self._kind) c._conf = self._conf.copy() if copy_children: c.children = list(self.children) @@ -251,6 +257,8 @@ class Renderer(renderer.Renderer): ('bridge_bridgeprio', 'PRIO'), ]) + templates = {} + def __init__(self, config=None): if not config: config = {} @@ -261,6 +269,11 @@ class Renderer(renderer.Renderer): nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf' self.networkmanager_conf_path = config.get('networkmanager_conf_path', nm_conf_path) + self.templates = { + 'control': config.get('control'), + 'iface_templates': config.get('iface_templates'), + 'route_templates': config.get('route_templates'), + } @classmethod def _render_iface_shared(cls, iface, iface_cfg): @@ -512,7 +525,7 @@ class Renderer(renderer.Renderer): return content_str @staticmethod - def _render_networkmanager_conf(network_state): + def _render_networkmanager_conf(network_state, templates=None): content = networkmanager_conf.NetworkManagerConf("") # If DNS server information is provided, configure @@ -556,14 +569,17 @@ class Renderer(renderer.Renderer): cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod - def _render_sysconfig(cls, base_sysconf_dir, network_state): + def _render_sysconfig(cls, base_sysconf_dir, network_state, + templates=None): '''Given state, return /etc/sysconfig files + contents''' + if not templates: + templates = cls.templates iface_contents = {} for iface in network_state.iter_interfaces(): if iface['type'] == "loopback": continue iface_name = iface['name'] - iface_cfg = NetInterface(iface_name, base_sysconf_dir) + iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates) cls._render_iface_shared(iface, iface_cfg) iface_contents[iface_name] = iface_cfg cls._render_physical_interfaces(network_state, iface_contents) @@ -578,17 +594,21 @@ class Renderer(renderer.Renderer): if iface_cfg: contents[iface_cfg.path] = iface_cfg.to_string() if iface_cfg.routes: - 
contents[iface_cfg.routes.path_ipv4] = \ - iface_cfg.routes.to_string("ipv4") - contents[iface_cfg.routes.path_ipv6] = \ - iface_cfg.routes.to_string("ipv6") + for cpath, proto in zip([iface_cfg.routes.path_ipv4, + iface_cfg.routes.path_ipv6], + ["ipv4", "ipv6"]): + if cpath not in contents: + contents[cpath] = iface_cfg.routes.to_string(proto) return contents - def render_network_state(self, network_state, target=None): + def render_network_state(self, network_state, templates=None, target=None): + if not templates: + templates = self.templates file_mode = 0o644 base_sysconf_dir = util.target_path(target, self.sysconf_dir) for path, data in self._render_sysconfig(base_sysconf_dir, - network_state).items(): + network_state, + templates=templates).items(): util.write_file(path, data, file_mode) if self.dns_path: dns_path = util.target_path(target, self.dns_path) @@ -598,7 +618,8 @@ class Renderer(renderer.Renderer): if self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) - nm_conf_content = self._render_networkmanager_conf(network_state) + nm_conf_content = self._render_networkmanager_conf(network_state, + templates) if nm_conf_content: util.write_file(nm_conf_path, nm_conf_content, file_mode) if self.netrules_path: @@ -606,13 +627,16 @@ class Renderer(renderer.Renderer): netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) - # always write /etc/sysconfig/network configuration - sysconfig_path = util.target_path(target, "etc/sysconfig/network") - netcfg = [_make_header(), 'NETWORKING=yes'] - if network_state.use_ipv6: - netcfg.append('NETWORKING_IPV6=yes') - netcfg.append('IPV6_AUTOCONF=no') - util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode) + sysconfig_path = util.target_path(target, templates.get('control')) + # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos + if sysconfig_path.endswith('network'): + util.ensure_dir(os.path.dirname(sysconfig_path)) + netcfg = [_make_header(), 'NETWORKING=yes'] + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') + util.write_file(sysconfig_path, + "\n".join(netcfg) + "\n", file_mode) def available(target=None): diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index de24e25d..9a21426e 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -16,9 +16,9 @@ import six import unittest2 try: - from contextlib import ExitStack + from contextlib import ExitStack, contextmanager except ImportError: - from contextlib2 import ExitStack + from contextlib2 import ExitStack, contextmanager try: from configparser import ConfigParser @@ -326,6 +326,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): self.patchOS(root) return root + @contextmanager + def reRooted(self, root=None): + try: + yield self.reRoot(root) + finally: + self.patched_funcs.close() + class HttprettyTestCase(CiTestCase): # necessary as http_proxy gets in the way of httpretty diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 68400f22..7e6fcbbf 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -642,7 +642,7 @@ class TestConvertNetworkData(CiTestCase): routes) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -664,7 +664,7 @@ class TestConvertNetworkData(CiTestCase): eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -695,7 +695,7 @@ class TestConvertNetworkData(CiTestCase): known_macs=KNOWN_MACS) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 7765e408..740fb76c 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,24 +2,19 @@ import os from six import StringIO -import stat from textwrap import dedent try: from unittest import mock except ImportError: import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers -from cloudinit.net import eni from cloudinit import settings -from cloudinit.tests.helpers import FilesystemMockingTestCase +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, dir2dict, populate_dir) from cloudinit import util @@ -82,7 +77,7 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'type': 'physical'}], 'version': 1} -V1_NET_CFG_OUTPUT = """ +V1_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. 
Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -116,7 +111,7 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', 'version': 1} -V1_TO_V2_NET_CFG_OUTPUT = """ +V1_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -145,7 +140,7 @@ V2_NET_CFG = { } -V2_TO_V2_NET_CFG_OUTPUT = """ +V2_TO_V2_NET_CFG_OUTPUT = """\ # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. # To disable cloud-init's network configuration capabilities, write a file @@ -176,21 +171,10 @@ class WriteBuffer(object): return self.buffer.getvalue() -class TestNetCfgDistro(FilesystemMockingTestCase): - - frbsd_ifout = """\ -hn0: flags=8843 metric 0 mtu 1500 - options=51b - ether 00:15:5d:4c:73:00 - inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 - inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 - nd6 options=23 - media: Ethernet autoselect (10Gbase-T ) - status: active -""" +class TestNetCfgDistroBase(FilesystemMockingTestCase): def setUp(self): - super(TestNetCfgDistro, self).setUp() + super(TestNetCfgDistroBase, self).setUp() self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') self.add_patch('cloudinit.util.system_info', 'm_sysinfo') self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} @@ -204,144 +188,6 @@ hn0: flags=8843 metric 0 mtu 1500 paths = helpers.Paths({}) return cls(dname, cfg.get('system_info'), paths) - def test_simple_write_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - ub_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_eni_ub(self): - ub_distro = self._get_distro('ubuntu') - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - # eni availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(eni, 'available', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.eni.glob.glob", - return_value=[])) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 2) - eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' - self.assertIn(eni_name, write_bufs) - write_buf = write_bufs[eni_name] - 
self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v1_to_netplan_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - mocks.enter_context( - mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=devlist)) - - ub_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V1_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_v2_passthrough_ub(self): - renderers = ['netplan'] - devlist = ['eth0', 'lo'] - ub_distro = self._get_distro('ubuntu', renderers=renderers) - with ExitStack() as mocks: - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'ensure_dir')) - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=(0, 0))) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - # FreeBSD does not have '/sys/class/net' file, - # so we need mock here. 
- mocks.enter_context( - mock.patch.object(os, 'listdir', return_value=devlist)) - ub_distro.apply_network_config(V2_NET_CFG, False) - - self.assertEqual(len(write_bufs), 1) - netplan_name = '/etc/netplan/50-cloud-init.yaml' - self.assertIn(netplan_name, write_bufs) - write_buf = write_bufs[netplan_name] - self.assertEqual(str(write_buf).strip(), - V2_TO_V2_NET_CFG_OUTPUT.strip()) - self.assertEqual(write_buf.mode, 0o644) - def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) b2 = dict(SysConf(blob2.strip().splitlines())) @@ -353,6 +199,20 @@ hn0: flags=8843 metric 0 mtu 1500 for (k, v) in b1.items(): self.assertEqual(v, b2[k]) + +class TestNetCfgDistroFreebsd(TestNetCfgDistroBase): + + frbsd_ifout = """\ +hn0: flags=8843 metric 0 mtu 1500 + options=51b + ether 00:15:5d:4c:73:00 + inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 + inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 + nd6 options=23 + media: Ethernet autoselect (10Gbase-T ) + status: active +""" + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') def test_get_ip_nic_freebsd(self, ifname_out, iflist): @@ -376,349 +236,33 @@ hn0: flags=8843 metric 0 mtu 1500 res = frbsd_distro.generate_fallback_config() self.assertIsNotNone(res) - def test_simple_write_rh(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - - rh_distro.apply_network(BASE_NET_CFG, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="dhcp" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 
0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - # sysconfig availability checks - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG, False) - - self.assertEqual(len(write_bufs), 5) - - # eth0 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=192.168.1.254 -IPADDR=192.168.1.5 -NETMASK=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - # eth1 - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 
0.7 -NETWORKING=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_write_ipv6_rhel(self): - rh_distro = self._get_distro('rhel') - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=False)) - rh_distro.apply_network(BASE_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 4) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] - expected_buf = ''' -DEVICE="lo" -ONBOOT=yes -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -DEVICE="eth0" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.5" -ONBOOT=yes -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::2" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -DEVICE="eth1" -BOOTPROTO="static" -NETMASK="255.255.255.0" -IPADDR="192.168.1.6" -ONBOOT=no -GATEWAY="192.168.1.254" -BROADCAST="192.168.1.0" -IPV6INIT=yes -IPV6ADDR="2607:f0d0:1002:0011::3" -IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_ipv6_rh(self): - renderers = ['sysconfig'] - rh_distro = self._get_distro('rhel', renderers=renderers) - - write_bufs = {} - - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'which', return_value=True)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', return_value='')) - mocks.enter_context( - mock.patch.object(os.path, 'isfile', return_value=True)) - - rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - - self.assertEqual(len(write_bufs), 5) - - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. 
-# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -IPV6ADDR=2607:f0d0:1002:0011::2/64 -IPV6INIT=yes -IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', - write_bufs) - write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] - expected_buf = ''' -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=dhcp -DEVICE=eth1 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - self.assertIn('/etc/sysconfig/network', write_bufs) - write_buf = write_bufs['/etc/sysconfig/network'] - expected_buf = ''' -# Created by cloud-init v. 0.7 -NETWORKING=yes -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - def test_simple_write_freebsd(self): fbsd_distro = self._get_distro('freebsd') - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network(BASE_NET_CFG, False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" -ifconfig_vtnet1="DHCP" -defaultrouter="192.168.1.254" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) - - def test_apply_network_config_fallback(self): + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network(BASE_NET_CFG, False) + results = dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals( + dedent('''\ + ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" + ifconfig_vtnet1="DHCP" + defaultrouter="192.168.1.254" + '''), results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + + def test_apply_network_config_fallback_freebsd(self): fbsd_distro = self._get_distro('freebsd') # a weak attempt to verify that we don't have an implementation @@ -735,68 +279,324 @@ defaultrouter="192.168.1.254" "subnets": [{"type": "dhcp"}]}], 'version': 1} - write_bufs = {} + rc_conf = '/etc/rc.conf' read_bufs = { - '/etc/rc.conf': '', - '/etc/resolv.conf': '', + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', } - def 
replace_write(filename, content, mode=0o644, omode="wb"): - buf = WriteBuffer() - buf.mode = mode - buf.omode = omode - buf.write(content) - write_bufs[filename] = buf - - def replace_read(fname, read_cb=None, quiet=False): - if fname not in read_bufs: - if fname in write_bufs: - return str(write_bufs[fname]) - raise IOError("%s not found" % fname) - else: - if fname in write_bufs: - return str(write_bufs[fname]) - return read_bufs[fname] - - with ExitStack() as mocks: - mocks.enter_context( - mock.patch.object(util, 'subp', return_value=('vtnet0', ''))) - mocks.enter_context( - mock.patch.object(os.path, 'exists', return_value=False)) - mocks.enter_context( - mock.patch.object(util, 'write_file', replace_write)) - mocks.enter_context( - mock.patch.object(util, 'load_file', replace_read)) - - fbsd_distro.apply_network_config(mynetcfg, bring_up=False) - - self.assertIn('/etc/rc.conf', write_bufs) - write_buf = write_bufs['/etc/rc.conf'] - expected_buf = ''' -ifconfig_vtnet0="DHCP" -''' - self.assertCfgEquals(expected_buf, str(write_buf)) - self.assertEqual(write_buf.mode, 0o644) + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network_config(mynetcfg, bring_up=False) + results = dir2dict(tmpd) - def test_simple_write_opensuse(self): - """Opensuse network rendering writes appropriate sysconfg files.""" - tmpdir = self.tmp_dir() - self.patchOS(tmpdir) - self.patchUtils(tmpdir) - distro = self._get_distro('opensuse') + self.assertIn(rc_conf, results) + self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) - distro.apply_network(BASE_NET_CFG, False) - lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo') - eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0') - eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1') +class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroUbuntuEni, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['eni']) + + def eni_path(self): + return '/etc/network/interfaces.d/50-cloud-init.cfg' + + def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.eni.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_ub(self): + expected_cfgs = { + self.eni_path(): BASE_NET_CFG, + } + + # ub_distro.apply_network(BASE_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_eni_ub(self): expected_cfgs = { - lo_path: dedent(''' + self.eni_path(): V1_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): + def setUp(self): + 
super(TestNetCfgDistroUbuntuNetplan, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['netplan']) + self.devlist = ['eth0', 'lo'] + + def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=True): + with mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=self.devlist): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' + + def test_apply_network_config_v1_to_netplan_ub(self): + expected_cfgs = { + self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, + } + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_v2_passthrough_ub(self): + expected_cfgs = { + self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V2_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroRedhat(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroRedhat, self).setUp() + self.distro = self._get_distro('rhel', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname + + def control_path(self): + return '/etc/sysconfig/network' + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_rh(self): + expected_cfgs = { + self.ifcfg_path('lo'): dedent("""\ + DEVICE="lo" + ONBOOT=yes + """), + self.ifcfg_path('eth0'): dedent("""\ + DEVICE="eth0" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.5" + ONBOOT=yes + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + """), + self.ifcfg_path('eth1'): dedent("""\ + DEVICE="eth1" + BOOTPROTO="dhcp" + ONBOOT=yes + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # rh_distro.apply_network(BASE_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # 
rh_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_write_ipv6_rhel(self): + expected_cfgs = { + self.ifcfg_path('lo'): dedent("""\ + DEVICE="lo" + ONBOOT=yes + """), + self.ifcfg_path('eth0'): dedent("""\ + DEVICE="eth0" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.5" + ONBOOT=yes + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + IPV6INIT=yes + IPV6ADDR="2607:f0d0:1002:0011::2" + IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" + """), + self.ifcfg_path('eth1'): dedent("""\ + DEVICE="eth1" + BOOTPROTO="static" + NETMASK="255.255.255.0" + IPADDR="192.168.1.6" + ONBOOT=no + GATEWAY="192.168.1.254" + BROADCAST="192.168.1.0" + IPV6INIT=yes + IPV6ADDR="2607:f0d0:1002:0011::3" + IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network(BASE_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroOpensuse, self).setUp() + self.distro = self._get_distro('opensuse', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network/ifcfg-%s' % ifname + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_simple_write_opensuse(self): + """Opensuse network rendering writes appropriate sysconfig files.""" + expected_cfgs = { + self.ifcfg_path('lo'): dedent(''' STARTMODE="auto" USERCONTROL="no" FIREWALL="no" '''), - eth0_path: dedent(''' + self.ifcfg_path('eth0'): dedent(''' BOOTPROTO="static" BROADCAST="192.168.1.0" GATEWAY="192.168.1.254" @@ -806,18 +606,77 @@ ifconfig_vtnet0="DHCP" USERCONTROL="no" ETHTOOL_OPTIONS="" '''), - eth1_path: dedent(''' + self.ifcfg_path('eth1'): dedent(''' BOOTPROTO="dhcp" STARTMODE="auto" USERCONTROL="no" ETHTOOL_OPTIONS="" ''') } - for cfgpath in (lo_path, eth0_path, eth1_path): - self.assertCfgEquals( - expected_cfgs[cfgpath], - util.load_file(cfgpath)) - file_stat = os.stat(cfgpath) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + # distro.apply_network(BASE_NET_CFG, False) + 
self._apply_and_verify(self.distro.apply_network, + BASE_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +def get_mode(path, target=None): + return os.stat(util.target_path(target, path)).st_mode & 0o777 # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 58e5ea14..05d5c72c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import net +from cloudinit import distros from cloudinit.net import cmdline from cloudinit.net import ( eni, interface_has_own_mac, natural_sort_key, netplan, network_state, @@ -129,7 +130,40 @@ OS_SAMPLES = [ 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -162,6 +196,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -195,7 +230,42 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPADDR1=10.0.0.10 +NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. @@ -230,6 +300,7 @@ dns = none ('etc/udev/rules.d/70-persistent-net.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, { 'in_data': { @@ -283,7 +354,44 @@ dns = none 'in_macs': { 'fa:16:3e:ed:9a:59': 'eth0', }, - 'out_sysconfig': [ + 'out_sysconfig_opensuse': [ + ('etc/sysconfig/network/ifcfg-eth0', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPV6ADDR=2001:DB8::10/64 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +IPV6INIT=yes +IPV6_DEFAULTGW=2001:DB8::1 +NETMASK=255.255.252.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip()), + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/NetworkManager/conf.d/99-cloud-init.conf', + """ +# Created by cloud-init on instance boot automatically, do not edit. +# +[main] +dns = none +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], + 'out_sysconfig_rhel': [ ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. 
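

The hunks above split each OS_SAMPLES entry's expected output into
'out_sysconfig_opensuse' and 'out_sysconfig_rhel' keys because the
sysconfig renderer is now instantiated with per-distro configuration
(for example, rhel writes ifcfg files under
/etc/sysconfig/network-scripts/ while opensuse writes under
/etc/sysconfig/network/). A minimal sketch of the lookup that the new
_get_renderer() helpers in this file perform; the 'rhel' argument is
only an example value:

    from cloudinit import distros
    from cloudinit.net import sysconfig

    def renderer_for(distro_name):
        # Each Distro class exposes a renderer_configs dict; its
        # 'sysconfig' entry carries distro-specific settings such as
        # the ifcfg directory the renderer should write into.
        distro_cls = distros.fetch(distro_name)
        return sysconfig.Renderer(
            config=distro_cls.renderer_configs.get('sysconfig'))

    renderer = renderer_for('rhel')  # or 'opensuse'
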
@@ -1154,7 +1262,59 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true version: 2 """), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no + """), + 'ifcfg-bond0s0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + 'ifroute-bond0': textwrap.dedent("""\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """), + 'ifcfg-bond0s1': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """), + }, + + 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" @@ -1527,7 +1687,7 @@ class TestGenerateFallbackConfig(CiTestCase): # don't set rulepath so eni writes them renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1591,7 +1751,7 @@ iface eth0 inet dhcp # don't set rulepath so eni writes them renderer = eni.Renderer( {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -1682,7 +1842,7 @@ iface eth1 inet dhcp self.assertEqual(0, mock_settle.call_count) -class TestSysConfigRendering(CiTestCase): +class TestRhelSysConfigRendering(CiTestCase): with_logs = True @@ -1690,6 +1850,13 @@ class TestSysConfigRendering(CiTestCase): header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('rhel') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + def _render_and_read(self, network_config=None, state=None, dir=None): if dir is None: dir = self.tmp_dir() @@ -1701,8 +1868,8 @@ class TestSysConfigRendering(CiTestCase): else: raise ValueError("Expected data or state, got neither") - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) return dir2dict(dir) def _compare_files_to_expected(self, expected, found): @@ -1745,8 +1912,8 @@ class TestSysConfigRendering(CiTestCase): render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000' with open(os.path.join(render_dir, render_file)) as fh: @@ -1797,9 +1964,9 @@ USERCTL=no network_cfg = 
openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_multiple_ipv6_default_gateways(self): @@ -1835,9 +2002,9 @@ USERCTL=no network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() with self.assertRaises(ValueError): - renderer.render_network_state(ns, render_dir) + renderer.render_network_state(ns, target=render_dir) self.assertEqual([], os.listdir(render_dir)) def test_openstack_rendering_samples(self): @@ -1849,12 +2016,13 @@ USERCTL=no ex_input, known_macs=ex_mac_addrs) ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) - renderer = sysconfig.Renderer() + renderer = self._get_renderer() # render a multiple times to simulate reboots - renderer.render_network_state(ns, render_dir) - renderer.render_network_state(ns, render_dir) - renderer.render_network_state(ns, render_dir) - for fn, expected_content in os_sample.get('out_sysconfig', []): + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_rhel', + []): with open(os.path.join(render_dir, fn)) as fh: self.assertEqual(expected_content, fh.read()) @@ -1862,8 +2030,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1888,8 +2056,8 @@ USERCTL=no ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) - renderer = sysconfig.Renderer() - renderer.render_network_state(ns, render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = '/etc/sysconfig/network-scripts/' self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) @@ -1906,33 +2074,331 @@ USERCTL=no self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) def test_bond_config(self): + expected_name = 'expected_sysconfig_rhel' entry = NETWORK_CONFIGS['bond'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[expected_name], found) + self._assert_headers(found) + + def test_vlan_config(self): + entry = NETWORK_CONFIGS['vlan'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_bridge_config(self): + entry = NETWORK_CONFIGS['bridge'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_manual_config(self): + 
entry = NETWORK_CONFIGS['manual'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_all_config(self): + entry = NETWORK_CONFIGS['all'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + self.assertNotIn( + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) + + def test_small_config(self): + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_v4_and_v6_static_config(self): + entry = NETWORK_CONFIGS['v4_and_v6_static'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + expected_msg = ( + 'WARNING: Network config: ignoring iface0 device-level mtu:8999' + ' because ipv4 subnet-level mtu:9000 provided.') + self.assertIn(expected_msg, self.logs.getvalue()) + + def test_dhcpv6_only_config(self): + entry = NETWORK_CONFIGS['dhcpv6_only'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + +class TestOpenSuseSysConfigRendering(CiTestCase): + + with_logs = True + + scripts_dir = '/etc/sysconfig/network' + header = ('# Created by cloud-init on instance boot automatically, ' + 'do not edit.\n#\n') + + expected_name = 'expected_sysconfig' + + def _get_renderer(self): + distro_cls = distros.fetch('opensuse') + return sysconfig.Renderer( + config=distro_cls.renderer_configs.get('sysconfig')) + + def _render_and_read(self, network_config=None, state=None, dir=None): + if dir is None: + dir = self.tmp_dir() + + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + renderer = self._get_renderer() + renderer.render_network_state(ns, target=dir) + return dir2dict(dir) + + def _compare_files_to_expected(self, expected, found): + orig_maxdiff = self.maxDiff + expected_d = dict( + (os.path.join(self.scripts_dir, k), util.load_shell_content(v)) + for k, v in expected.items()) + + # only compare the files in scripts_dir + scripts_found = dict( + (k, util.load_shell_content(v)) for k, v in found.items() + if k.startswith(self.scripts_dir)) + try: + self.maxDiff = None + self.assertEqual(expected_d, scripts_found) + finally: + self.maxDiff = orig_maxdiff + + def _assert_headers(self, found): + missing = [f for f in found + if (f.startswith(self.scripts_dir) and + not found[f].startswith(self.header))] + if missing: + raise AssertionError("Missing headers in: %s" % missing) + + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path): + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) 
+ + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + + render_file = 'etc/sysconfig/network/ifcfg-eth1000' + with open(os.path.join(render_dir, render_file)) as fh: + content = fh.read() + expected_content = """ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth1000 +HWADDR=07-1C-C6-75-A4-BE +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip() + self.assertEqual(expected_content, content) + + def test_multiple_ipv4_default_gateways(self): + """ValueError is raised when duplicate ipv4 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_multiple_ipv6_default_gateways(self): + """ValueError is raised when duplicate ipv6 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv6", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [{ + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + }, { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": "::" + }], + "ip_address": "2001:DB8::10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_openstack_rendering_samples(self): + for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() + ex_input = os_sample['in_data'] + ex_mac_addrs = os_sample['in_macs'] + network_cfg = openstack.convert_net_json( + ex_input, known_macs=ex_mac_addrs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = self._get_renderer() + # render a multiple times to simulate reboots + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + renderer.render_network_state(ns, target=render_dir) + for fn, expected_content in os_sample.get('out_sysconfig_opensuse', + []): + with open(os.path.join(render_dir, fn)) as fh: + self.assertEqual(expected_content, fh.read()) + + def 
test_network_config_v1_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=interface0 +GATEWAY=10.0.2.2 +HWADDR=52:54:00:12:34:00 +IPADDR=10.0.2.15 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + + def test_config_with_explicit_loopback(self): + ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + + def test_bond_config(self): + expected_name = 'expected_sysconfig_opensuse' + entry = NETWORK_CONFIGS['bond'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + for fname, contents in entry[expected_name].items(): + print(fname) + print(contents) + print() + print('-- expected ^ | v rendered --') + for fname, contents in found.items(): + print(fname) + print(contents) + print() + self._compare_files_to_expected(entry[expected_name], found) self._assert_headers(found) def test_vlan_config(self): entry = NETWORK_CONFIGS['vlan'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_bridge_config(self): entry = NETWORK_CONFIGS['bridge'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_manual_config(self): entry = NETWORK_CONFIGS['manual'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_all_config(self): entry = NETWORK_CONFIGS['all'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) self.assertNotIn( 'WARNING: Network config: ignoring eth0.101 device-level mtu', @@ -1941,13 +2407,13 @@ USERCTL=no def test_small_config(self): entry = NETWORK_CONFIGS['small'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_v4_and_v6_static_config(self): entry = 
NETWORK_CONFIGS['v4_and_v6_static']
         found = self._render_and_read(network_config=yaml.load(entry['yaml']))
-        self._compare_files_to_expected(entry['expected_sysconfig'], found)
+        self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
         expected_msg = (
             'WARNING: Network config: ignoring iface0 device-level mtu:8999'
@@ -1957,7 +2423,7 @@ USERCTL=no
     def test_dhcpv6_only_config(self):
         entry = NETWORK_CONFIGS['dhcpv6_only']
         found = self._render_and_read(network_config=yaml.load(entry['yaml']))
-        self._compare_files_to_expected(entry['expected_sysconfig'], found)
+        self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)


@@ -1982,7 +2448,7 @@ class TestEniNetRendering(CiTestCase):
         renderer = eni.Renderer(
             {'eni_path': 'interfaces', 'netrules_path': None})
-        renderer.render_network_state(ns, render_dir)
+        renderer.render_network_state(ns, target=render_dir)
         self.assertTrue(os.path.exists(os.path.join(render_dir,
                                                     'interfaces')))
@@ -2002,7 +2468,7 @@ iface eth1000 inet dhcp
         tmp_dir = self.tmp_dir()
         ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
         renderer = eni.Renderer()
-        renderer.render_network_state(ns, tmp_dir)
+        renderer.render_network_state(ns, target=tmp_dir)
         expected = """\
 auto lo
 iface lo inet loopback
@@ -2038,7 +2504,7 @@ class TestNetplanNetRendering(CiTestCase):
         render_target = 'netplan.yaml'
         renderer = netplan.Renderer(
             {'netplan_path': render_target, 'postcmds': False})
-        renderer.render_network_state(ns, render_dir)
+        renderer.render_network_state(ns, target=render_dir)
         self.assertTrue(os.path.exists(os.path.join(render_dir,
                                                     render_target)))
@@ -2143,7 +2609,7 @@ class TestNetplanPostcommands(CiTestCase):
         render_target = 'netplan.yaml'
         renderer = netplan.Renderer(
             {'netplan_path': render_target, 'postcmds': True})
-        renderer.render_network_state(ns, render_dir)
+        renderer.render_network_state(ns, target=render_dir)
         mock_netplan_generate.assert_called_with(run=True)
         mock_net_setup_link.assert_called_with(run=True)
@@ -2168,7 +2634,7 @@ class TestNetplanPostcommands(CiTestCase):
              '/sys/class/net/lo'], capture=True),
         ]
         with mock.patch.object(os.path, 'islink', return_value=True):
-            renderer.render_network_state(ns, render_dir)
+            renderer.render_network_state(ns, target=render_dir)
         mock_subp.assert_has_calls(expected)

@@ -2363,7 +2829,7 @@ class TestNetplanRoundTrip(CiTestCase):
         renderer = netplan.Renderer(
             config={'netplan_path': netplan_path})
-        renderer.render_network_state(ns, target)
+        renderer.render_network_state(ns, target=target)
         return dir2dict(target)

     def testsimple_render_bond_netplan(self):
@@ -2453,7 +2919,7 @@ class TestEniRoundTrip(CiTestCase):
         renderer = eni.Renderer(
             config={'eni_path': eni_path, 'netrules_path': netrules_path})
-        renderer.render_network_state(ns, dir)
+        renderer.render_network_state(ns, target=dir)
         return dir2dict(dir)

     def testsimple_convert_and_render(self):
--
cgit v1.2.3


From a8dcad9ac62bb1d2a4f7489960395bad6cac9382 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 5 Sep 2018 16:02:25 +0000
Subject: tests: Disallow use of util.subp except for where needed.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In many cases, cloud-init uses 'util.subp' to run a subprocess.
This is not really desirable in our unit tests as it makes the tests
dependent upon existence of those utilities.

The change here is to modify the base test case class (CiTestCase) to
raise an exception any time subp is called. 
Then, fix all callers.

For cases where subp is necessary or actually desired, we can use it via
  a.) context handler CiTestCase.allow_subp(value)
  b.) class level self.allowed_subp = value

In both cases the value is a list of acceptable executable names that
will be called (essentially argv[0]).

Some cleanups in AltCloud were done as the code was being updated.
---
 cloudinit/analyze/tests/test_dump.py               | 86 ++++++++------------
 cloudinit/cmd/tests/test_status.py                 |  6 +-
 cloudinit/config/tests/test_snap.py                |  7 +-
 cloudinit/config/tests/test_ubuntu_advantage.py    |  7 +-
 cloudinit/net/tests/test_init.py                   |  2 +
 cloudinit/sources/DataSourceAltCloud.py            | 24 +++---
 cloudinit/sources/DataSourceSmartOS.py             | 29 ++++---
 cloudinit/tests/helpers.py                         | 43 ++++++++++
 tests/unittests/test_datasource/test_altcloud.py   | 44 +++++-----
 tests/unittests/test_datasource/test_cloudsigma.py |  3 +
 .../unittests/test_datasource/test_configdrive.py  |  3 +
 tests/unittests/test_datasource/test_nocloud.py    |  2 +
 tests/unittests/test_datasource/test_opennebula.py |  1 +
 tests/unittests/test_datasource/test_ovf.py        |  8 +-
 tests/unittests/test_datasource/test_smartos.py    | 94 +++++++++++++++------
 tests/unittests/test_ds_identify.py                |  1 +
 .../test_handler/test_handler_apt_source_v3.py     | 11 ++-
 .../unittests/test_handler/test_handler_bootcmd.py | 10 ++-
 tests/unittests/test_handler/test_handler_chef.py  | 18 +++--
 .../test_handler/test_handler_resizefs.py          |  8 +-
 tests/unittests/test_handler/test_schema.py        | 12 ++-
 tests/unittests/test_net.py                        | 18 ++++-
 tests/unittests/test_util.py                       | 27 ++++---
 23 files changed, 289 insertions(+), 175 deletions(-)

(limited to 'tests/unittests')

diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index f4c42841..db2a667b 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,8 +5,8 @@ from textwrap import dedent
 
 from cloudinit.analyze.dump import (
     dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import subp, write_file
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.util import which, write_file
+from cloudinit.tests.helpers import CiTestCase, mock, skipIf
 
 
 class TestParseTimestamp(CiTestCase):
@@ -15,21 +15,9 @@ class TestParseTimestamp(CiTestCase):
         """Logs with cloud-init detailed formats will be properly parsed."""
         trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
         trusty_stamp = '2016-09-12 14:39:20,839'
-
-        parsed = parse_timestamp(trusty_stamp)
-
-        # convert ourselves
         dt = datetime.strptime(trusty_stamp, trusty_fmt)
-        expected = float(dt.strftime('%s.%f'))
-
-        # use date(1)
-        out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
-        timestamp = out.strip()
-        date_ts = float(timestamp)
-
-        self.assertEqual(expected, parsed)
-        self.assertEqual(expected, date_ts)
-        self.assertEqual(date_ts, parsed)
+        self.assertEqual(
+            float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))

     def test_parse_timestamp_handles_syslog_adding_year(self):
         """Syslog timestamps lack a year. 
Add year and properly parse.""" @@ -39,17 +27,9 @@ class TestParseTimestamp(CiTestCase): # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(syslog_stamp) - - # use date(1) - out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + self.assertEqual( + float(dt.strftime('%s.%f')), + parse_timestamp(syslog_stamp)) def test_parse_timestamp_handles_journalctl_format_adding_year(self): """Journalctl precise timestamps lack a year. Add year and parse.""" @@ -59,37 +39,22 @@ class TestParseTimestamp(CiTestCase): # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(journal_stamp) - - # use date(1) - out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp)) + @skipIf(not which("date"), "'date' command not available.") def test_parse_unexpected_timestamp_format_with_date_command(self): - """Dump sends unexpected timestamp formats to data for processing.""" + """Dump sends unexpected timestamp formats to date for processing.""" new_fmt = '%H:%M %m/%d %Y' new_stamp = '17:15 08/08' - # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) - expected = float(dt.strftime('%s.%f')) - parsed = parse_timestamp(new_stamp) # use date(1) - out, _ = subp(['date', '+%s.%6N', '-d', new_stamp]) - timestamp = out.strip() - date_ts = float(timestamp) - - self.assertEqual(expected, parsed) - self.assertEqual(expected, date_ts) - self.assertEqual(date_ts, parsed) + with self.allow_subp(["date"]): + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(new_stamp)) class TestParseCILogLine(CiTestCase): @@ -135,7 +100,9 @@ class TestParseCILogLine(CiTestCase): 'timestamp': timestamp} self.assertEqual(expected, parse_ci_logline(line)) - def test_parse_logline_returns_event_for_finish_events(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_parse_logline_returns_event_for_finish_events(self, + m_parse_from_date): """parse_ci_logline returns a finish event for a parsed log line.""" line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' @@ -147,7 +114,10 @@ class TestParseCILogLine(CiTestCase): 'origin': 'cloudinit', 'result': 'SUCCESS', 'timestamp': 1472594005.972} + m_parse_from_date.return_value = "1472594005.972" self.assertEqual(expected, parse_ci_logline(line)) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) SAMPLE_LOGS = dedent("""\ @@ -162,10 +132,16 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ class TestDumpEvents(CiTestCase): maxDiff = None - def test_dump_events_with_rawdata(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_rawdata(self, 
m_parse_from_date): """Rawdata is split and parsed into a tuple of events and data""" + m_parse_from_date.return_value = "1472594005.972" events, data = dump_events(rawdata=SAMPLE_LOGS) expected_data = SAMPLE_LOGS.splitlines() + self.assertEqual( + [mock.call("2016-08-30 21:53:25.972325+00:00")], + m_parse_from_date.call_args_list) + self.assertEqual(expected_data, data) year = datetime.now().year dt1 = datetime.strptime( 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') @@ -183,12 +159,14 @@ class TestDumpEvents(CiTestCase): 'result': 'SUCCESS', 'timestamp': 1472594005.972}] self.assertEqual(expected_events, events) - self.assertEqual(expected_data, data) - def test_dump_events_with_cisource(self): + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_cisource(self, m_parse_from_date): """Cisource file is read and parsed into a tuple of events and data.""" tmpfile = self.tmp_path('logfile') write_file(tmpfile, SAMPLE_LOGS) + m_parse_from_date.return_value = 1472594005.972 + events, data = dump_events(cisource=open(tmpfile)) year = datetime.now().year dt1 = datetime.strptime( @@ -208,3 +186,5 @@ class TestDumpEvents(CiTestCase): 'timestamp': 1472594005.972}] self.assertEqual(expected_events, events) self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 37a89936..aded8580 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -39,7 +39,8 @@ class TestStatus(CiTestCase): ensure_file(self.disable_file) # Create the ignored disable file (is_disabled, reason) = wrap_and_call( 'cloudinit.cmd.status', - {'uses_systemd': False}, + {'uses_systemd': False, + 'get_cmdline': "root=/dev/my-root not-important"}, status._is_cloudinit_disabled, self.disable_file, self.paths) self.assertFalse( is_disabled, 'expected enabled cloud-init on sysvinit') @@ -50,7 +51,8 @@ class TestStatus(CiTestCase): ensure_file(self.disable_file) # Create observed disable file (is_disabled, reason) = wrap_and_call( 'cloudinit.cmd.status', - {'uses_systemd': True}, + {'uses_systemd': True, + 'get_cmdline': "root=/dev/my-root not-important"}, status._is_cloudinit_disabled, self.disable_file, self.paths) self.assertTrue(is_disabled, 'expected disabled cloud-init') self.assertEqual( diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 34c80f1e..3c472891 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -424,8 +425,10 @@ class TestHandle(CiTestCase): 'snap': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = 'cloudinit.config.cc_snap.sys.stderr' - with mock.patch(mock_path, new_callable=StringIO): - handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) @mock.patch('cloudinit.config.cc_snap.util.subp') diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py 
b/cloudinit/config/tests/test_ubuntu_advantage.py index f1beeff8..b7cf9bee 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -23,6 +23,7 @@ class FakeCloud(object): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -234,8 +235,10 @@ class TestHandle(CiTestCase): 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = '%s.sys.stderr' % MPATH - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, + args=None) self.assertEqual('HI\nMOM\n', util.load_file(outfile)) diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 5c017d15..8b444f14 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -199,6 +199,8 @@ class TestGenerateFallbackConfig(CiTestCase): self.sysdir = self.tmp_dir() + '/' self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') def test_generate_fallback_finds_connected_eth_with_mac(self): diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 24fd65ff..8cd312d0 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -181,27 +181,18 @@ class DataSourceAltCloud(sources.DataSource): # modprobe floppy try: - cmd = CMD_PROBE_FLOPPY - (cmd_out, _err) = util.subp(cmd) - LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) + modprobe_floppy() except ProcessExecutionError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) - return False - except OSError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) + util.logexc(LOG, 'Failed modprobe: %s', e) return False floppy_dev = '/dev/fd0' # udevadm settle for floppy device try: - (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) - LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) - except ProcessExecutionError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) - return False - except OSError as e: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e) + util.udevadm_settle(exists=floppy_dev, timeout=5) + except (ProcessExecutionError, OSError) as e: + util.logexc(LOG, 'Failed udevadm_settle: %s\n', e) return False try: @@ -258,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource): return False +def modprobe_floppy(): + out, _err = util.subp(CMD_PROBE_FLOPPY) + LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out) + + # Used to match classes to dependencies # Source DataSourceAltCloud does not really depend on networking. 
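

With AltCloud's device probing factored into small helpers, its tests
(further below) can patch modprobe_floppy and udevadm_settle directly
instead of swapping in real commands. The broader pattern this commit
introduces is the subp guard in CiTestCase: any unexpected util.subp
call raises, and a test must opt in explicitly. A minimal sketch of
the two opt-in forms, in a hypothetical test module ('true' is just an
example of an allowed executable):

    from cloudinit import util
    from cloudinit.tests.helpers import CiTestCase

    class TestNeedsRealSubp(CiTestCase):
        # Class-level opt-in: every test in this class may exec 'true'.
        allowed_subp = ['true']

        def test_runs_true(self):
            _out, err = util.subp(['true'])
            self.assertEqual('', err)

    class TestMostlyMocked(CiTestCase):
        def test_one_real_call(self):
            # Context-manager opt-in: only calls inside this block may
            # exec 'true'; any other subp call still raises.
            with self.allow_subp(['true']):
                util.subp(['true'])
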
# In the future 'dsmode' like behavior can be added to offer user diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ad8cfb9c..593ac91a 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -683,6 +683,18 @@ def jmc_client_factory( raise ValueError("Unknown value for smartos_type: %s" % smartos_type) +def identify_file(content_f): + cmd = ["file", "--brief", "--mime-type", content_f] + f_type = None + try: + (f_type, _err) = util.subp(cmd) + LOG.debug("script %s mime type is %s", content_f, f_type) + except util.ProcessExecutionError as e: + util.logexc( + LOG, ("Failed to identify script type for %s" % content_f, e)) + return None if f_type is None else f_type.strip() + + def write_boot_content(content, content_f, link=None, shebang=False, mode=0o400): """ @@ -715,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False, util.write_file(content_f, content, mode=mode) if shebang and not content.startswith("#!"): - try: - cmd = ["file", "--brief", "--mime-type", content_f] - (f_type, _err) = util.subp(cmd) - LOG.debug("script %s mime type is %s", content_f, f_type) - if f_type.strip() == "text/plain": - new_content = "\n".join(["#!/bin/bash", content]) - util.write_file(content_f, new_content, mode=mode) - LOG.debug("added shebang to file %s", content_f) - - except Exception as e: - util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + f_type = identify_file(content_f) + if f_type == "text/plain": + util.write_file( + content_f, "\n".join(["#!/bin/bash", content]), mode=mode) + LOG.debug("added shebang to file %s", content_f) if link: try: diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 9a21426e..e7e4182f 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -33,6 +33,8 @@ from cloudinit import helpers as ch from cloudinit.sources import DataSourceNone from cloudinit import util +_real_subp = util.subp + # Used for skipping tests SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf @@ -143,6 +145,17 @@ class CiTestCase(TestCase): # Subclass overrides for specific test behavior # Whether or not a unit test needs logfile setup with_logs = False + allowed_subp = False + SUBP_SHELL_TRUE = "shell=true" + + @contextmanager + def allow_subp(self, allowed_subp): + orig = self.allowed_subp + try: + self.allowed_subp = allowed_subp + yield + finally: + self.allowed_subp = orig def setUp(self): super(CiTestCase, self).setUp() @@ -155,11 +168,41 @@ class CiTestCase(TestCase): handler.setFormatter(formatter) self.old_handlers = self.logger.handlers self.logger.handlers = [handler] + if self.allowed_subp is True: + util.subp = _real_subp + else: + util.subp = self._fake_subp + + def _fake_subp(self, *args, **kwargs): + if 'args' in kwargs: + cmd = kwargs['args'] + else: + cmd = args[0] + + if not isinstance(cmd, six.string_types): + cmd = cmd[0] + pass_through = False + if not isinstance(self.allowed_subp, (list, bool)): + raise TypeError("self.allowed_subp supports list or bool.") + if isinstance(self.allowed_subp, bool): + pass_through = self.allowed_subp + else: + pass_through = ( + (cmd in self.allowed_subp) or + (self.SUBP_SHELL_TRUE in self.allowed_subp and + kwargs.get('shell'))) + if pass_through: + return _real_subp(*args, **kwargs) + raise Exception( + "called subp. 
set self.allowed_subp=True to allow\n subp(%s)" % + ', '.join([str(repr(a)) for a in args] + + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) def tearDown(self): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + util.subp = _real_subp super(CiTestCase, self).tearDown() def tmp_dir(self, dir=None, cleanup=True): diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index 3253f3ad..ff35904e 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -262,64 +262,56 @@ class TestUserDataRhevm(CiTestCase): ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' - cmd_pass = ['true'] - cmd_fail = ['false'] - cmd_not_found = ['bogus bad command'] - def setUp(self): '''Set up.''' self.paths = helpers.Paths({'cloud_dir': '/tmp'}) - self.mount_dir = tempfile.mkdtemp() + self.mount_dir = self.tmp_dir() _write_user_data_files(self.mount_dir, 'test user data') - - def tearDown(self): - # Reset - - _remove_user_data_files(self.mount_dir) - - # Attempt to remove the temp dir ignoring errors - try: - shutil.rmtree(self.mount_dir) - except OSError: - pass - - dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' - dsac.CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] - dsac.CMD_UDEVADM_SETTLE = ['udevadm', 'settle', - '--quiet', '--timeout=5'] + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', + 'm_modprobe_floppy', return_value=None) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', + 'm_udevadm_settle', return_value=('', '')) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', + 'm_mount_cb') def test_mount_cb_fails(self): '''Test user_data_rhevm() where mount_cb fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_pass + self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): '''Test user_data_rhevm() where modprobe fails.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_fail + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "Failed modprobe") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): '''Test user_data_rhevm() with no modprobe command.''' - dsac.CMD_PROBE_FLOPPY = self.cmd_not_found + self.m_modprobe_floppy.side_effect = util.ProcessExecutionError( + "No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): '''Test user_data_rhevm() where udevadm fails.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_fail + self.m_udevadm_settle.side_effect = util.ProcessExecutionError( + "Failed settle.") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): '''Test user_data_rhevm() with no udevadm command.''' - dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found + self.m_udevadm_settle.side_effect = OSError("No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index f6a59b6b..380ad1b5 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ 
b/tests/unittests/test_datasource/test_cloudsigma.py @@ -42,6 +42,9 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceCloudSigma.util.is_container", + "m_is_container", return_value=False) self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( "", "", paths=self.paths) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 7e6fcbbf..b0abfc5f 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -224,6 +224,9 @@ class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() + self.add_patch( + "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() def test_ec2_metadata(self): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index cdbd1e1a..21931eb7 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -25,6 +25,8 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) + self.mocks.enter_context( + mock.patch.object(util, 'read_dmi_data', return_value=None)) def test_nocloud_seed_dir(self): md = {'instance-id': 'IID', 'dsmode': 'local'} diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 36b4d771..61591017 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -43,6 +43,7 @@ DS_PATH = "cloudinit.sources.DataSourceOpenNebula" class TestOpenNebulaDataSource(CiTestCase): parsed_user = None + allowed_subp = ['bash'] def setUp(self): super(TestOpenNebulaDataSource, self).setUp() diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index fc4eb36e..9d52eb99 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -124,7 +124,9 @@ class TestDatasourceOVF(CiTestCase): ds = self.datasource(sys_cfg={}, distro={}, paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': None}, + {'util.read_dmi_data': None, + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( @@ -138,7 +140,9 @@ class TestDatasourceOVF(CiTestCase): paths=paths) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware'}, + {'util.read_dmi_data': 'vmware', + 'transport_iso9660': (False, None, None), + 'transport_vmware_guestd': (False, None, None)}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index dca0b3d4..46d67b94 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -20,10 +20,8 @@ import multiprocessing import os import os.path import re -import shutil import signal import stat -import tempfile 
import unittest2 import uuid @@ -31,15 +29,27 @@ from cloudinit import serial from cloudinit.sources import DataSourceSmartOS from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ) + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) import six from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, subp) +from cloudinit.util import ( + b64e, subp, ProcessExecutionError, which, write_file) -from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase +from cloudinit.tests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' SDC_NICS = json.loads(""" [ { @@ -366,37 +376,33 @@ class PsuedoJoyentClient(object): class TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + def setUp(self): super(TestSmartOSDataSource, self).setUp() - dsmos = 'cloudinit.sources.DataSourceSmartOS' - patcher = mock.patch(dsmos + ".jmc_client_factory") - self.jmc_cfact = patcher.start() - self.addCleanup(patcher.stop) - patcher = mock.patch(dsmos + ".get_smartos_environ") - self.get_smartos_environ = patcher.start() - self.addCleanup(patcher.stop) - - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.paths = c_helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - - self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp') + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') os.mkdir(self.legacy_user_d) - - self.orig_lud = DataSourceSmartOS.LEGACY_USER_D - DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d - - def tearDown(self): - DataSourceSmartOS.LEGACY_USER_D = self.orig_lud - super(TestSmartOSDataSource, self).tearDown() + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, sys_cfg=None, ds_cfg=None): self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) self.get_smartos_environ.return_value = mode + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + if sys_cfg is None: sys_cfg = {} @@ -405,7 +411,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): sys_cfg['datasource']['SmartOS'] = ds_cfg return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=self.paths) + sys_cfg, distro=None, paths=paths) def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} @@ -493,6 +499,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc.metadata['user-script']) legacy_script_f = "%s/user-script" % self.legacy_user_d + print("legacy_script_f=%s" % legacy_script_f) self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] @@ -640,6 +647,28 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): 
mydscfg['disk_aliases']['FOO']) +class TestIdentifyFile(CiTestCase): + """Test the 'identify_file' utility.""" + @skipIf(not which("file"), "command 'file' not available.") + def test_file_happy_path(self): + """Test file is available and functional on plain text.""" + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + with self.allow_subp(["file"]): + self.assertEqual("text/plain", identify_file(fname)) + + @mock.patch(DSMOS + ".util.subp") + def test_returns_none_on_error(self, m_subp): + """On 'file' execution error, None should be returned.""" + m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99) + fname = self.tmp_path("myfile") + write_file(fname, "plain text content here\n") + self.assertEqual(None, identify_file(fname)) + self.assertEqual( + [mock.call(["file", "--brief", "--mime-type", fname])], + m_subp.call_args_list) + + class ShortReader(object): """Implements a 'read' interface for bytes provided. much like io.BytesIO but the 'endbyte' acts as if EOF. @@ -893,7 +922,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): self.assertEqual(client.list(), []) -class TestNetworkConversion(TestCase): +class TestNetworkConversion(CiTestCase): def test_convert_simple(self): expected = { 'version': 1, @@ -1058,7 +1087,8 @@ class TestNetworkConversion(TestCase): "Only supported on KVM and bhyve guests under SmartOS") @unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), "Requires write access to " + SERIAL_DEVICE) -class TestSerialConcurrency(TestCase): +@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available") +class TestSerialConcurrency(CiTestCase): """ This class tests locking on an actual serial port, and as such can only be run in a kvm or bhyve guest running on a SmartOS host. A test run on @@ -1066,7 +1096,11 @@ class TestSerialConcurrency(TestCase): there is only one session over a connection. In contrast, in the absence of proper locking multiple processes opening the same serial port can corrupt each others' exchanges with the metadata server. + + This takes on the order of 2 to 3 minutes to run. 
""" + allowed_subp = ['mdata-get'] + def setUp(self): self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) self.mdata_proc.start() @@ -1097,7 +1131,7 @@ class TestSerialConcurrency(TestCase): keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) - client = ds.jmc_client_factory() + client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM) self.assertIsNotNone(client) # The behavior that we are testing for was observed mdata-get running diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index e08e7908..46778e95 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -89,6 +89,7 @@ CallReturn = namedtuple('CallReturn', class DsIdentifyBase(CiTestCase): dsid_path = os.path.realpath('tools/ds-identify') + allowed_subp = ['sh'] def call(self, rootd=None, mocks=None, func="main", args=None, files=None, policy_dmi=DI_DEFAULT_POLICY, diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7a64c230..a81c67c7 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -48,6 +48,10 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None +MOCK_LSB_RELEASE_DATA = { + 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS', + 'release': '18.04', 'codename': 'bionic'} + class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig @@ -64,6 +68,9 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list") self.join = os.path.join self.matcher = re.compile(ADD_APT_REPO_MATCH).search + self.add_patch( + 'cloudinit.config.cc_apt_configure.util.lsb_release', + 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy()) @staticmethod def _add_apt_sources(*args, **kwargs): @@ -76,7 +83,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = util.lsb_release()['codename'] + params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release'] arch = 'amd64' params['MIRROR'] = cc_apt_configure.\ get_default_mirrors(arch)["PRIMARY"] @@ -464,7 +471,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'uri': 'http://testsec.ubuntu.com/%s/' % component}]} post = ("%s_dists_%s-updates_InRelease" % - (component, util.lsb_release()['codename'])) + (component, MOCK_LSB_RELEASE_DATA['codename'])) fromfn = ("%s/%s_%s" % (pre, archive, post)) tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index b1375269..a76760fa 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -118,7 +118,8 @@ class TestBootcmd(CiTestCase): 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - handle('cc_bootcmd', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + handle('cc_bootcmd', valid_config, cc, LOG, []) self.assertEqual(my_id + ' iid-datasource-none\n', util.load_file(out_file)) @@ -128,12 +129,13 @@ class TestBootcmd(CiTestCase): valid_config = {'bootcmd': ['exit 1']} # Script with error with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with 
self.assertRaises(util.ProcessExecutionError) as ctxt_manager: - handle('does-not-matter', valid_config, cc, LOG, []) + with self.allow_subp(['/bin/sh']): + with self.assertRaises(util.ProcessExecutionError) as ctxt: + handle('does-not-matter', valid_config, cc, LOG, []) self.assertIn( 'Unexpected error while running command.\n' "Command: ['/bin/sh',", - str(ctxt_manager.exception)) + str(ctxt.exception)) self.assertIn( 'Failed to run bootcmd module does-not-matter', self.logs.getvalue()) diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index f4bbd66d..b16532ea 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -36,13 +36,21 @@ class TestInstallChefOmnibus(HttprettyTestCase): @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) def test_install_chef_from_omnibus_runs_chef_url_content(self): - """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script.""" - chef_outfile = self.tmp_path('chef.out', self.new_root) - response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) + """install_chef_from_omnibus calls subp_blob_in_tempfile.""" + response = b'#!/bin/bash\necho "Hi Mom"' httpretty.register_uri( httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) - cc_chef.install_chef_from_omnibus() - self.assertEqual('Hi Mom\n', util.load_file(chef_outfile)) + ret = (None, None) # stdout, stderr but capture=False + + with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile", + return_value=ret) as m_subp_blob: + cc_chef.install_chef_from_omnibus() + # admittedly whitebox, but assuming subp_blob_in_tempfile works + # this should be fine. + self.assertEqual( + [mock.call(blob=response, args=[], basename='chef-omnibus-install', + capture=False)], + m_subp_blob.call_args_list) @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile') diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index f92175fd..feca56c2 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -150,10 +150,12 @@ class TestResizefs(CiTestCase): self.assertEqual(('growfs', '-y', devpth), _resize_ufs(mount_point, devpth)) + @mock.patch('cloudinit.util.is_container', return_value=False) @mock.patch('cloudinit.util.get_mount_info') @mock.patch('cloudinit.util.get_device_info_from_zpool') @mock.patch('cloudinit.util.parse_mount') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount): + def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, + is_container): devpth = 'vmzroot/ROOT/freebsd' disk = 'gpt/system' fs_type = 'zfs' @@ -354,8 +356,10 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): ('btrfs', 'filesystem', 'resize', 'max', '/'), _resize_btrfs("/", "/dev/sda1")) + @mock.patch('cloudinit.util.is_container', return_value=True) @mock.patch('cloudinit.util.is_FreeBSD') - def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd): + def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd, + m_is_container): freebsd.return_value = True info = 'dev=gpt/system mnt_point=/ path=/' devpth = maybe_get_writable_device_path('gpt/system', info, LOG) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index fb266faf..1bad07f6 100644 --- 
a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -4,7 +4,7 @@ from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, get_schema_doc, get_schema, validate_cloudconfig_file, validate_cloudconfig_schema, main) -from cloudinit.util import subp, write_file +from cloudinit.util import write_file from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema @@ -406,8 +406,14 @@ class CloudTestsIntegrationTest(CiTestCase): integration_testdir = os.path.sep.join( [testsdir, 'cloud_tests', 'testcases']) errors = [] - out, _ = subp(['find', integration_testdir, '-name', '*yaml']) - for filename in out.splitlines(): + + yaml_files = [] + for root, _dirnames, filenames in os.walk(integration_testdir): + yaml_files.extend([os.path.join(root, f) + for f in filenames if f.endswith(".yaml")]) + self.assertTrue(len(yaml_files) > 0) + + for filename in yaml_files: test_cfg = safe_load(open(filename)) cloud_config = test_cfg.get('cloud_config') if cloud_config: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 05d5c72c..f3165da9 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1653,6 +1653,12 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, class TestGenerateFallbackConfig(CiTestCase): + def setUp(self): + super(TestGenerateFallbackConfig, self).setUp() + self.add_patch( + "cloudinit.util.get_cmdline", "m_get_cmdline", + return_value="root=/dev/sda1") + @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") @@ -1895,12 +1901,13 @@ class TestRhelSysConfigRendering(CiTestCase): if missing: raise AssertionError("Missing headers in: %s" % missing) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2183,12 +2190,13 @@ class TestOpenSuseSysConfigRendering(CiTestCase): if missing: raise AssertionError("Missing headers in: %s" % missing) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2429,12 +2437,13 @@ USERCTL=no class TestEniNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + mock_sys_dev_path, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) @@ -2482,6 +2491,7 @@ iface eth0 inet dhcp class TestNetplanNetRendering(CiTestCase): + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") 
@mock.patch("cloudinit.net.netplan._clean_default") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -2489,7 +2499,7 @@ class TestNetplanNetRendering(CiTestCase): def test_default_generation(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, - mock_clean_default): + mock_clean_default, m_get_cmdline): tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7a203ce2..5a14479a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -24,6 +24,7 @@ except ImportError: BASH = util.which('bash') +BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' class FakeSelinux(object): @@ -742,6 +743,8 @@ class TestReadSeeded(helpers.TestCase): class TestSubp(helpers.CiTestCase): with_logs = True + allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, sys.executable] stdin2err = [BASH, '-c', 'cat >&2'] stdin2out = ['cat'] @@ -749,7 +752,6 @@ class TestSubp(helpers.CiTestCase): utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] - bogus_command = 'this-is-not-expected-to-be-a-program-name' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf @@ -848,9 +850,10 @@ class TestSubp(helpers.CiTestCase): util.write_file(noshebang, 'true\n') os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - self.assertRaisesRegex(util.ProcessExecutionError, - r'Missing #! in script\?', - util.subp, (noshebang,)) + with self.allow_subp([noshebang]): + self.assertRaisesRegex(util.ProcessExecutionError, + r'Missing #! in script\?', + util.subp, (noshebang,)) def test_subp_combined_stderr_stdout(self): """Providing combine_capture as True redirects stderr to stdout.""" @@ -868,14 +871,14 @@ class TestSubp(helpers.CiTestCase): def test_exception_has_out_err_are_bytes_if_decode_false(self): """Raised exc should have stderr, stdout as bytes if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=False) + util.subp([BOGUS_COMMAND], decode=False) self.assertTrue(isinstance(cm.exception.stdout, bytes)) self.assertTrue(isinstance(cm.exception.stderr, bytes)) def test_exception_has_out_err_are_bytes_if_decode_true(self): """Raised exc should have stderr, stdout as string if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: - util.subp([self.bogus_command], decode=True) + util.subp([BOGUS_COMMAND], decode=True) self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) @@ -925,10 +928,10 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp([self.bogus_command], status_cb=status_cb) + util.subp([BOGUS_COMMAND], status_cb=status_cb) expected = [ - 'Begin run command: {cmd}\n'.format(cmd=self.bogus_command), + 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), 'ERROR: End run command: invalid command provided\n'] self.assertEqual(expected, logs) @@ -940,13 +943,13 @@ class TestSubp(helpers.CiTestCase): logs.append(log) with self.assertRaises(util.ProcessExecutionError): - util.subp(['ls', '/I/dont/exist'], status_cb=status_cb) - util.subp(['ls'], status_cb=status_cb) + util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) + util.subp([BASH, '-c', 
'exit 0'], status_cb=status_cb) expected = [ - 'Begin run command: ls /I/dont/exist\n', + 'Begin run command: %s -c exit 2\n' % BASH, 'ERROR: End run command: exit(2)\n', - 'Begin run command: ls\n', + 'Begin run command: %s -c exit 0\n' % BASH, 'End run command: exit(0)\n'] self.assertEqual(expected, logs) -- cgit v1.2.3 From 757247f9ff2df57e792e29d8656ac415364e914d Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Sat, 8 Sep 2018 01:48:38 +0000 Subject: config: disable ssh access to a configured user account Cloud config can now disable ssh access to non-root users. When defining the 'users' list in cloud-configuration a boolean 'ssh_redirect_user: true' can be provided to disable ssh logins for that user. Any ssh 'public-keys' defined in cloud meta-data will be added and disabled in .ssh/authorized_keys. Any attempts to ssh as this user using acceptable ssh keys will be presented with a message like the following: Please login as the user "ubuntu" rather than the user "youruser". --- cloudinit/config/cc_ssh.py | 7 +- cloudinit/config/cc_users_groups.py | 41 +++++- cloudinit/config/tests/test_ssh.py | 22 ++-- cloudinit/config/tests/test_users_groups.py | 144 ++++++++++++++++++++++ cloudinit/distros/__init__.py | 21 +++- cloudinit/ssh_util.py | 6 + doc/examples/cloud-config-user-groups.txt | 9 ++ doc/examples/cloud-config.txt | 19 ++- tests/unittests/test_distros/test_create_users.py | 91 +++++++++++++- 9 files changed, 337 insertions(+), 23 deletions(-) create mode 100644 cloudinit/config/tests/test_users_groups.py mode change 100755 => 100644 cloudinit/distros/__init__.py (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 45204a07..f8f7cb35 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -101,10 +101,6 @@ from cloudinit.distros import ug_util from cloudinit import ssh_util from cloudinit import util -DISABLE_ROOT_OPTS = ( - "no-port-forwarding,no-agent-forwarding," - "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" - " rather than the user \\\"root\\\".\';echo;sleep 10\"") GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' @@ -185,7 +181,7 @@ def handle(_name, cfg, cloud, log, _args): (user, _user_config) = ug_util.extract_default(users) disable_root = util.get_cfg_option_bool(cfg, "disable_root", True) disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", - DISABLE_ROOT_OPTS) + ssh_util.DISABLE_USER_OPTS) keys = cloud.get_public_ssh_keys() or [] if "ssh_authorized_keys" in cfg: @@ -207,6 +203,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): if not user: user = "NONE" key_prefix = disable_root_opts.replace('$USER', user) + key_prefix = key_prefix.replace('$DISABLE_USER', 'root') else: key_prefix = '' diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index c95bdaad..c32a743a 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -52,8 +52,17 @@ config keys for an entry in ``users`` are as follows: associated with the address, username and SSH keys will be requested from there. Default: none - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's - authkeys file. Default: none - - ``ssh_import_id``: Optional. SSH id to import for user. Default: none + authkeys file. Default: none. This key can not be combined with + ``ssh_redirect_user``. + - ``ssh_import_id``: Optional. SSH id to import for user. 
Default: none. + This key can not be combined with ``ssh_redirect_user``. + - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH + logins for this user. When specified, all cloud meta-data public ssh + keys will be set up in a disabled state for this username. Any ssh login + as this username will time out and prompt with a message to login instead + as the configured <default_username> for this instance. Default: false. + This key can not be combined with ``ssh_import_id`` or + ``ssh_authorized_keys``. - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False. Default: none. An absence of sudo key, or a value of none or false will result in no sudo rules being written for the user. @@ -101,6 +110,7 @@ config keys for an entry in ``users`` are as follows: selinux_user: shell: snapuser: + ssh_redirect_user: ssh_authorized_keys: - - @@ -114,17 +124,44 @@ config keys for an entry in ``users`` are as follows: # since the module attribute 'distros' # is a list of distros that are supported, not a sub-module from cloudinit.distros import ug_util +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +LOG = logging.getLogger(__name__) + frequency = PER_INSTANCE def handle(name, cfg, cloud, _log, _args): (users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (default_user, _user_config) = ug_util.extract_default(users) + cloud_keys = cloud.get_public_ssh_keys() or [] for (name, members) in groups.items(): cloud.distro.create_group(name, members) for (user, config) in users.items(): + ssh_redirect_user = config.pop("ssh_redirect_user", False) + if ssh_redirect_user: + if 'ssh_authorized_keys' in config or 'ssh_import_id' in config: + raise ValueError( + 'Not creating user %s. ssh_redirect_user cannot be' + ' provided with ssh_import_id or ssh_authorized_keys' % + user) + if ssh_redirect_user not in (True, 'default'): + raise ValueError( + 'Not creating user %s. Invalid value of' + ' ssh_redirect_user: %s. Expected values: true, default' + ' or false.' % (user, ssh_redirect_user)) + if default_user is None: + LOG.warning( + 'Ignoring ssh_redirect_user: %s for %s.' + ' No default_user defined.' + ' Perhaps missing cloud configuration users: ' + ' [default, ..].', + ssh_redirect_user, user) + else: + config['ssh_redirect_user'] = default_user + config['cloud_public_ssh_keys'] = cloud_keys cloud.distro.create_user(user, **config) # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index 7441d9e9..c8a4271f 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -2,6 +2,7 @@ from cloudinit.config import cc_ssh +from cloudinit import ssh_util from cloudinit.tests.helpers import CiTestCase, mock MODPATH = "cloudinit.config.cc_ssh." 
@@ -15,8 +16,7 @@ class TestHandleSsh(CiTestCase): """Apply keys for the given user and root.""" keys = ["key1"] user = "clouduser" - options = cc_ssh.DISABLE_ROOT_OPTS - cc_ssh.apply_credentials(keys, user, False, options) + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) @@ -25,8 +25,7 @@ class TestHandleSsh(CiTestCase): """Apply keys for root only.""" keys = ["key1"] user = None - options = cc_ssh.DISABLE_ROOT_OPTS - cc_ssh.apply_credentials(keys, user, False, options) + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) self.assertEqual([mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) @@ -34,9 +33,10 @@ class TestHandleSsh(CiTestCase): """Apply keys for the given user and disable root ssh.""" keys = ["key1"] user = "clouduser" - options = cc_ssh.DISABLE_ROOT_OPTS + options = ssh_util.DISABLE_USER_OPTS cc_ssh.apply_credentials(keys, user, True, options) options = options.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options=options)], m_setup_keys.call_args_list) @@ -45,9 +45,10 @@ class TestHandleSsh(CiTestCase): """Apply keys no user and disable root ssh.""" keys = ["key1"] user = None - options = cc_ssh.DISABLE_ROOT_OPTS + options = ssh_util.DISABLE_USER_OPTS cc_ssh.apply_credentials(keys, user, True, options) options = options.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") self.assertEqual([mock.call(set(keys), "root", options=options)], m_setup_keys.call_args_list) @@ -66,7 +67,8 @@ class TestHandleSsh(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) - options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", "NONE") + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') self.assertIn( [mock.call('/etc/ssh/ssh_host_rsa_key'), @@ -94,7 +96,8 @@ class TestHandleSsh(CiTestCase): distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) - options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user) + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options=options)], m_setup_keys.call_args_list) @@ -118,7 +121,8 @@ class TestHandleSsh(CiTestCase): distro='ubuntu', metadata={'public-keys': keys}) cc_ssh.handle("name", cfg, cloud, None, None) - options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user) + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options=options)], m_setup_keys.call_args_list) diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py new file mode 100644 index 00000000..ba0afae3 --- /dev/null +++ b/cloudinit/config/tests/test_users_groups.py @@ -0,0 +1,144 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ + +from cloudinit.config import cc_users_groups +from cloudinit.tests.helpers import CiTestCase, mock + +MODPATH = "cloudinit.config.cc_users_groups" + + +@mock.patch('cloudinit.distros.ubuntu.Distro.create_group') +@mock.patch('cloudinit.distros.ubuntu.Distro.create_user') +class TestHandleUsersGroups(CiTestCase): + """Test cc_users_groups handling of config.""" + + with_logs = True + + def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group): + """Test handle with no config will not create users or groups.""" + cfg = {} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_not_called() + m_group.assert_not_called() + + def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group): + """When users in config, create users with distro.create_user.""" + cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): + """When ssh_redirect_user is True pass default user and cloud keys.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group): + """When ssh_redirect_user is 'default' pass default username.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'default'}]} + # System config defines a default user for the distro. 
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group): + """Warn when ssh_redirect_user is not 'default'.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'snowflake'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + with self.assertRaises(ValueError) as context_manager: + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_group.assert_not_called() + self.assertEqual( + 'Not creating user me2. Invalid value of ssh_redirect_user:' + ' snowflake. Expected values: true, default or false.', + str(context_manager.exception)) + + def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group): + """When unspecified ssh_redirect_user is false and not set up.""" + cfg = {'users': ['default', {'name': 'me2'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group): + """Warn when ssh_redirect_user is True and no default user present.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines *no* default user for the distro. + sys_cfg = {} + metadata = {} # no public-keys defined + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_called_once_with('me2', default=False) + m_group.assert_not_called() + self.assertEqual( + 'WARNING: Ignoring ssh_redirect_user: True for me2. No' + ' default_user defined. Perhaps missing' + ' cloud configuration users: [default, ..].\n', + self.logs.getvalue()) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py old mode 100755 new mode 100644 index d9101ce6..b8a48e85 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -381,6 +381,9 @@ class Distro(object): """ Add a user to the system using standard GNU tools """ + # XXX need to make add_user idempotent somehow as we + # still want to add groups or modify ssh keys on pre-existing + # users in the image. 
if util.is_user(name): LOG.info("User %s already exists, skipping.", name) return @@ -547,10 +550,24 @@ class Distro(object): LOG.warning("Invalid type '%s' detected for" " 'ssh_authorized_keys', expected list," " string, dict, or set.", type(keys)) + keys = [] else: keys = set(keys) or [] - ssh_util.setup_user_keys(keys, name, options=None) - + ssh_util.setup_user_keys(set(keys), name) + if 'ssh_redirect_user' in kwargs: + cloud_keys = kwargs.get('cloud_public_ssh_keys', []) + if not cloud_keys: + LOG.warning( + 'Unable to disable ssh logins for %s given' + ' ssh_redirect_user: %s. No cloud public-keys present.', + name, kwargs['ssh_redirect_user']) + else: + redirect_user = kwargs['ssh_redirect_user'] + disable_option = ssh_util.DISABLE_USER_OPTS + disable_option = disable_option.replace('$USER', redirect_user) + disable_option = disable_option.replace('$DISABLE_USER', name) + ssh_util.setup_user_keys( + set(cloud_keys), name, options=disable_option) return True def lock_passwd(self, name): diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 73c31772..3f99b58c 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -41,6 +41,12 @@ VALID_KEY_TYPES = ( ) +DISABLE_USER_OPTS = ( + "no-port-forwarding,no-agent-forwarding," + "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" + " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"") + + class AuthKeyLine(object): def __init__(self, source, keytype=None, base64=None, comment=None, options=None): diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 01ecad7b..6a363b77 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -36,6 +36,8 @@ users: - - - snapuser: joe@joeuser.io + - name: nosshlogins + ssh_redirect_user: true # Valid Values: # name: The user's login name @@ -76,6 +78,13 @@ users: # no_log_init: When set to true, do not initialize lastlog and faillog database. # ssh_import_id: Optional. Import SSH ids # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file +# ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud +# ssh public keys and emit a message redirecting logins to +# use <default_username> instead. This option only disables cloud +# provided public-keys. An error will be raised if ssh_authorized_keys +# or ssh_import_id is provided for the same user. +# +# ssh_authorized_keys. # sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule # strings or False to explicitly deny sudo usage. Examples: # diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 774f66b9..eb84dcf5 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -232,9 +232,22 @@ disable_root: false # respective key in /root/.ssh/authorized_keys if disable_root is true # see 'man authorized_keys' for more information on what you can do here # -# The string '$USER' will be replaced with the username of the default user -# -# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"root\".';echo;sleep 10" +# The string '$USER' will be replaced with the username of the default user. +# The string '$DISABLE_USER' will be replaced with the username to disable. 
+# +# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10" + +# disable ssh access for non-root users +# To disable ssh access for non-root users, ssh_redirect_user: true can be +# provided for any user in the 'users' list. This will prompt any ssh login +# attempts as that user with a message like that in disable_root_opts which +# redirects the person to login as <default_username>. +# This option can not be combined with either ssh_authorized_keys or +# ssh_import_id. +users: + - default + - name: blockeduser + ssh_redirect_user: true # set the locale to a given locale diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 07176caa..c3f258d5 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. +import re + from cloudinit import distros -from cloudinit.tests.helpers import (TestCase, mock) +from cloudinit import ssh_util +from cloudinit.tests.helpers import (CiTestCase, mock) class MyBaseDistro(distros.Distro): @@ -44,8 +47,12 @@ class MyBaseDistro(distros.Distro): @mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False) @mock.patch("cloudinit.distros.util.subp") -class TestCreateUser(TestCase): +class TestCreateUser(CiTestCase): + + with_logs = True + def setUp(self): + super(TestCreateUser, self).setUp() self.dist = MyBaseDistro() def _useradd2call(self, args): @@ -153,4 +160,84 @@ class TestCreateUser(TestCase): [self._useradd2call([user, '-m']), mock.call(['passwd', '-l', user])]) + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_string( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows string and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys='mykey') + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['mykey']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_list( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows lists and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2']) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_integer( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys warns on non-iterable/string type.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=-1) + m_setup_user_keys.assert_called_once_with(set([]), user) + match = re.match( + r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for' + ' \'ssh_authorized_keys\'.*', + self.logs.getvalue(), + re.DOTALL) + self.assertIsNotNone( + match, 'Missing ssh_authorized_keys invalid type warning') + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_no_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Log a warning when trying to redirect a user with no 
cloud ssh keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_redirect_user='someuser') + self.assertIn( + 'WARNING: Unable to disable ssh logins for foouser given ' + 'ssh_redirect_user: someuser. No cloud public-keys present.\n', + self.logs.getvalue()) + m_setup_user_keys.assert_not_called() + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_with_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + m_setup_user_keys.assert_called_once_with( + set(['key1']), 'foouser', options=disable_prefix) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', + cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + self.assertEqual( + m_setup_user_keys.call_args_list, + [mock.call(set(['auth1']), user), # not disabled + mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + # vi: ts=4 expandtab -- cgit v1.2.3 From c7555762f3a30190ce7726b4d013bc3e83c7e4b6 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 11 Sep 2018 17:31:46 +0000 Subject: user-data: jinja template to render instance-data.json in cloud-config Allow users to provide '## template: jinja' as the first line of their #cloud-config or custom script user-data parts. When this header exists, the cloud-config or script will be rendered as a jinja template. All instance metadata keys and values present in /run/cloud-init/instance-data.json will be available as jinja variables for the template. This means any cloud-config module or script can reference any standardized instance data in templates and scripts. Additionally, any standardized instance-data.json keys scoped below a '<v#>' key will be promoted as a top-level key for ease of reference in templates. This means that '{{ local_hostname }}' is the same as using the latest '{{ v#.local_hostname }}'. Since instance-data is written to /run/cloud-init/instance-data.json, make sure it is persisted across reboots when the cached datasource object is reloaded. 
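As an editorial illustration of the behavior described above (this sketch is not part of the patch; 'v1.local_hostname' follows the standardized 'v#' key naming mentioned in this message, and the keys actually available depend on what the datasource writes to /run/cloud-init/instance-data.json), a jinja-enabled #cloud-config user-data part could look like:

    ## template: jinja
    #cloud-config
    runcmd:
      # v1.local_hostname and its promoted top-level alias local_hostname
      # are substituted from instance-data.json before this part is
      # handled as regular cloud-config.
      - echo "rendered for {{ v1.local_hostname }}" >> /var/tmp/jinja-demo
      - echo "same value: {{ local_hostname }}" >> /var/tmp/jinja-demo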
LP: #1791781 --- bash_completion/cloud-init | 2 + cloudinit/cmd/devel/__init__.py | 25 ++ cloudinit/cmd/devel/parser.py | 5 +- cloudinit/cmd/devel/render.py | 90 ++++++ cloudinit/cmd/devel/tests/test_render.py | 101 +++++++ cloudinit/cmd/main.py | 16 +- cloudinit/handlers/__init__.py | 11 +- cloudinit/handlers/boot_hook.py | 12 +- cloudinit/handlers/cloud_config.py | 15 +- cloudinit/handlers/jinja_template.py | 137 +++++++++ cloudinit/handlers/shell_script.py | 9 +- cloudinit/handlers/upstart_job.py | 9 +- cloudinit/helpers.py | 4 + cloudinit/log.py | 12 +- cloudinit/sources/__init__.py | 47 ++- cloudinit/sources/tests/test_init.py | 75 ++++- cloudinit/stages.py | 22 +- cloudinit/templater.py | 28 +- cloudinit/tests/helpers.py | 9 + doc/rtd/topics/capabilities.rst | 15 +- doc/rtd/topics/datasources.rst | 47 +++ doc/rtd/topics/format.rst | 21 +- tests/cloud_tests/testcases/base.py | 8 +- tests/unittests/test_builtin_handlers.py | 324 +++++++++++++++++++-- .../test_handler/test_handler_etc_hosts.py | 1 + tests/unittests/test_handler/test_handler_ntp.py | 1 + tests/unittests/test_templating.py | 23 ++ 27 files changed, 959 insertions(+), 110 deletions(-) create mode 100755 cloudinit/cmd/devel/render.py create mode 100644 cloudinit/cmd/devel/tests/test_render.py create mode 100644 cloudinit/handlers/jinja_template.py (limited to 'tests/unittests') diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init index f38164b0..b3a5ced3 100644 --- a/bash_completion/cloud-init +++ b/bash_completion/cloud-init @@ -62,6 +62,8 @@ _cloudinit_complete() net-convert) COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word)) ;; + render) + COMPREPLY=($(compgen -W "--help --instance-data --debug" -- $cur_word)) schema) COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word)) ;; diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index e69de29b..3ae28b69 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -0,0 +1,25 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Common cloud-init devel commandline utility functions.""" + + +import logging + +from cloudinit import log +from cloudinit.stages import Init + + +def addLogHandlerCLI(logger, log_level): + """Add a commandline logging handler to emit messages to stderr.""" + formatter = logging.Formatter('%(levelname)s: %(message)s') + log.setupBasicLogging(log_level, formatter=formatter) + return logger + + +def read_cfg_paths(): + """Return a Paths object based on the system configuration on disk.""" + init = Init(ds_deps=[]) + init.read_cfg() + return init.paths + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index 40a4b019..99a234ce 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -8,6 +8,7 @@ import argparse from cloudinit.config import schema from . import net_convert +from . 
import render def get_parser(parser=None): @@ -22,7 +23,9 @@ def get_parser(parser=None): ('schema', 'Validate cloud-config files for document schema', schema.get_parser, schema.handle_schema_args), (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args) + net_convert.get_parser, net_convert.handle_args), + (render.NAME, render.__doc__, + render.get_parser, render.handle_args) ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py new file mode 100755 index 00000000..e85933db --- /dev/null +++ b/cloudinit/cmd/devel/render.py @@ -0,0 +1,90 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Debug jinja template rendering of user-data.""" + +import argparse +import os +import sys + +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file +from cloudinit import log +from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit import util +from . import addLogHandlerCLI, read_cfg_paths + +NAME = 'render' +DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json' + +LOG = log.getLogger(NAME) + + +def get_parser(parser=None): + """Build or extend an arg parser for the jinja render utility. + + @param parser: Optional existing ArgumentParser instance representing the + subcommand which will be extended to support the args of this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) + parser.add_argument( + 'user_data', type=str, help='Path to the user-data file to render') + parser.add_argument( + '-i', '--instance-data', type=str, + help=('Optional path to instance-data.json file. Defaults to' + ' /run/cloud-init/instance-data.json')) + parser.add_argument('-d', '--debug', action='store_true', default=False, + help='Add verbose messages during template render') + return parser + + +def handle_args(name, args): + """Render the provided user-data template file using instance-data values. + + Also set up CLI log handlers to report to stderr since this is a + development utility which should be run by a human on the CLI. + + @return 0 on success, 1 on failure.
+ """ + addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) + if not args.instance_data: + paths = read_cfg_paths() + instance_data_fn = os.path.join( + paths.run_dir, INSTANCE_JSON_FILE) + else: + instance_data_fn = args.instance_data + try: + with open(instance_data_fn) as stream: + instance_data = stream.read() + instance_data = util.load_json(instance_data) + except IOError: + LOG.error('Missing instance-data.json file: %s', instance_data_fn) + return 1 + try: + with open(args.user_data) as stream: + user_data = stream.read() + except IOError: + LOG.error('Missing user-data file: %s', args.user_data) + return 1 + rendered_payload = render_jinja_payload_from_file( + payload=user_data, payload_fn=args.user_data, + instance_data_file=instance_data_fn, + debug=True if args.debug else False) + if not rendered_payload: + LOG.error('Unable to render user-data file: %s', args.user_data) + return 1 + sys.stdout.write(rendered_payload) + return 0 + + +def main(): + args = get_parser().parse_args() + return(handle_args(NAME, args)) + + +if __name__ == '__main__': + sys.exit(main()) + + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py new file mode 100644 index 00000000..fc5d2c0d --- /dev/null +++ b/cloudinit/cmd/devel/tests/test_render.py @@ -0,0 +1,101 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from six import StringIO +import os + +from collections import namedtuple +from cloudinit.cmd.devel import render +from cloudinit.helpers import Paths +from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja +from cloudinit.util import ensure_dir, write_file + + +class TestRender(CiTestCase): + + with_logs = True + + args = namedtuple('renderargs', 'user_data instance_data debug') + + def setUp(self): + super(TestRender, self).setUp() + self.tmp = self.tmp_dir() + + def test_handle_args_error_on_missing_user_data(self): + """When user_data file path does not exist, log an error.""" + absent_file = self.tmp_path('user-data', dir=self.tmp) + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{}') + args = self.args( + user_data=absent_file, instance_data=instance_data, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'Missing user-data file: %s' % absent_file, + self.logs.getvalue()) + + def test_handle_args_error_on_missing_instance_data(self): + """When instance_data file path does not exist, log an error.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + absent_file = self.tmp_path('instance-data', dir=self.tmp) + args = self.args( + user_data=user_data, instance_data=absent_file, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'Missing instance-data.json file: %s' % absent_file, + self.logs.getvalue()) + + def test_handle_args_defaults_instance_data(self): + """When no instance_data argument, default to configured run_dir.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with 
mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) + self.assertIn( + 'Missing instance-data.json file: %s' % json_file, + self.logs.getvalue()) + + @skipUnlessJinja() + def test_handle_args_renders_instance_data_vars_in_template(self): + """If user_data file is a jinja template render instance-data vars.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{"my-var": "jinja worked"}') + args = self.args( + user_data=user_data, instance_data=instance_data, debug=True) + with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err: + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + self.assertEqual(0, render.handle_args('anyname', args)) + self.assertIn( + 'DEBUG: Converted jinja variables\n{', self.logs.getvalue()) + self.assertIn( + 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue()) + self.assertEqual('rendering: jinja worked', m_stdout.getvalue()) + + @skipUnlessJinja() + def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self): + """If user_data file has invalid jinja operations log warnings.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my-var }}') + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{"my-var": "jinja worked"}') + args = self.args( + user_data=user_data, instance_data=instance_data, debug=True) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'WARNING: Ignoring jinja template for %s: Undefined jinja' + ' variable: "my-var". Jinja tried subtraction. Perhaps you meant' + ' "my_var"?' % user_data, + self.logs.getvalue()) + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 4ea4fe7f..0eee583c 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -348,6 +348,7 @@ def main_init(name, args): LOG.debug("[%s] barreling on in force mode without datasource", mode) + _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", @@ -490,6 +491,7 @@ def main_modules(action_name, args): print_exc(msg) if not args.force: return [(msg)] + _maybe_persist_instance_data(init) # Stage 3 mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 @@ -541,6 +543,7 @@ def main_single(name, args): " likely bad things to come!")) if not args.force: return 1 + _maybe_persist_instance_data(init) # Stage 3 mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) mod_args = args.module_args @@ -688,6 +691,15 @@ def status_wrapper(name, args, data_d=None, link_d=None): return len(v1[mode]['errors']) +def _maybe_persist_instance_data(init): + """Write instance-data.json file if absent and datasource is restored.""" + if init.ds_restored: + instance_data_file = os.path.join( + init.paths.run_dir, sources.INSTANCE_JSON_FILE) + if not os.path.exists(instance_data_file): + init.datasource.persist_instance_data() + + def _maybe_set_hostname(init, stage, retry_stage): """Call set-hostname if metadata, vendordata or userdata provides it. 
@@ -887,6 +899,8 @@ def main(sysv_args=None): if __name__ == '__main__': if 'TZ' not in os.environ: os.environ['TZ'] = ":/etc/localtime" - main(sys.argv) + return_value = main(sys.argv) + if return_value: + sys.exit(return_value) # vi: ts=4 expandtab diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index c3576c04..0db75af9 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -41,7 +41,7 @@ PART_HANDLER_FN_TMPL = 'part-handler-%03d' # For parts without filenames PART_FN_TPL = 'part-%03d' -# Different file beginnings to there content type +# Different file beginnings to their content type INCLUSION_TYPES_MAP = { '#include': 'text/x-include-url', '#include-once': 'text/x-include-once-url', @@ -52,6 +52,7 @@ INCLUSION_TYPES_MAP = { '#cloud-boothook': 'text/cloud-boothook', '#cloud-config-archive': 'text/cloud-config-archive', '#cloud-config-jsonp': 'text/cloud-config-jsonp', + '## template: jinja': 'text/jinja2', } # Sorted longest first @@ -69,9 +70,13 @@ class Handler(object): def __repr__(self): return "%s: [%s]" % (type_utils.obj_name(self), self.list_types()) - @abc.abstractmethod def list_types(self): - raise NotImplementedError() + # Each subclass must define the supported content prefixes it handles. + if not hasattr(self, 'prefixes'): + raise NotImplementedError('Missing prefixes subclass attribute') + else: + return [INCLUSION_TYPES_MAP[prefix] + for prefix in getattr(self, 'prefixes')] @abc.abstractmethod def handle_part(self, *args, **kwargs): diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index 057b4dbc..dca50a49 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -17,10 +17,13 @@ from cloudinit import util from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -BOOTHOOK_PREFIX = "#cloud-boothook" class BootHookPartHandler(handlers.Handler): + + # The content prefixes this handler understands. + prefixes = ['#cloud-boothook'] + def __init__(self, paths, datasource, **_kwargs): handlers.Handler.__init__(self, PER_ALWAYS) self.boothook_dir = paths.get_ipath("boothooks") @@ -28,16 +31,11 @@ class BootHookPartHandler(handlers.Handler): if datasource: self.instance_id = datasource.get_instance_id() - def list_types(self): - return [ - handlers.type_from_starts_with(BOOTHOOK_PREFIX), - ] - def _write_part(self, payload, filename): filename = util.clean_filename(filename) filepath = os.path.join(self.boothook_dir, filename) contents = util.strip_prefix_suffix(util.dos2unix(payload), - prefix=BOOTHOOK_PREFIX) + prefix=self.prefixes[0]) util.write_file(filepath, contents.lstrip(), 0o700) return filepath diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 178a5b9b..99bf0e61 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -42,14 +42,12 @@ DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()') CLOUD_PREFIX = "#cloud-config" JSONP_PREFIX = "#cloud-config-jsonp" -# The file header -> content types this module will handle. -CC_TYPES = { - JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX), - CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX), -} - class CloudConfigPartHandler(handlers.Handler): + + # The content prefixes this handler understands. 
+ prefixes = [CLOUD_PREFIX, JSONP_PREFIX] + def __init__(self, paths, **_kwargs): handlers.Handler.__init__(self, PER_ALWAYS, version=3) self.cloud_buf = None @@ -58,9 +56,6 @@ class CloudConfigPartHandler(handlers.Handler): self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"]) self.file_names = [] - def list_types(self): - return list(CC_TYPES.values()) - def _write_cloud_config(self): if not self.cloud_fn: return @@ -138,7 +133,7 @@ class CloudConfigPartHandler(handlers.Handler): # First time through, merge with an empty dict... if self.cloud_buf is None or not self.file_names: self.cloud_buf = {} - if ctype == CC_TYPES[JSONP_PREFIX]: + if ctype == handlers.INCLUSION_TYPES_MAP[JSONP_PREFIX]: self._merge_patch(payload) else: self._merge_part(payload, headers) diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py new file mode 100644 index 00000000..3fa4097e --- /dev/null +++ b/cloudinit/handlers/jinja_template.py @@ -0,0 +1,137 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import os +import re + +try: + from jinja2.exceptions import UndefinedError as JUndefinedError +except ImportError: + # No jinja2 dependency + JUndefinedError = Exception + +from cloudinit import handlers +from cloudinit import log as logging +from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit.templater import render_string, MISSING_JINJA_PREFIX +from cloudinit.util import b64d, load_file, load_json, json_dumps + +from cloudinit.settings import PER_ALWAYS + +LOG = logging.getLogger(__name__) + + +class JinjaTemplatePartHandler(handlers.Handler): + + prefixes = ['## template: jinja'] + + def __init__(self, paths, **_kwargs): + handlers.Handler.__init__(self, PER_ALWAYS, version=3) + self.paths = paths + self.sub_handlers = {} + for handler in _kwargs.get('sub_handlers', []): + for ctype in handler.list_types(): + self.sub_handlers[ctype] = handler + + def handle_part(self, data, ctype, filename, payload, frequency, headers): + if ctype in handlers.CONTENT_SIGNALS: + return + jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + rendered_payload = render_jinja_payload_from_file( + payload, filename, jinja_json_file) + if not rendered_payload: + return + subtype = handlers.type_from_starts_with(rendered_payload) + sub_handler = self.sub_handlers.get(subtype) + if not sub_handler: + LOG.warning( + 'Ignoring jinja template for %s. Could not find supported' + ' sub-handler for type %s', filename, subtype) + return + if sub_handler.handler_version == 3: + sub_handler.handle_part( + data, ctype, filename, rendered_payload, frequency, headers) + elif sub_handler.handler_version == 2: + sub_handler.handle_part( + data, ctype, filename, rendered_payload, frequency) + + +def render_jinja_payload_from_file( + payload, payload_fn, instance_data_file, debug=False): + """Render a jinja template payload sourcing variables from instance_data_file. + + @param payload: String of jinja template content. Should begin with + ## template: jinja\n. + @param payload_fn: String representing the filename from which the payload + was read, used in error reporting. Generally in part-handling this is + 'part-##'. + @param instance_data_file: A path to a json file containing variables that + will be used as jinja template variables. + + @return: A string of jinja-rendered content with the jinja header removed. + Returns None on error.
+ """ + instance_data = {} + rendered_payload = None + if not os.path.exists(instance_data_file): + raise RuntimeError( + 'Cannot render jinja template vars. Instance data not yet' + ' present at %s' % instance_data_file) + instance_data = load_json(load_file(instance_data_file)) + rendered_payload = render_jinja_payload( + payload, payload_fn, instance_data, debug) + if not rendered_payload: + return None + return rendered_payload + + +def render_jinja_payload(payload, payload_fn, instance_data, debug=False): + instance_jinja_vars = convert_jinja_instance_data( + instance_data, + decode_paths=instance_data.get('base64-encoded-keys', [])) + if debug: + LOG.debug('Converted jinja variables\n%s', + json_dumps(instance_jinja_vars)) + try: + rendered_payload = render_string(payload, instance_jinja_vars) + except (TypeError, JUndefinedError) as e: + LOG.warning( + 'Ignoring jinja template for %s: %s', payload_fn, str(e)) + return None + warnings = [ + "'%s'" % var.replace(MISSING_JINJA_PREFIX, '') + for var in re.findall( + r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)] + if warnings: + LOG.warning( + "Could not render jinja template variables in file '%s': %s", + payload_fn, ', '.join(warnings)) + return rendered_payload + + +def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()): + """Process instance-data.json dict for use in jinja templates. + + Replace hyphens with underscores for jinja templates and decode any + base64_encoded_keys. + """ + result = {} + decode_paths = [path.replace('-', '_') for path in decode_paths] + for key, value in sorted(data.items()): + if '-' in key: + # Standardize keys for use in #cloud-config/shell templates + key = key.replace('-', '_') + key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key + if key_path in decode_paths: + value = b64d(value) + if isinstance(value, dict): + result[key] = convert_jinja_instance_data( + value, key_path, sep=sep, decode_paths=decode_paths) + if re.match(r'v\d+', key): + # Copy values to top-level aliases + for subkey, subvalue in result[key].items(): + result[subkey] = subvalue + else: + result[key] = value + return result + +# vi: ts=4 expandtab diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index e4945a23..214714bc 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -17,21 +17,18 @@ from cloudinit import util from cloudinit.settings import (PER_ALWAYS) LOG = logging.getLogger(__name__) -SHELL_PREFIX = "#!" 
class ShellScriptPartHandler(handlers.Handler): + + prefixes = ['#!'] + def __init__(self, paths, **_kwargs): handlers.Handler.__init__(self, PER_ALWAYS) self.script_dir = paths.get_ipath_cur('scripts') if 'script_path' in _kwargs: self.script_dir = paths.get_ipath_cur(_kwargs['script_path']) - def list_types(self): - return [ - handlers.type_from_starts_with(SHELL_PREFIX), - ] - def handle_part(self, data, ctype, filename, payload, frequency): if ctype in handlers.CONTENT_SIGNALS: # TODO(harlowja): maybe delete existing things here diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index dc338769..83fb0724 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -18,19 +18,16 @@ from cloudinit import util from cloudinit.settings import (PER_INSTANCE) LOG = logging.getLogger(__name__) -UPSTART_PREFIX = "#upstart-job" class UpstartJobPartHandler(handlers.Handler): + + prefixes = ['#upstart-job'] + def __init__(self, paths, **_kwargs): handlers.Handler.__init__(self, PER_INSTANCE) self.upstart_dir = paths.upstart_conf_d - def list_types(self): - return [ - handlers.type_from_starts_with(UPSTART_PREFIX), - ] - def handle_part(self, data, ctype, filename, payload, frequency): if ctype in handlers.CONTENT_SIGNALS: return diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 1979cd96..3cc1fb19 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -449,4 +449,8 @@ class DefaultingConfigParser(RawConfigParser): contents = '\n'.join([header, contents, '']) return contents + +def identity(object): + return object + # vi: ts=4 expandtab diff --git a/cloudinit/log.py b/cloudinit/log.py index 1d75c9ff..5ae312ba 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -38,10 +38,18 @@ DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s' logging.Formatter.converter = time.gmtime -def setupBasicLogging(level=DEBUG): +def setupBasicLogging(level=DEBUG, formatter=None): + if not formatter: + formatter = logging.Formatter(DEF_CON_FORMAT) root = logging.getLogger() + for handler in root.handlers: + if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'): + if handler.stream.name == '<stderr>': + handler.setLevel(level) + return + # Didn't have an existing stderr handler; create a new handler console = logging.StreamHandler(sys.stderr) - console.setFormatter(logging.Formatter(DEF_CON_FORMAT)) + console.setFormatter(formatter) console.setLevel(level) root.addHandler(console) root.setLevel(level) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 41fde9ba..a775f1a8 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -58,22 +58,27 @@ class InvalidMetaDataException(Exception): pass -def process_base64_metadata(metadata, key_path=''): - """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" +def process_instance_metadata(metadata, key_path=''): + """Process all instance metadata cleaning it up for persisting as json. + + Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list. + + @return Dict copy of processed metadata.
+ """ md_copy = copy.deepcopy(metadata) - md_copy['base64-encoded-keys'] = [] + md_copy['base64_encoded_keys'] = [] for key, val in metadata.items(): if key_path: sub_key_path = key_path + '/' + key else: sub_key_path = key if isinstance(val, str) and val.startswith('ci-b64:'): - md_copy['base64-encoded-keys'].append(sub_key_path) + md_copy['base64_encoded_keys'].append(sub_key_path) md_copy[key] = val.replace('ci-b64:', '') if isinstance(val, dict): - return_val = process_base64_metadata(val, sub_key_path) - md_copy['base64-encoded-keys'].extend( - return_val.pop('base64-encoded-keys')) + return_val = process_instance_metadata(val, sub_key_path) + md_copy['base64_encoded_keys'].extend( + return_val.pop('base64_encoded_keys')) md_copy[key] = return_val return md_copy @@ -180,15 +185,24 @@ class DataSource(object): """ self._dirty_cache = True return_value = self._get_data() - json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) if not return_value: return return_value + self.persist_instance_data() + return return_value + + def persist_instance_data(self): + """Process and write INSTANCE_JSON_FILE with all instance metadata. + Replace any hyphens with underscores in key names for use in template + processing. + + @return True on successful write, False otherwise. + """ instance_data = { 'ds': { - 'meta-data': self.metadata, - 'user-data': self.get_userdata_raw(), - 'vendor-data': self.get_vendordata_raw()}} + 'meta_data': self.metadata, + 'user_data': self.get_userdata_raw(), + 'vendor_data': self.get_vendordata_raw()}} if hasattr(self, 'network_json'): network_json = getattr(self, 'network_json') if network_json != UNSET: @@ -202,16 +216,17 @@ class DataSource(object): try: # Process content base64encoding unserializable values content = util.json_dumps(instance_data) - # Strip base64: prefix and return base64-encoded-keys - processed_data = process_base64_metadata(json.loads(content)) + # Strip base64: prefix and set base64_encoded_keys list. 
+ processed_data = process_instance_metadata(json.loads(content)) except TypeError as e: LOG.warning('Error persisting instance-data.json: %s', str(e)) - return return_value + return False except UnicodeDecodeError as e: LOG.warning('Error persisting instance-data.json: %s', str(e)) - return return_value + return False + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) write_json(json_file, processed_data, mode=0o600) - return return_value + return True def _get_data(self): """Walk metadata sources, process crawled data and save attributes.""" diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 9e939c1e..8299af23 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -20,10 +20,12 @@ class DataSourceTestSubclassNet(DataSource): dsname = 'MyTestSubclass' url_max_wait = 55 - def __init__(self, sys_cfg, distro, paths, custom_userdata=None): + def __init__(self, sys_cfg, distro, paths, custom_userdata=None, + get_data_retval=True): super(DataSourceTestSubclassNet, self).__init__( sys_cfg, distro, paths) self._custom_userdata = custom_userdata + self._get_data_retval = get_data_retval def _get_cloud_name(self): return 'SubclassCloudName' @@ -37,7 +39,7 @@ class DataSourceTestSubclassNet(DataSource): else: self.userdata_raw = 'userdata_raw' self.vendordata_raw = 'vendordata_raw' - return True + return self._get_data_retval class InvalidDataSourceTestSubclassNet(DataSource): @@ -264,7 +266,18 @@ class TestDataSource(CiTestCase): self.assertEqual('fqdnhostname.domain.com', datasource.get_hostname(fqdn=True)) - def test_get_data_write_json_instance_data(self): + def test_get_data_does_not_write_instance_data_on_failure(self): + """get_data does not write INSTANCE_JSON_FILE on get_data False.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + get_data_retval=False) + self.assertFalse(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + self.assertFalse( + os.path.exists(json_file), 'Found unexpected file %s' % json_file) + + def test_get_data_writes_json_instance_data_on_success(self): """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( @@ -273,7 +286,7 @@ class TestDataSource(CiTestCase): json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) expected = { - 'base64-encoded-keys': [], + 'base64_encoded_keys': [], 'v1': { 'availability-zone': 'myaz', 'cloud-name': 'subclasscloudname', @@ -281,11 +294,12 @@ class TestDataSource(CiTestCase): 'local-hostname': 'test-subclass-hostname', 'region': 'myregion'}, 'ds': { - 'meta-data': {'availability_zone': 'myaz', + 'meta_data': {'availability_zone': 'myaz', 'local-hostname': 'test-subclass-hostname', 'region': 'myregion'}, - 'user-data': 'userdata_raw', - 'vendor-data': 'vendordata_raw'}} + 'user_data': 'userdata_raw', + 'vendor_data': 'vendordata_raw'}} + self.maxDiff = None self.assertEqual(expected, util.load_json(content)) file_stat = os.stat(json_file) self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) @@ -296,7 +310,7 @@ class TestDataSource(CiTestCase): datasource = DataSourceTestSubclassNet( self.sys_cfg, self.distro, Paths({'run_dir': tmp}), custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) - self.assertTrue(datasource.get_data()) + datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = 
util.load_file(json_file) expected_userdata = { @@ -306,7 +320,40 @@ class TestDataSource(CiTestCase): " 'cloudinit.helpers.Paths'>"}} instance_json = util.load_json(content) self.assertEqual( - expected_userdata, instance_json['ds']['user-data']) + expected_userdata, instance_json['ds']['user_data']) + + def test_persist_instance_data_writes_ec2_metadata_when_set(self): + """When ec2_metadata class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.ec2_metadata = UNSET + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('ec2_metadata', instance_data['ds']) + datasource.ec2_metadata = {'ec2stuff': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'ec2stuff': 'is good'}, + instance_data['ds']['ec2_metadata']) + + def test_persist_instance_data_writes_network_json_when_set(self): + """When network_data.json class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('network_json', instance_data['ds']) + datasource.network_json = {'network_json': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'network_json': 'is good'}, + instance_data['ds']['network_json']) @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") def test_get_data_base64encodes_unserializable_bytes(self): @@ -320,11 +367,11 @@ class TestDataSource(CiTestCase): content = util.load_file(json_file) instance_json = util.load_json(content) self.assertEqual( - ['ds/user-data/key2/key2.1'], - instance_json['base64-encoded-keys']) + ['ds/user_data/key2/key2.1'], + instance_json['base64_encoded_keys']) self.assertEqual( {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, - instance_json['ds']['user-data']) + instance_json['ds']['user_data']) @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") def test_get_data_handles_bytes_values(self): @@ -337,10 +384,10 @@ class TestDataSource(CiTestCase): json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) instance_json = util.load_json(content) - self.assertEqual([], instance_json['base64-encoded-keys']) + self.assertEqual([], instance_json['base64_encoded_keys']) self.assertEqual( {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, - instance_json['ds']['user-data']) + instance_json['ds']['user_data']) @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") def test_non_utf8_encoding_logs_warning(self): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8874d405..ef5c6996 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -17,10 +17,11 @@ from cloudinit.settings import ( from cloudinit import handlers # Default handlers (used if not overridden) -from cloudinit.handlers import boot_hook as bh_part -from cloudinit.handlers import cloud_config as cc_part -from cloudinit.handlers import shell_script as ss_part -from cloudinit.handlers import upstart_job as up_part +from cloudinit.handlers.boot_hook import BootHookPartHandler +from cloudinit.handlers.cloud_config import 
CloudConfigPartHandler +from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler +from cloudinit.handlers.shell_script import ShellScriptPartHandler +from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.event import EventType @@ -413,12 +414,17 @@ class Init(object): 'datasource': self.datasource, }) # TODO(harlowja) Hmmm, should we dynamically import these?? + cloudconfig_handler = CloudConfigPartHandler(**opts) + shellscript_handler = ShellScriptPartHandler(**opts) def_handlers = [ - cc_part.CloudConfigPartHandler(**opts), - ss_part.ShellScriptPartHandler(**opts), - bh_part.BootHookPartHandler(**opts), - up_part.UpstartJobPartHandler(**opts), + cloudconfig_handler, + shellscript_handler, + BootHookPartHandler(**opts), + UpstartJobPartHandler(**opts), ] + opts.update( + {'sub_handlers': [cloudconfig_handler, shellscript_handler]}) + def_handlers.append(JinjaTemplatePartHandler(**opts)) return def_handlers def _default_userdata_handlers(self): diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 7e7acb86..b668674b 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -13,6 +13,7 @@ import collections import re + try: from Cheetah.Template import Template as CTemplate CHEETAH_AVAILABLE = True @@ -20,23 +21,44 @@ except (ImportError, AttributeError): CHEETAH_AVAILABLE = False try: - import jinja2 + from jinja2.runtime import implements_to_string from jinja2 import Template as JTemplate + from jinja2 import DebugUndefined as JUndefined JINJA_AVAILABLE = True except (ImportError, AttributeError): + from cloudinit.helpers import identity + implements_to_string = identity JINJA_AVAILABLE = False + JUndefined = object from cloudinit import log as logging from cloudinit import type_utils as tu from cloudinit import util + LOG = logging.getLogger(__name__) TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I) BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)') +MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/' + + +@implements_to_string # Needed for python2.7. Otherwise cached super.__str__ +class UndefinedJinjaVariable(JUndefined): + """Class used to represent any undefined jinja template variable.""" + + def __str__(self): + return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name) + + def __sub__(self, other): + other = str(other).replace(MISSING_JINJA_PREFIX, '') + raise TypeError( + 'Undefined jinja variable: "{this}-{other}". Jinja tried' + ' subtraction. Perhaps you meant "{this}_{other}"?'.format( + this=self._undefined_name, other=other)) def basic_render(content, params): - """This does simple replacement of bash variable like templates.
It identifies patterns like ${a} or $a and can also identify patterns like ${a.b} or $a.b which will look for a key 'b' in the dictionary rooted @@ -82,7 +104,7 @@ def detect_template(text): # keep_trailing_newline is in jinja2 2.7+, not 2.6 add = "\n" if content.endswith("\n") else "" return JTemplate(content, - undefined=jinja2.StrictUndefined, + undefined=UndefinedJinjaVariable, trim_blocks=True).render(**params) + add if text.find("\n") != -1: diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 42f56c27..2eb7b0cd 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -32,6 +32,7 @@ from cloudinit import cloud from cloudinit import distros from cloudinit import helpers as ch from cloudinit.sources import DataSourceNone +from cloudinit.templater import JINJA_AVAILABLE from cloudinit import util _real_subp = util.subp @@ -518,6 +519,14 @@ def skipUnlessJsonSchema(): _missing_jsonschema_dep, "No python-jsonschema dependency present.") +def skipUnlessJinja(): + return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.") + + +def skipIfJinja(): + return skipIf(JINJA_AVAILABLE, "Jinja dependency present.") + + # older versions of mock do not have the useful 'assert_not_called' if not hasattr(mock.Mock, 'assert_not_called'): def __mock_assert_not_called(mmock): diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst index 3e2c9e31..2d8e2538 100644 --- a/doc/rtd/topics/capabilities.rst +++ b/doc/rtd/topics/capabilities.rst @@ -16,13 +16,15 @@ User configurability `Cloud-init`_ 's behavior can be configured via user-data. - User-data can be given by the user at instance launch time. + User-data can be given by the user at instance launch time. See + :ref:`user_data_formats` for acceptable user-data content. + This is done via the ``--user-data`` or ``--user-data-file`` argument to ec2-run-instances for example. -* Check your local clients documentation for how to provide a `user-data` - string or `user-data` file for usage by cloud-init on instance creation. +* Check your local client's documentation for how to provide a `user-data` + string or `user-data` file to cloud-init on instance creation. Feature detection @@ -166,6 +168,13 @@ likely be promoted to top-level subcommands when stable. validation is work in progress and supports a subset of cloud-config modules. + * ``cloud-init devel render``: Use cloud-init's jinja template render to + process **#cloud-config** or **custom-scripts**, injecting any variables + from ``/run/cloud-init/instance-data.json``. It accepts a user-data file + containing the jinja template header ``## template: jinja`` and renders + that content with any instance-data.json variables present. + + .. _cli_clean: cloud-init clean diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 83034589..14432e65 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -18,6 +18,8 @@ single way to access the different cloud systems methods to provide this data through the typical usage of subclasses. +.. _instance_metadata: + instance-data ------------- For reference, cloud-init stores all the metadata, vendordata and userdata @@ -110,6 +112,51 @@ Below is an instance-data.json example from an OpenStack instance: } } + +As of cloud-init v. 18.4, any values present in +``/run/cloud-init/instance-data.json`` can be used in cloud-init user data +scripts or cloud config data. 
This allows consumers to use cloud-init's +vendor-neutral, standardized metadata keys as well as datasource-specific +content for any scripts or cloud-config modules they are using. + +To use instance-data.json values in scripts and **#cloud-config** files the +user-data will need to contain the following header as its first line: **## template: jinja**. Cloud-init will source all variables defined in +``/run/cloud-init/instance-data.json`` and allow scripts or cloud-config files +to reference those paths. Below are two examples:: + + * Cloud config calling home with the ec2 public hostname and availability-zone + ``` + ## template: jinja + #cloud-config + runcmd: + - echo 'EC2 public hostname allocated to instance: {{ ds.meta_data.public_hostname }}' > /tmp/instance_metadata + - echo 'EC2 availability zone: {{ v1.availability_zone }}' >> /tmp/instance_metadata + - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}", "availability-zone": "{{ v1.availability_zone }}"}' https://example.com + ``` + + * Custom user script performing different operations based on region + ``` + ## template: jinja + #!/bin/bash + {% if v1.region == 'us-east-2' -%} + echo 'Installing custom proxies for {{ v1.region }}' + sudo apt-get install my-xtra-fast-stack + {%- endif %} + ... + + ``` + +.. note:: + Trying to reference jinja variables that don't exist in + instance-data.json will result in warnings in ``/var/log/cloud-init.log`` + and the following string in your rendered user-data: + ``CI_MISSING_JINJA_VAR/``. + +.. note:: + To save time designing your user-data for a specific cloud's + instance-data.json, use the 'render' cloud-init command on an + instance booted on your favorite cloud. See :ref:`cli_devel` for more + information. Datasource API diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 1b0ff366..15234d21 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -1,6 +1,8 @@ -******* -Formats -******* +.. _user_data_formats: + +***************** +User-Data Formats +***************** User data that will be acted upon by cloud-init must be in one of the following types. @@ -65,6 +67,11 @@ Typically used by those who just want to execute a shell script. Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive. +.. note:: + New in cloud-init v. 18.4: User-data scripts can also render cloud instance + metadata variables using jinja templating. See + :ref:`instance_metadata` for more information. + Example ------- @@ -103,12 +110,18 @@ These things include: - certain ssh keys should be imported - *and many more...* -**Note:** The file must be valid yaml syntax. +.. note:: + This file must be valid yaml syntax. See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats. Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive. +.. note:: + New in cloud-init v. 18.4: Cloud config data can also render cloud instance + metadata variables using jinja templating. See + :ref:`instance_metadata` for more information.
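For reference, a rough Python equivalent of what ``cloud-init devel render <user-data-file>`` performs, using the render_jinja_payload_from_file helper added in this series; the file path below is an example, not a requirement:

```python
# Sketch of what `cloud-init devel render my-user-data` does under the hood;
# 'my-user-data' is an example path and jinja2 must be installed.
from cloudinit.handlers.jinja_template import render_jinja_payload_from_file

with open('my-user-data') as stream:
    payload = stream.read()

rendered = render_jinja_payload_from_file(
    payload=payload, payload_fn='my-user-data',
    instance_data_file='/run/cloud-init/instance-data.json', debug=True)
if rendered:
    print(rendered)  # the payload with the '## template: jinja' header removed
```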
+ Upstart Job =========== diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 696db8dd..27458271 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -168,7 +168,7 @@ class CloudTestCase(unittest.TestCase): ' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) self.assertEqual( - ['ds/user-data'], instance_data['base64-encoded-keys']) + ['ds/user_data'], instance_data['base64_encoded_keys']) ds = instance_data.get('ds', {}) v1_data = instance_data.get('v1', {}) metadata = ds.get('meta-data', {}) @@ -214,8 +214,8 @@ class CloudTestCase(unittest.TestCase): instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) self.assertEqual( - ['ds/user-data', 'ds/vendor-data'], - sorted(instance_data['base64-encoded-keys'])) + ['ds/user_data', 'ds/vendor_data'], + sorted(instance_data['base64_encoded_keys'])) self.assertEqual('nocloud', v1_data['cloud-name']) self.assertIsNone( v1_data['availability-zone'], @@ -249,7 +249,7 @@ class CloudTestCase(unittest.TestCase): instance_data = json.loads(out) v1_data = instance_data.get('v1', {}) self.assertEqual( - ['ds/user-data'], instance_data['base64-encoded-keys']) + ['ds/user_data'], instance_data['base64_encoded_keys']) self.assertEqual('nocloud', v1_data['cloud-name']) self.assertIsNone( v1_data['availability-zone'], diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 9751ed95..abe820e1 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -2,27 +2,34 @@ """Tests of the built-in user data handlers.""" +import copy import os import shutil import tempfile +from textwrap import dedent -try: - from unittest import mock -except ImportError: - import mock -from cloudinit.tests import helpers as test_helpers +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja) from cloudinit import handlers from cloudinit import helpers from cloudinit import util -from cloudinit.handlers import upstart_job +from cloudinit.handlers.cloud_config import CloudConfigPartHandler +from cloudinit.handlers.jinja_template import ( + JinjaTemplatePartHandler, convert_jinja_instance_data, + render_jinja_payload) +from cloudinit.handlers.shell_script import ShellScriptPartHandler +from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE) -class TestBuiltins(test_helpers.FilesystemMockingTestCase): +class TestUpstartJobPartHandler(FilesystemMockingTestCase): + + mpath = 'cloudinit.handlers.upstart_job.' + def test_upstart_frequency_no_out(self): c_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, c_root) @@ -32,14 +39,13 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): 'cloud_dir': c_root, 'upstart_dir': up_root, }) - freq = PER_ALWAYS - h = upstart_job.UpstartJobPartHandler(paths) + h = UpstartJobPartHandler(paths) # No files should be written out when # the frequency is ! per-instance h.handle_part('', handlers.CONTENT_START, None, None, None) h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', freq) + 'test.conf', 'blah', frequency=PER_ALWAYS) h.handle_part('', handlers.CONTENT_END, None, None, None) self.assertEqual(0, len(os.listdir(up_root))) @@ -48,7 +54,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): # files should be written out when frequency is ! 
per-instance new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, new_root) - freq = PER_INSTANCE self.patchOS(new_root) self.patchUtils(new_root) @@ -56,22 +61,297 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase): 'upstart_dir': "/etc/upstart", }) - upstart_job.SUITABLE_UPSTART = True util.ensure_dir("/run") util.ensure_dir("/etc/upstart") - with mock.patch.object(util, 'subp') as mockobj: - h = upstart_job.UpstartJobPartHandler(paths) - h.handle_part('', handlers.CONTENT_START, - None, None, None) - h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', freq) - h.handle_part('', handlers.CONTENT_END, - None, None, None) + with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True): + with mock.patch.object(util, 'subp') as m_subp: + h = UpstartJobPartHandler(paths) + h.handle_part('', handlers.CONTENT_START, + None, None, None) + h.handle_part('blah', 'text/upstart-job', + 'test.conf', 'blah', frequency=PER_INSTANCE) + h.handle_part('', handlers.CONTENT_END, + None, None, None) - self.assertEqual(len(os.listdir('/etc/upstart')), 1) + self.assertEqual(len(os.listdir('/etc/upstart')), 1) - mockobj.assert_called_once_with( + m_subp.assert_called_once_with( ['initctl', 'reload-configuration'], capture=False) + +class TestJinjaTemplatePartHandler(CiTestCase): + + with_logs = True + + mpath = 'cloudinit.handlers.jinja_template.' + + def setUp(self): + super(TestJinjaTemplatePartHandler, self).setUp() + self.tmp = self.tmp_dir() + self.run_dir = os.path.join(self.tmp, 'run_dir') + util.ensure_dir(self.run_dir) + self.paths = helpers.Paths({ + 'cloud_dir': self.tmp, 'run_dir': self.run_dir}) + + def test_jinja_template_part_handler_defaults(self): + """On init, paths are saved and subhandler types are empty.""" + h = JinjaTemplatePartHandler(self.paths) + self.assertEqual(['## template: jinja'], h.prefixes) + self.assertEqual(3, h.handler_version) + self.assertEqual(self.paths, h.paths) + self.assertEqual({}, h.sub_handlers) + + def test_jinja_template_part_handler_looks_up_sub_handler_types(self): + """When sub_handlers are passed, init lists types of subhandlers.""" + script_handler = ShellScriptPartHandler(self.paths) + cloudconfig_handler = CloudConfigPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.assertItemsEqual( + ['text/cloud-config', 'text/cloud-config-jsonp', + 'text/x-shellscript'], + h.sub_handlers) + + def test_jinja_template_part_handler_looks_up_subhandler_types(self): + """When sub_handlers are passed, init lists types of subhandlers.""" + script_handler = ShellScriptPartHandler(self.paths) + cloudconfig_handler = CloudConfigPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.assertItemsEqual( + ['text/cloud-config', 'text/cloud-config-jsonp', + 'text/x-shellscript'], + h.sub_handlers) + + def test_jinja_template_handle_noop_on_content_signals(self): + """Perform no part handling when content type is CONTENT_SIGNALS.""" + script_handler = ShellScriptPartHandler(self.paths) + + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, 'handle_part') as m_handle_part: + h.handle_part( + data='data', ctype=handlers.CONTENT_START, filename='part-1', + payload='## template: jinja\n#!/bin/bash\necho himom', + frequency='freq', headers='headers') + m_handle_part.assert_not_called() + + @skipUnlessJinja() + def 
test_jinja_template_handle_subhandler_v2_with_clean_payload(self): + """Call version 2 subhandler.handle_part with stripped payload.""" + script_handler = ShellScriptPartHandler(self.paths) + self.assertEqual(2, script_handler.handler_version) + + # Create required instance-data.json file + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': 'echo himom'} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, 'handle_part') as m_part: + # ctype with leading '!' not in handlers.CONTENT_SIGNALS + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}', + frequency='freq', headers='headers') + m_part.assert_called_once_with( + 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq') + + @skipUnlessJinja() + def test_jinja_template_handle_subhandler_v3_with_clean_payload(self): + """Call version 3 subhandler.handle_part with stripped payload.""" + cloudcfg_handler = CloudConfigPartHandler(self.paths) + self.assertEqual(3, cloudcfg_handler.handler_version) + + # Create required instance-data.json file + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[cloudcfg_handler]) + with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part: + # ctype with leading '!' not in handlers.CONTENT_SIGNALS + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_END, + filename='part01', + payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}', + frequency='freq', headers='headers') + m_part.assert_called_once_with( + 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]', + 'freq', 'headers') + + def test_jinja_template_handle_errors_on_missing_instance_data_json(self): + """If instance-data is absent, raise an error from handle_part.""" + script_handler = ShellScriptPartHandler(self.paths) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + with self.assertRaises(RuntimeError) as context_manager: + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \n#!/bin/bash\necho himom', + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertEqual( + 'Cannot render jinja template vars. Instance data not yet present' + ' at {}/instance-data.json'.format( + self.run_dir), str(context_manager.exception)) + self.assertFalse( + os.path.exists(script_file), + 'Unexpected file created %s' % script_file) + + @skipUnlessJinja() + def test_jinja_template_handle_renders_jinja_content(self): + """When present, render jinja variables from instance-data.json.""" + script_handler = ShellScriptPartHandler(self.paths) + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'subkey': 'echo himom'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + h.handle_part( + data='data', ctype="!" 
+ handlers.CONTENT_START, + filename='part01', + payload=( + '## template: jinja \n' + '#!/bin/bash\n' + '{{ topkey.subkey|default("nosubkey") }}'), + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertNotIn( + 'Instance data not yet present at {}/instance-data.json'.format( + self.run_dir), + self.logs.getvalue()) + self.assertEqual( + '#!/bin/bash\necho himom', util.load_file(script_file)) + + @skipUnlessJinja() + def test_jinja_template_handle_renders_jinja_content_missing_keys(self): + """When specified jinja variable is undefined, log a warning.""" + script_handler = ShellScriptPartHandler(self.paths) + instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_data = {'topkey': {'subkey': 'echo himom'}} + util.write_file(instance_json, util.json_dumps(instance_data)) + h = JinjaTemplatePartHandler( + self.paths, sub_handlers=[script_handler]) + h.handle_part( + data='data', ctype="!" + handlers.CONTENT_START, + filename='part01', + payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}', + frequency='freq', headers='headers') + script_file = os.path.join(script_handler.script_dir, 'part01') + self.assertTrue( + os.path.exists(script_file), + 'Missing expected file %s' % script_file) + self.assertIn( + "WARNING: Could not render jinja template variables in file" + " 'part01': 'goodtry'\n", + self.logs.getvalue()) + + +class TestConvertJinjaInstanceData(CiTestCase): + + def test_convert_instance_data_hyphens_to_underscores(self): + """Replace hyphenated keys with underscores in instance-data.""" + data = {'hyphenated-key': 'hyphenated-val', + 'underscore_delim_key': 'underscore_delimited_val'} + expected_data = {'hyphenated_key': 'hyphenated-val', + 'underscore_delim_key': 'underscore_delimited_val'} + self.assertEqual( + expected_data, + convert_jinja_instance_data(data=data)) + + def test_convert_instance_data_promotes_versioned_keys_to_top_level(self): + """Any versioned keys are promoted as top-level keys + + This promotes any cloud-init standardized keys to the top level to + allow ease of reference for users. Instead of v1.availability_zone, + the name availability_zone can be used in templates.
+ """ + data = {'ds': {'dskey1': 1, 'dskey2': 2}, + 'v1': {'v1key1': 'v1.1'}, + 'v2': {'v2key1': 'v2.1'}} + expected_data = copy.deepcopy(data) + expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'}) + + converted_data = convert_jinja_instance_data(data=data) + self.assertItemsEqual( + ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys()) + self.assertEqual( + expected_data, + converted_data) + + def test_convert_instance_data_most_recent_version_of_promoted_keys(self): + """The most-recent versioned key value is promoted to top-level.""" + data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'}, + 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'}, + 'v3': {'key1': 'newest v3 key1'}} + expected_data = copy.deepcopy(data) + expected_data.update( + {'key1': 'newest v3 key1', 'key2': 'old v1 key2', + 'key3': 'newer v2 key3'}) + + converted_data = convert_jinja_instance_data(data=data) + self.assertEqual( + expected_data, + converted_data) + + def test_convert_instance_data_decodes_decode_paths(self): + """Any decode_paths provided are decoded by convert_instance_data.""" + data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'} + expected_data = copy.deepcopy(data) + expected_data['key1']['subkey1'] = 'hi mom' + + converted_data = convert_jinja_instance_data( + data=data, decode_paths=('key1/subkey1',)) + self.assertEqual( + expected_data, + converted_data) + + +class TestRenderJinjaPayload(CiTestCase): + + with_logs = True + + @skipUnlessJinja() + def test_render_jinja_payload_logs_jinja_vars_on_debug(self): + """When debug is True, log jinja varables available.""" + payload = ( + '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}') + instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} + expected_log = dedent("""\ + DEBUG: Converted jinja variables + { + "hostname": "foo", + "instance_id": "iid", + "v1": { + "hostname": "foo" + } + } + """) + self.assertEqual( + render_jinja_payload( + payload=payload, payload_fn='myfile', + instance_data=instance_data, debug=True), + '#!/bin/sh\necho hi from foo') + self.assertEqual(expected_log, self.logs.getvalue()) + + @skipUnlessJinja() + def test_render_jinja_payload_replaces_missing_variables_and_warns(self): + """Warn on missing jinja variables and replace the absent variable.""" + payload = ( + '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}') + instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} + self.assertEqual( + render_jinja_payload( + payload=payload, payload_fn='myfile', + instance_data=instance_data), + '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE') + expected_log = ( + 'WARNING: Could not render jinja template variables in file' + " 'myfile': 'NOTHERE'") + self.assertIn(expected_log, self.logs.getvalue()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py index ced05a8d..d854afcb 100644 --- a/tests/unittests/test_handler/test_handler_etc_hosts.py +++ b/tests/unittests/test_handler/test_handler_etc_hosts.py @@ -49,6 +49,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase): if '192.168.1.1\tblah.blah.us\tblah' not in contents: self.assertIsNone('Default etc/hosts content modified') + @t_help.skipUnlessJinja() def test_write_etc_hosts_suse_template(self): cfg = { 'manage_etc_hosts': 'template', diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 6fe3659d..0f22e579 100644 --- 
a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -3,6 +3,7 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) + from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 20c87efa..c36e6eb0 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -21,6 +21,9 @@ except ImportError: class TestTemplates(test_helpers.CiTestCase): + + with_logs = True + jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n' jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8') @@ -124,6 +127,13 @@ $a,$b''' self.add_header("jinja", self.jinja_utf8), {"name": "bob"}), self.jinja_utf8_rbob) + def test_jinja_nonascii_render_undefined_variables_to_default_py3(self): + """Test py3 jinja render_to_string with undefined variable default.""" + self.assertEqual( + templater.render_string( + self.add_header("jinja", self.jinja_utf8), {}), + self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name')) + def test_jinja_nonascii_render_to_file(self): """Test jinja render_to_file of a filename with non-ascii content.""" tmpl_fn = self.tmp_path("j-render-to-file.template") @@ -144,5 +154,18 @@ $a,$b''' result = templater.render_from_file(tmpl_fn, {"name": "bob"}) self.assertEqual(result, self.jinja_utf8_rbob) + @test_helpers.skipIfJinja() + def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self): + """Test jinja render_from_file will fall back to the basic renderer.""" + tmpl_fn = self.tmp_path("j-render-from-file.template") + write_file(tmpl_fn, omode="wb", + content=self.add_header( + "jinja", self.jinja_utf8).encode('utf-8')) + result = templater.render_from_file(tmpl_fn, {"name": "bob"}) + self.assertEqual(result, self.jinja_utf8.decode()) + self.assertIn( + 'WARNING: Jinja not available as the selected renderer for desired' + ' template, reverting to the basic renderer.', + self.logs.getvalue()) # vi: ts=4 expandtab -- cgit v1.2.3 From c75c582ed1824dc3ff39cefdbddccfc2924b868c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 12 Sep 2018 18:44:19 +0000 Subject: OpenStack: fix bug causing 'latest' version to be used from network. Cloud-init was reading a list of versions from the OpenStack metadata service (http://169.254.169.254/openstack/) and attempting to select the newest known supported version. The problem was that the list of versions was not being decoded, so we were comparing a list of bytes (found versions) to a list of strings (known versions).
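The failure mode is straightforward to reproduce on Python 3; the version strings below are examples:

```python
# Illustration of the bug: undecoded bytes never equal str, so every found
# version looked unsupported and selection fell back to 'latest'.
found = [b'2013-10-17', b'2015-10-15']     # read without decode=True
supported = ['2013-10-17', '2015-10-15']   # known-good versions
assert all(version not in supported for version in found)

# Decoding the listing (as this fix does) makes matching work as intended.
decoded = [version.decode('utf-8') for version in found]
assert '2015-10-15' in decoded
```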
LP: #1792157 --- cloudinit/sources/helpers/openstack.py | 2 +- tests/unittests/test_datasource/test_openstack.py | 30 +++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 8f9c1441..e3d372b6 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -439,7 +439,7 @@ class MetadataReader(BaseReader): return self._versions found = [] version_path = self._path_join(self.base_path, "openstack") - content = self._path_read(version_path) + content = self._path_read(version_path, decode=True) for line in content.splitlines(): line = line.strip() if not line: diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 6e1e971b..ab5d3adc 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -555,4 +555,34 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_proc_env.assert_called_with(1) +class TestMetadataReader(test_helpers.HttprettyTestCase): + """Test the MetadataReader.""" + burl = 'http://169.254.169.254/' + + def register(self, path, body=None, status=200): + hp.register_uri( + hp.GET, self.burl + "openstack" + path, status=status, body=body) + + def register_versions(self, versions): + self.register("", '\n'.join(versions).encode('utf-8')) + + def test__find_working_version(self): + """Test a working version ignores unsupported.""" + unsup = "2016-11-09" + self.register_versions( + [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, + openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LIBERTY, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test__find_working_version_uses_latest(self): + """'latest' should be used if no supported versions.""" + unsup1, unsup2 = ("2016-11-09", '2017-06-06') + self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LATEST, + openstack.MetadataReader(self.burl)._find_working_version()) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 4361e0e42a34ebf9f49dde2356b7261715cf0cd5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Sep 2018 18:44:20 +0000 Subject: OpenStack: support reading of newer versions of metadata.
Mark as supported for reading some newer versions of openstack metadata: 2016-06-30 : Newton one 2016-10-06 : Newton two 2017-02-22 : Ocata 2018-08-27 : Rocky --- cloudinit/sources/helpers/openstack.py | 16 ++++++ tests/unittests/test_datasource/test_openstack.py | 66 +++++++++++++++++++++-- 2 files changed, 79 insertions(+), 3 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index e3d372b6..1e651440 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -38,17 +38,33 @@ KEY_COPIES = ( ('local-hostname', 'hostname', False), ('instance-id', 'uuid', True), ) + +# Versions and names taken from nova source nova/api/metadata/base.py OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' OS_LIBERTY = '2015-10-15' +# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan) +OS_NEWTON_ONE = '2016-06-30' +# NEWTON_TWO adds vendor_data2.json (vendordata-reboot) +OS_NEWTON_TWO = '2016-10-06' +# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan) +OS_OCATA = '2017-02-22' +# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs) +OS_ROCKY = '2018-08-27' + + # keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, OS_HAVANA, OS_LIBERTY, + OS_NEWTON_ONE, + OS_NEWTON_TWO, + OS_OCATA, + OS_ROCKY, ) PHYSICAL_TYPES = ( diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index ab5d3adc..a731f1ed 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -12,7 +12,7 @@ import re from cloudinit.tests import helpers as test_helpers from six.moves.urllib.parse import urlparse -from six import StringIO +from six import StringIO, text_type from cloudinit import helpers from cloudinit import settings @@ -558,13 +558,36 @@ class TestDetectOpenStack(test_helpers.CiTestCase): class TestMetadataReader(test_helpers.HttprettyTestCase): """Test the MetadataReader.""" burl = 'http://169.254.169.254/' + md_base = { + 'availability_zone': 'myaz1', + 'hostname': 'sm-foo-test.novalocal', + "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], + 'launch_index': 0, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} def register(self, path, body=None, status=200): + content = (body if not isinstance(body, text_type) + else body.encode('utf-8')) hp.register_uri( - hp.GET, self.burl + "openstack" + path, status=status, body=body) + hp.GET, self.burl + "openstack" + path, status=status, + body=content) def register_versions(self, versions): - self.register("", '\n'.join(versions).encode('utf-8')) + self.register("", '\n'.join(versions)) + self.register("/", '\n'.join(versions)) + + def register_version(self, version, data): + content = '\n'.join(sorted(data.keys())) + self.register(version, content) + self.register(version + "/", content) + for path, content in data.items(): + self.register("/%s/%s" % (version, path), content) + self.register("/%s/%s" % (version, path), content) + if 'user_data' not in data: + self.register("/%s/user_data" % version, "nodata", status=404) def test__find_working_version(self): """Test a working version ignores unsupported.""" @@ -584,5 +607,42 @@ class 
TestMetadataReader(test_helpers.HttprettyTestCase): openstack.OS_LATEST, openstack.MetadataReader(self.burl)._find_working_version()) + def test_read_v2_os_ocata(self): + """Validate return value of read_v2 for os_ocata data.""" + md = copy.deepcopy(self.md_base) + md['devices'] = [] + network_data = {'links': [], 'networks': [], 'services': []} + vendor_data = {} + vendor_data2 = {"static": {}} + + data = { + 'meta_data.json': json.dumps(md), + 'network_data.json': json.dumps(network_data), + 'vendor_data.json': json.dumps(vendor_data), + 'vendor_data2.json': json.dumps(vendor_data2), + } + + self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) + self.register_version(openstack.OS_OCATA, data) + + mock_read_ec2 = test_helpers.mock.MagicMock( + return_value={'instance-id': 'unused-ec2'}) + expected_md = copy.deepcopy(md) + expected_md.update( + {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + expected = { + 'userdata': '', # Annoying, no user-data results in empty string. + 'version': 2, + 'metadata': expected_md, + 'vendordata': vendor_data, + 'networkdata': network_data, + 'ec2-metadata': mock_read_ec2.return_value, + 'files': {}, + } + reader = openstack.MetadataReader(self.burl) + reader._read_ec2_metadata = mock_read_ec2 + self.assertEqual(expected, reader.read_v2()) + self.assertEqual(1, mock_read_ec2.call_count) + # vi: ts=4 expandtab -- cgit v1.2.3 From c6cfed7fb58be76a3f95963a5de469aa03542cd3 Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Fri, 14 Sep 2018 19:33:01 +0000 Subject: OpenStack: Support setting mac address on bond. Fix a bug where setting of mac address on a bond device was ignored when provided in OpenStack network_config.json. LP: #1682064 --- cloudinit/sources/helpers/openstack.py | 2 ++ tests/unittests/test_datasource/test_configdrive.py | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index d6f39a1e..76a6e310 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -604,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None): cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) elif link['type'] in ['bond']: params = {} + if link_mac_addr: + params['mac_address'] = link_mac_addr for k, v in link.items(): if k == 'bond_links': continue diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index b0abfc5f..231619c9 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -136,6 +136,7 @@ NETWORK_DATA_3 = { ] } +BOND_MAC = "fa:16:3e:b3:72:36" NETWORK_DATA_BOND = { "services": [ {"type": "dns", "address": "1.1.1.191"}, @@ -163,7 +164,7 @@ NETWORK_DATA_BOND = { {"bond_links": ["eth0", "eth1"], "bond_miimon": 100, "bond_mode": "4", "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "ethernet_mac_address": BOND_MAC, "id": "bond0", "type": "bond"}, {"ethernet_mac_address": "fa:16:3e:b3:72:30", "id": "vlan2", "type": "vlan", "vlan_id": 602, @@ -691,6 +692,9 @@ class TestConvertNetworkData(CiTestCase): self.assertIn("auto oeth0", eni_rendering) self.assertIn("auto oeth1", eni_rendering) self.assertIn("auto bond0", eni_rendering) + # The bond should have the given mac address + pos = eni_rendering.find("auto bond0") + self.assertIn(BOND_MAC, eni_rendering[pos:]) def test_vlan(self): 
# light testing of vlan config conversion and eni rendering -- cgit v1.2.3 From 37078046cb191234982d95ad4876af4ed8f862f4 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 17 Sep 2018 11:37:27 +0000 Subject: tests: Add mock for util.get_hostname. At present the host network settings bleed into the test environment causing the test test_handler_apt_source_v3 to fail if the host has a domain setting other than localdomain. LP: #1792799 --- tests/unittests/test_handler/test_handler_apt_source_v3.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index a81c67c7..90fe6eed 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -949,7 +949,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") self.assertEqual( orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)) - def test_apt_v3_mirror_search_dns(self): + @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain') + def test_apt_v3_mirror_search_dns(self, m_get_hostname): """test_apt_v3_mirror_search_dns - Test searching dns patterns""" pmir = "phit" smir = "shit" -- cgit v1.2.3 From 98d18c31c8759add858096dea53bc093c7cc9caa Mon Sep 17 00:00:00 2001 From: Thomas Berger Date: Tue, 18 Sep 2018 07:16:40 +0000 Subject: net_util: ensure static configs have netmask in translate_network result If a DataSource provides a network configuration in version 2 and runs on a distro which does not have a network renderer class in use, then the conversion of V2 to eni results in static ip configurations with subnet prefix-length (192.168.23.1/24) rather than an explicit netmask value. When sending such a config to net_util.translate_network, the resulting dictionary is missing the 'netmask' key for statically configured addresses, breaking network configurations on multiple distributions. This patch detects static ip configurations using prefix-length and converts the format into the previous 'address' and 'netmask' parts to keep compatibility for these distributions until they move to the v2 network configuration. LP: #1792454 --- cloudinit/distros/net_util.py | 19 +++++++++++ tests/unittests/test_distros/test_netconfig.py | 39 ++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) (limited to 'tests/unittests') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 1ce1aa71..edfcd99d 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -67,6 +67,10 @@ # } # } +from cloudinit.net.network_state import ( + net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr) + + def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] @@ -134,6 +138,21 @@ def translate_network(settings): val = info[k].strip().lower() if val: iface_info[k] = val + # handle static ip configurations using + # ipaddress/prefix-length format + if 'address' in iface_info: + if 'netmask' not in iface_info: + # check if the address has a network prefix + addr, _, prefix = iface_info['address'].partition('/') + if prefix: + iface_info['netmask'] = ( + net_prefix_to_ipv4_mask(prefix)) + iface_info['address'] = addr + # if we set the netmask, we also can set the broadcast + iface_info['broadcast'] = ( + mask_and_ipv4_to_bcast_addr( + iface_info['netmask'], addr)) + # Name server info provided??
if 'dns-nameservers' in info: iface_info['dns-nameservers'] = info['dns-nameservers'].split() diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 740fb76c..9bbff451 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -34,6 +34,19 @@ auto eth1 iface eth1 inet dhcp ''' +BASE_NET_CFG_FROM_V2 = ''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5/24 + gateway 192.168.1.254 + +auto eth1 +iface eth1 inet dhcp +''' + BASE_NET_CFG_IPV6 = ''' auto lo iface lo inet loopback @@ -262,6 +275,32 @@ hn0: flags=8843 metric 0 mtu 1500 '''), results[rc_conf]) self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + def test_simple_write_freebsd_from_v2eni(self): + fbsd_distro = self._get_distro('freebsd') + + rc_conf = '/etc/rc.conf' + read_bufs = { + rc_conf: 'initial-rc-conf-not-validated', + '/etc/resolv.conf': 'initial-resolv-conf-not-validated', + } + + tmpd = self.tmp_dir() + populate_dir(tmpd, read_bufs) + with self.reRooted(tmpd): + with mock.patch("cloudinit.distros.freebsd.util.subp", + return_value=('vtnet0', '')): + fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False) + results = dir2dict(tmpd) + + self.assertIn(rc_conf, results) + self.assertCfgEquals( + dedent('''\ + ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" + ifconfig_vtnet1="DHCP" + defaultrouter="192.168.1.254" + '''), results[rc_conf]) + self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + def test_apply_network_config_fallback_freebsd(self): fbsd_distro = self._get_distro('freebsd') -- cgit v1.2.3 From c3680475f9c97028d75a8f1bb58139ddd47d00ed Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Sep 2018 09:08:37 +0000 Subject: Remove dead-code _write_network distro implementations. Any distro that has a '_write_network_config' method should no longer have its _write_network called at all. So let's drop that code and raise a RuntimeError any time we get there. Replace the one caller of 'apply_network' (legacy openstack path) with a call to apply_network_config after converting the ENI to network config.
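For illustration only, a rough sketch of the replacement path; the exact layout of the converted dict shown in the comments is indicative, not authoritative (see cloudinit.net.eni for the real conversion):

    # Legacy config drive data now flows through the network-config path
    # instead of the removed per-distro _write_network implementations.
    from cloudinit.net import eni

    ENI_DATA = 'auto eth0\niface eth0 inet dhcp\n'

    netcfg = eni.convert_eni_data(ENI_DATA)
    # netcfg is a version-1 network config, roughly:
    #   {'version': 1,
    #    'config': [{'type': 'physical', 'name': 'eth0',
    #                'subnets': [{'type': 'dhcp'}]}]}
    # which a distro then renders via its supported renderer:
    #   distro.apply_network_config(netcfg)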
--- cloudinit/distros/__init__.py | 7 +- cloudinit/distros/debian.py | 5 -- cloudinit/distros/opensuse.py | 47 ----------- cloudinit/distros/rhel.py | 49 ----------- cloudinit/sources/DataSourceConfigDrive.py | 2 +- tests/unittests/test_distros/test_netconfig.py | 111 ------------------------- 6 files changed, 4 insertions(+), 217 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index b8a48e85..ac150bef 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -74,11 +74,10 @@ class Distro(object): def install_packages(self, pkglist): raise NotImplementedError() - @abc.abstractmethod def _write_network(self, settings): - # In the future use the http://fedorahosted.org/netcf/ - # to write this blob out in a distro format - raise NotImplementedError() + raise RuntimeError( + "Legacy function '_write_network' was called in distro '%s'.\n" + "_write_network_config needs implementation.\n" % self.name) def _write_network_config(self, settings): raise NotImplementedError() diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 33cc0bf1..d517fb88 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -109,11 +109,6 @@ class Distro(distros.Distro): self.update_package_sources() self.package_command('install', pkgs=pkglist) - def _write_network(self, settings): - # this is a legacy method, it will always write eni - util.write_file(self.network_conf_fn["eni"], settings) - return ['all'] - def _write_network_config(self, netconfig): _maybe_remove_legacy_eth0() return self._supported_write_network_config(netconfig) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 1fe896aa..1bfe0478 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -16,7 +16,6 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util -from cloudinit.distros import net_util from cloudinit.distros import rhel_util as rhutil from cloudinit.settings import PER_INSTANCE @@ -172,52 +171,6 @@ class Distro(distros.Distro): conf.set_hostname(hostname) util.write_file(out_fn, str(conf), 0o644) - def _write_network(self, settings): - # Convert debian settings to ifcfg format - entries = net_util.translate_network(settings) - LOG.debug("Translated ubuntu style network settings %s into %s", - settings, entries) - # Make the intermediate format as the suse format... 
- nameservers = [] - searchservers = [] - dev_names = entries.keys() - for (dev, info) in entries.items(): - net_fn = self.network_script_tpl % (dev) - route_fn = self.route_conf_tpl % (dev) - mode = None - if info.get('auto', None): - mode = 'auto' - else: - mode = 'manual' - bootproto = info.get('bootproto', None) - gateway = info.get('gateway', None) - net_cfg = { - 'BOOTPROTO': bootproto, - 'BROADCAST': info.get('broadcast'), - 'GATEWAY': gateway, - 'IPADDR': info.get('address'), - 'LLADDR': info.get('hwaddress'), - 'NETMASK': info.get('netmask'), - 'STARTMODE': mode, - 'USERCONTROL': 'no' - } - if dev != 'lo': - net_cfg['ETHTOOL_OPTIONS'] = '' - else: - net_cfg['FIREWALL'] = 'no' - rhutil.update_sysconfig_file(net_fn, net_cfg, True) - if gateway and bootproto == 'static': - default_route = 'default %s' % gateway - util.write_file(route_fn, default_route, 0o644) - if 'dns-nameservers' in info: - nameservers.extend(info['dns-nameservers']) - if 'dns-search' in info: - searchservers.extend(info['dns-search']) - if nameservers or searchservers: - rhutil.update_resolve_conf_file(self.resolve_conf_fn, - nameservers, searchservers) - return dev_names - def _write_network_config(self, netconfig): return self._supported_write_network_config(netconfig) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index ff513438..f55d96f7 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -13,7 +13,6 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util -from cloudinit.distros import net_util from cloudinit.distros import rhel_util from cloudinit.settings import PER_INSTANCE @@ -65,54 +64,6 @@ class Distro(distros.Distro): def _write_network_config(self, netconfig): return self._supported_write_network_config(netconfig) - def _write_network(self, settings): - # TODO(harlowja) fix this... since this is the ubuntu format - entries = net_util.translate_network(settings) - LOG.debug("Translated ubuntu style network settings %s into %s", - settings, entries) - # Make the intermediate format as the rhel format... 
- nameservers = [] - searchservers = [] - dev_names = entries.keys() - use_ipv6 = False - for (dev, info) in entries.items(): - net_fn = self.network_script_tpl % (dev) - net_cfg = { - 'DEVICE': dev, - 'NETMASK': info.get('netmask'), - 'IPADDR': info.get('address'), - 'BOOTPROTO': info.get('bootproto'), - 'GATEWAY': info.get('gateway'), - 'BROADCAST': info.get('broadcast'), - 'MACADDR': info.get('hwaddress'), - 'ONBOOT': _make_sysconfig_bool(info.get('auto')), - } - if info.get('inet6'): - use_ipv6 = True - net_cfg.update({ - 'IPV6INIT': _make_sysconfig_bool(True), - 'IPV6ADDR': info.get('ipv6').get('address'), - 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'), - }) - rhel_util.update_sysconfig_file(net_fn, net_cfg) - if 'dns-nameservers' in info: - nameservers.extend(info['dns-nameservers']) - if 'dns-search' in info: - searchservers.extend(info['dns-search']) - if nameservers or searchservers: - rhel_util.update_resolve_conf_file(self.resolve_conf_fn, - nameservers, searchservers) - if dev_names: - net_cfg = { - 'NETWORKING': _make_sysconfig_bool(True), - } - # If IPv6 interface present, enable ipv6 networking - if use_ipv6: - net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True) - net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False) - rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg) - return dev_names - def apply_locale(self, locale, out_fn=None): if self.uses_systemd(): if not out_fn: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 4cb28977..664dc4b7 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -196,7 +196,7 @@ def on_first_boot(data, distro=None, network=True): net_conf = data.get("network_config", '') if net_conf and distro: LOG.warning("Updating network interfaces from config drive") - distro.apply_network(net_conf) + distro.apply_network_config(eni.convert_eni_data(net_conf)) write_injected_files(data.get('files')) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 9bbff451..6e339355 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -367,16 +367,6 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): self.assertEqual(expected, results[cfgpath]) self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - def test_simple_write_ub(self): - expected_cfgs = { - self.eni_path(): BASE_NET_CFG, - } - - # ub_distro.apply_network(BASE_NET_CFG, False) - self._apply_and_verify_eni(self.distro.apply_network, - BASE_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_eni_ub(self): expected_cfgs = { self.eni_path(): V1_NET_CFG_OUTPUT, @@ -467,35 +457,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): self.assertCfgEquals(expected, results[cfgpath]) self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - def test_simple_write_rh(self): - expected_cfgs = { - self.ifcfg_path('lo'): dedent("""\ - DEVICE="lo" - ONBOOT=yes - """), - self.ifcfg_path('eth0'): dedent("""\ - DEVICE="eth0" - BOOTPROTO="static" - NETMASK="255.255.255.0" - IPADDR="192.168.1.5" - ONBOOT=yes - GATEWAY="192.168.1.254" - BROADCAST="192.168.1.0" - """), - self.ifcfg_path('eth1'): dedent("""\ - DEVICE="eth1" - BOOTPROTO="dhcp" - ONBOOT=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - # rh_distro.apply_network(BASE_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network, - BASE_NET_CFG, - 
expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_rh(self): expected_cfgs = { self.ifcfg_path('eth0'): dedent("""\ @@ -527,47 +488,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): V1_NET_CFG, expected_cfgs=expected_cfgs.copy()) - def test_write_ipv6_rhel(self): - expected_cfgs = { - self.ifcfg_path('lo'): dedent("""\ - DEVICE="lo" - ONBOOT=yes - """), - self.ifcfg_path('eth0'): dedent("""\ - DEVICE="eth0" - BOOTPROTO="static" - NETMASK="255.255.255.0" - IPADDR="192.168.1.5" - ONBOOT=yes - GATEWAY="192.168.1.254" - BROADCAST="192.168.1.0" - IPV6INIT=yes - IPV6ADDR="2607:f0d0:1002:0011::2" - IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" - """), - self.ifcfg_path('eth1'): dedent("""\ - DEVICE="eth1" - BOOTPROTO="static" - NETMASK="255.255.255.0" - IPADDR="192.168.1.6" - ONBOOT=no - GATEWAY="192.168.1.254" - BROADCAST="192.168.1.0" - IPV6INIT=yes - IPV6ADDR="2607:f0d0:1002:0011::3" - IPV6_DEFAULTGW="2607:f0d0:1002:0011::1" - """), - self.control_path(): dedent("""\ - NETWORKING=yes - NETWORKING_IPV6=yes - IPV6_AUTOCONF=no - """), - } - # rh_distro.apply_network(BASE_NET_CFG_IPV6, False) - self._apply_and_verify(self.distro.apply_network, - BASE_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_ipv6_rh(self): expected_cfgs = { self.ifcfg_path('eth0'): dedent("""\ @@ -627,37 +547,6 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): self.assertCfgEquals(expected, results[cfgpath]) self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - def test_simple_write_opensuse(self): - """Opensuse network rendering writes appropriate sysconfig files.""" - expected_cfgs = { - self.ifcfg_path('lo'): dedent(''' - STARTMODE="auto" - USERCONTROL="no" - FIREWALL="no" - '''), - self.ifcfg_path('eth0'): dedent(''' - BOOTPROTO="static" - BROADCAST="192.168.1.0" - GATEWAY="192.168.1.254" - IPADDR="192.168.1.5" - NETMASK="255.255.255.0" - STARTMODE="auto" - USERCONTROL="no" - ETHTOOL_OPTIONS="" - '''), - self.ifcfg_path('eth1'): dedent(''' - BOOTPROTO="dhcp" - STARTMODE="auto" - USERCONTROL="no" - ETHTOOL_OPTIONS="" - ''') - } - - # distro.apply_network(BASE_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network, - BASE_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_opensuse(self): """Opensuse uses apply_network_config and renders sysconfig""" expected_cfgs = { -- cgit v1.2.3 From e7b0e5f72e134779cfe22cd07b09a42c22d2bfe1 Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Wed, 26 Sep 2018 17:59:07 +0000 Subject: Add support for Infiniband network interfaces (IPoIB). OpenStack ironic references Infiniband interfaces via a 6 byte 'MAC address' formed from bytes 13-15 and 18-20 of interface's hardware address. This address is used as the ethernet_mac_address of Infiniband links in network_data.json in configdrives generated by OpenStack nova. We can use this address to map links in network_data.json to their corresponding interface names. When generating interface configuration files, we need to use the interface's full hardware address as the HWADDR, rather than the 6 byte MAC address provided by network_data.json. This change allows IB interfaces to be referenced in this dual mode - by MAC address and hardware address, depending on the context. Support TYPE=InfiniBand for sysconfig configuration of IB interfaces. 
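Purely as an illustration of the address mangling described above (the sample address is the one used by the new unit tests), the 6-byte Ethernet-style form keeps bytes 13-15 and 18-20 of the 20-byte hardware address:

    # Reduce a 20-byte IPoIB hardware address to the 6-byte Ethernet-style
    # MAC that ironic publishes; the slices mirror get_ib_interface_hwaddr.
    ib_hwaddr = ('80:00:00:28:fe:80:00:00:00:00:'
                 '00:00:00:11:22:03:00:33:44:56')

    # [36:-14] keeps bytes 13-15 (with a trailing colon); [51:] keeps
    # bytes 18-20.
    eth_mac = ib_hwaddr[36:-14] + ib_hwaddr[51:]
    assert eth_mac == '00:11:22:33:44:56'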
--- cloudinit/net/__init__.py | 38 ++++++++++++++++ cloudinit/net/network_state.py | 4 ++ cloudinit/net/sysconfig.py | 14 ++++++ cloudinit/sources/helpers/openstack.py | 11 +++++ tests/unittests/test_net.py | 81 +++++++++++++++++++++++++++++++++- 5 files changed, 147 insertions(+), 1 deletion(-) (limited to 'tests/unittests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 5e87bcad..f83d3681 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -569,6 +569,20 @@ def get_interface_mac(ifname): return read_sys_net_safe(ifname, path) +def get_ib_interface_hwaddr(ifname, ethernet_format): + """Returns the string value of an Infiniband interface's hardware + address. If ethernet_format is True, an Ethernet MAC-style 6 byte + representation of the address will be returned. + """ + # Type 32 is Infiniband. + if read_sys_net_safe(ifname, 'type') == '32': + mac = get_interface_mac(ifname) + if mac and ethernet_format: + # Use bytes 13-15 and 18-20 of the hardware address. + mac = mac[36:-14] + mac[51:] + return mac + + def get_interfaces_by_mac(): """Build a dictionary of tuples {mac: name}. @@ -580,6 +594,15 @@ def get_interfaces_by_mac(): "duplicate mac found! both '%s' and '%s' have mac '%s'" % (name, ret[mac], mac)) ret[mac] = name + # Try to get an Infiniband hardware address (in 6 byte Ethernet format) + # for the interface. + ib_mac = get_ib_interface_hwaddr(name, True) + if ib_mac: + if ib_mac in ret: + raise RuntimeError( + "duplicate mac found! both '%s' and '%s' have mac '%s'" % + (name, ret[ib_mac], ib_mac)) + ret[ib_mac] = name return ret @@ -607,6 +630,21 @@ def get_interfaces(): return ret +def get_ib_hwaddrs_by_interface(): + """Build a dictionary mapping Infiniband interface names to their hardware + address.""" + ret = {} + for name, _, _, _ in get_interfaces(): + ib_mac = get_ib_interface_hwaddr(name, False) + if ib_mac: + if ib_mac in ret: + raise RuntimeError( + "duplicate mac found! both '%s' and '%s' have mac '%s'" % + (name, ret[ib_mac], ib_mac)) + ret[name] = ib_mac + return ret + + class EphemeralIPv4Network(object): """Context manager which sets up temporary static network configuration. 
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 72c803eb..f76e508a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -483,6 +483,10 @@ class NetworkStateInterpreter(object): interfaces.update({iface['name']: iface}) + @ensure_command_keys(['name']) + def handle_infiniband(self, command): + self.handle_physical(command) + @ensure_command_keys(['address']) def handle_nameserver(self, command): dns = self._network_state.get('dns') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 66e970e0..9c16d3a7 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -174,6 +174,7 @@ class NetInterface(ConfigMap): 'ethernet': 'Ethernet', 'bond': 'Bond', 'bridge': 'Bridge', + 'infiniband': 'InfiniBand', } def __init__(self, iface_name, base_sysconf_dir, templates, @@ -568,6 +569,18 @@ class Renderer(renderer.Renderer): cls._render_subnets(iface_cfg, iface_subnets) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + @classmethod + def _render_ib_interfaces(cls, network_state, iface_contents): + ib_filter = renderer.filter_by_type('infiniband') + for iface in network_state.iter_interfaces(ib_filter): + iface_name = iface['name'] + iface_cfg = iface_contents[iface_name] + iface_cfg.kind = 'infiniband' + iface_subnets = iface.get("subnets", []) + route_cfg = iface_cfg.routes + cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + @classmethod def _render_sysconfig(cls, base_sysconf_dir, network_state, templates=None): @@ -586,6 +599,7 @@ class Renderer(renderer.Renderer): cls._render_bond_interfaces(network_state, iface_contents) cls._render_vlan_interfaces(network_state, iface_contents) cls._render_bridge_interfaces(network_state, iface_contents) + cls._render_ib_interfaces(network_state, iface_contents) contents = {} for iface_name, iface_cfg in iface_contents.items(): if iface_cfg or iface_cfg.children: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 76a6e310..9c29ceac 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -675,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None): else: cfg[key] = fmt % link_id_info[target]['name'] + # Infiniband interfaces may be referenced in network_data.json by a 6 byte + # Ethernet MAC-style address, and we use that address to look up the + # interface name above. Now ensure that the hardware address is set to the + # full 20 byte address. 
+ ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface() + if ib_known_hwaddrs: + for cfg in config: + if cfg['name'] in ib_known_hwaddrs: + cfg['mac_address'] = ib_known_hwaddrs[cfg['name']] + cfg['type'] = 'infiniband' + for service in services: cfg = service cfg.update({'type': 'nameserver'}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index f3165da9..5d9c7d92 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3260,11 +3260,15 @@ class TestGetInterfacesByMac(CiTestCase): def _se_interface_has_own_mac(self, name): return name in self.data['own_macs'] + def _se_get_ib_interface_hwaddr(self, name, ethernet_format): + ib_hwaddr = self.data.get('ib_hwaddr', {}) + return ib_hwaddr.get(name, {}).get(ethernet_format) + def _mock_setup(self): self.data = copy.deepcopy(self._data) self.data['devices'] = set(list(self.data['macs'].keys())) mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac', 'is_vlan') + 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr') self.mocks = {} for n in mocks: m = mock.patch('cloudinit.net.' + n, @@ -3338,6 +3342,20 @@ class TestGetInterfacesByMac(CiTestCase): ret = net.get_interfaces_by_mac() self.assertEqual('lo', ret[empty_mac]) + def test_ib(self): + ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' + ib_addr_eth_format = '00:11:22:33:44:56' + self._mock_setup() + self.data['devices'] = ['enp0s1', 'ib0'] + self.data['own_macs'].append('ib0') + self.data['macs']['ib0'] = ib_addr + self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format, + False: ib_addr}} + result = net.get_interfaces_by_mac() + expected = {'aa:aa:aa:aa:aa:01': 'enp0s1', + ib_addr_eth_format: 'ib0', ib_addr: 'ib0'} + self.assertEqual(expected, result) + class TestInterfacesSorting(CiTestCase): @@ -3352,6 +3370,67 @@ class TestInterfacesSorting(CiTestCase): ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) +class TestGetIBHwaddrsByInterface(CiTestCase): + + _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' + _ib_addr_eth_format = '00:11:22:33:44:56' + _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', + 'bridge1-nic', 'tun0', 'ib0'], + 'bonds': ['bond1'], + 'bridges': ['bridge1'], + 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'], + 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', + 'enp0s2': 'aa:aa:aa:aa:aa:02', + 'bond1': 'aa:aa:aa:aa:aa:01', + 'bridge1': 'aa:aa:aa:aa:aa:03', + 'bridge1-nic': 'aa:aa:aa:aa:aa:03', + 'tun0': None, + 'ib0': _ib_addr}, + 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format, + False: _ib_addr}}} + data = {} + + def _mock_setup(self): + self.data = copy.deepcopy(self._data) + mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', + 'interface_has_own_mac', 'get_ib_interface_hwaddr') + self.mocks = {} + for n in mocks: + m = mock.patch('cloudinit.net.' 
+ n, + side_effect=getattr(self, '_se_' + n)) + self.addCleanup(m.stop) + self.mocks[n] = m.start() + + def _se_get_devicelist(self): + return self.data['devices'] + + def _se_get_interface_mac(self, name): + return self.data['macs'][name] + + def _se_is_bridge(self, name): + return name in self.data['bridges'] + + def _se_interface_has_own_mac(self, name): + return name in self.data['own_macs'] + + def _se_get_ib_interface_hwaddr(self, name, ethernet_format): + ib_hwaddr = self.data.get('ib_hwaddr', {}) + return ib_hwaddr.get(name, {}).get(ethernet_format) + + def test_ethernet(self): + self._mock_setup() + self.data['devices'].remove('ib0') + result = net.get_ib_hwaddrs_by_interface() + expected = {} + self.assertEqual(expected, result) + + def test_ib(self): + self._mock_setup() + result = net.get_ib_hwaddrs_by_interface() + expected = {'ib0': self._ib_addr} + self.assertEqual(expected, result) + + def _gzip_data(data): with io.BytesIO() as iobuf: gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf) -- cgit v1.2.3 From facb92c5b55f4d27ee971eaf49c580fbf92ac2c3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 26 Sep 2018 21:29:27 +0000 Subject: lxd: adjust to snap installed lxd. Relax expectation on path to lxc and lxd. The deb path still does install them in /usr/bin/ but that is overly pedantic. Add a 'lxd waitready' (present since lxd 0.5) to wait until lxd is ready before operating on it. --- cloudinit/config/cc_lxd.py | 5 ++++- tests/cloud_tests/testcases/modules/lxd_bridge.py | 4 ++-- tests/cloud_tests/testcases/modules/lxd_dir.py | 4 ++-- tests/unittests/test_handler/test_handler_lxd.py | 12 ++++++------ 4 files changed, 14 insertions(+), 11 deletions(-) (limited to 'tests/unittests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index a604825a..24a8ebea 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -104,6 +104,7 @@ def handle(name, cfg, cloud, log, args): 'network_address', 'network_port', 'storage_backend', 'storage_create_device', 'storage_create_loop', 'storage_pool', 'trust_password') + util.subp(['lxd', 'waitready', '--timeout=300']) cmd = ['lxd', 'init', '--auto'] for k in init_keys: if init_cfg.get(k): @@ -260,7 +261,9 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C'} + env = {'LC_ALL': 'C', + 'HOME': os.environ.get('HOME', '/root'), + 'USER': os.environ.get('USER', 'root')} util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py index c0262ba3..f6011dee 100644 --- a/tests/cloud_tests/testcases/modules/lxd_bridge.py +++ b/tests/cloud_tests/testcases/modules/lxd_bridge.py @@ -10,12 +10,12 @@ class TestLxdBridge(base.CloudTestCase): def test_lxd(self): """Test lxd installed.""" out = self.get_data_file('lxd') - self.assertIn('/usr/bin/lxd', out) + self.assertIn('/lxd', out) def test_lxc(self): """Test lxc installed.""" out = self.get_data_file('lxc') - self.assertIn('/usr/bin/lxc', out) + self.assertIn('/lxc', out) def test_bridge(self): """Test bridge config.""" diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py index 1495674e..26a3db39 100644 --- a/tests/cloud_tests/testcases/modules/lxd_dir.py +++ b/tests/cloud_tests/testcases/modules/lxd_dir.py @@ -10,11 +10,11 @@ class TestLxdDir(base.CloudTestCase): def test_lxd(self): """Test lxd installed.""" out = self.get_data_file('lxd') - self.assertIn('/usr/bin/lxd', out) + 
self.assertIn('/lxd', out) def test_lxc(self): """Test lxc installed.""" out = self.get_data_file('lxc') - self.assertIn('/usr/bin/lxc', out) + self.assertIn('/lxc', out) # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 4dd7e09f..2478ebc4 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -43,12 +43,12 @@ class TestLxd(t_help.CiTestCase): self.assertTrue(mock_util.which.called) # no bridge config, so maybe_cleanup should not be called. self.assertFalse(m_maybe_clean.called) - init_call = mock_util.subp.call_args_list[0][0][0] - self.assertEqual(init_call, - ['lxd', 'init', '--auto', - '--network-address=0.0.0.0', - '--storage-backend=zfs', - '--storage-pool=poolname']) + self.assertEqual( + [mock.call(['lxd', 'waitready', '--timeout=300']), + mock.call( + ['lxd', 'init', '--auto', '--network-address=0.0.0.0', + '--storage-backend=zfs', '--storage-pool=poolname'])], + mock_util.subp.call_args_list) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") -- cgit v1.2.3
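For context, a sketch (not part of the patch) of the bring-up sequence cc_lxd now drives; the storage arguments are just the example values the unit test above asserts:

    # Wait for the daemon (deb- or snap-installed) to be ready, then run
    # the non-interactive init built from the user's lxd/init config keys.
    from cloudinit import util

    util.subp(['lxd', 'waitready', '--timeout=300'])
    util.subp(['lxd', 'init', '--auto', '--network-address=0.0.0.0',
               '--storage-backend=zfs', '--storage-pool=poolname'])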