From 0a71d5a870b416f2c86c8bc196004bb3fc0768a0 Mon Sep 17 00:00:00 2001 From: Hongjiang Zhang Date: Fri, 13 Jan 2017 15:08:22 +0800 Subject: FreeBSD: improvements and fixes for use on Azure This patch aims to make FreeBSD 10.3 or 11 work on Azure. The modifications abide by the rule of: * making as few modifications as possible * delegating to the distro or datasource where possible. The main modifications are: 1. network configuration improvements, and movement into distro path. 2. Fix setting of password. Password setting through "pw" can only work through a pipe. 3. Add 'root:wheel' to syslog_fix_perms field. 4. Support resizing the default file system (ufs). 5. Copy cloud.cfg for FreeBSD to /etc/cloud/cloud.cfg rather than /usr/local/etc/cloud/cloud.cfg. 6. Azure-specific changes: a. When reading the Azure endpoint, search in a different path and read a different option name (option-245 vs. unknown-245). So, the lease file path should be generated according to platform. b. Adjust the handling of ephemeral mounts for the ufs filesystem and for finding the ephemeral device. c. 
fix mounting of cdrom LP: #1636345 --- tests/unittests/test_datasource/test_azure.py | 65 ++++++++++++++++++++++ .../unittests/test_datasource/test_azure_helper.py | 4 +- tests/unittests/test_datasource/test_cloudstack.py | 5 ++ 3 files changed, 72 insertions(+), 2 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 8d22bb59..e6b0dcb4 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -3,6 +3,8 @@ from cloudinit import helpers from cloudinit.util import b64e, decode_binary, load_file from cloudinit.sources import DataSourceAzure +from cloudinit.util import find_freebsd_part +from cloudinit.util import get_path_dev_freebsd from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest @@ -95,6 +97,41 @@ class TestAzureDataSource(TestCase): for module, name, new in patches: self.patches.enter_context(mock.patch.object(module, name, new)) + def _get_mockds(self): + mod = DataSourceAzure + sysctl_out = "dev.storvsc.3.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.2.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.1.%pnpinfo: "\ + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ + "deviceid=00000000-0001-8899-0000-000000000000\n" + camctl_devbus = """ +scbus0 on ata0 bus 0 +scbus1 on ata1 bus 0 +scbus2 on blkvsc0 bus 0 +scbus3 on blkvsc1 bus 0 +scbus4 on storvsc2 bus 0 +scbus5 on storvsc3 bus 0 +scbus-1 on xpt0 bus 0 + """ + camctl_dev = """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + self.apply_patches([ + (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( + return_value=sysctl_out)), + (mod, 
'get_camcontrol_dev_bus', mock.MagicMock( + return_value=camctl_devbus)), + (mod, 'get_camcontrol_dev', mock.MagicMock( + return_value=camctl_dev)) + ]) + return mod + def _get_ds(self, data, agent_command=None): def dsdevs(): @@ -177,6 +214,34 @@ class TestAzureDataSource(TestCase): return raise AssertionError("XML is the same") + def test_get_resource_disk(self): + ds = self._get_mockds() + dev = ds.get_resource_disk_on_freebsd(1) + self.assertEqual("da1", dev) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_on_Azure(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + def test_get_path_dev_freebsd_on_Azure(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertNotEqual(res, None) + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index aafdebd7..b2d2971b 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -3,7 +3,6 @@ import os from cloudinit.sources.helpers import azure as azure_helper - from ..helpers import ExitStack, mock, TestCase @@ -72,10 +71,11 @@ class TestFindEndpoint(TestCase): @staticmethod def _build_lease_content(encoded_address): + endpoint = azure_helper._get_dhcp_endpoint_option_name() return '\n'.join([ 'lease {', ' interface "eth0";', - ' option unknown-245 {0};'.format(encoded_address), + ' 
option {0} {1};'.format(endpoint, encoded_address), '}']) def test_from_dhcp_client(self): diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index e93d28de..1d3d2f19 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -15,6 +15,11 @@ class TestCloudStackPasswordFetching(TestCase): mod_name = 'cloudinit.sources.DataSourceCloudStack' self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" + mod_name = 'cloudinit.sources.DataSourceCloudStack.get_default_gateway' + get_default_gw = mock.MagicMock(return_value=default_gw) + self.patches.enter_context( + mock.patch(mod_name, get_default_gw)) def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) -- cgit v1.2.3 From 31b6f173280fcc8e9be2732ae2e9b6f6c89679d4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Apr 2017 09:23:25 -0400 Subject: Azure: fix reformatting of ephemeral disks on resize to large types. Large instance types have a different disk format on the newly partitioned ephemeral drive. So we have to adjust the logic in the Azure datasource to recognize that a disk with 2 partitions and an empty ntfs filesystem on the second one is acceptable. This also adjusts the datasources's builtin fs_setup config to remove the 'replace_fs' entry. This entry was previously ignored, and confusing. I've clarified the doc on that also. 
LP: #1686514 --- cloudinit/config/cc_disk_setup.py | 19 +- cloudinit/sources/DataSourceAzure.py | 84 ++++--- tests/unittests/test_datasource/test_azure.py | 265 ++++++++++++++++++--- .../test_handler/test_handler_disk_setup.py | 16 ++ 4 files changed, 307 insertions(+), 77 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 6f827ddc..29eb5dd8 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -68,6 +68,9 @@ specified using ``filesystem``. Using ``overwrite: true`` for filesystems is dangerous and can lead to data loss, so double check the entry in ``fs_setup``. +.. note:: + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. + **Internal name:** ``cc_disk_setup`` **Module frequency:** per instance @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): - log.warn("Invalid disk definition for %s" % disk) + log.warning("Invalid disk definition for %s" % disk) continue try: @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): update_fs_setup_devices(fs_setup, cloud.device_name_to_device) for definition in fs_setup: if not isinstance(definition, dict): - log.warn("Invalid file system definition: %s" % definition) + log.warning("Invalid file system definition: %s" % definition) continue try: @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): definition['_origname'] = origname definition['device'] = tformed - if part and 'partition' in definition: - definition['_partition'] = definition['partition'] + if part: + # In origname with .N, N overrides 'partition' key. 
+ if 'partition' in definition: + LOG.warning("Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", part, origname, + definition) + definition['_partition'] = definition['partition'] definition['partition'] = part @@ -849,7 +857,8 @@ def mkfs(fs_cfg): # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", + device, check_label, check_fstype) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5254e18a..44857c09 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -196,8 +196,7 @@ BUILTIN_CLOUD_CONFIG = { 'overwrite': True}, }, 'fs_setup': [{'filesystem': DEFAULT_FS, - 'device': 'ephemeral0.1', - 'replace_fs': 'ntfs'}], + 'device': 'ephemeral0.1'}], } DS_CFG_PATH = ['datasource', DS_NAME] @@ -413,56 +412,71 @@ class DataSourceAzureNet(sources.DataSource): return +def _partitions_on_device(devpath, maxnum=16): + # return a list of tuples (ptnum, path) for each part on devpath + for suff in ("-part", "p", ""): + found = [] + for pnum in range(1, maxnum): + ppath = devpath + suff + str(pnum) + if os.path.exists(ppath): + found.append((pnum, os.path.realpath(ppath))) + if found: + return found + return [] + + +def _has_ntfs_filesystem(devpath): + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) + LOG.debug('ntfs_devices found = %s', ntfs_devices) + return os.path.realpath(devpath) in ntfs_devices + + def can_dev_be_reformatted(devpath): - # determine if the ephemeral block device path devpath - # is newly formatted after a resize. + """Determine if block device devpath is newly formatted ephemeral. 
+ + A newly formatted disk will: + a.) have a partition table (dos or gpt) + b.) have 1 partition that is ntfs formatted, or + have 2 partitions with the second partition ntfs formatted. + (larger instances with >2TB ephemeral disk have gpt, and will + have a microsoft reserved partition as part 1. LP: #1686514) + c.) the ntfs partition will have no files other than possibly + 'dataloss_warning_readme.txt'""" if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath - realpath = os.path.realpath(devpath) - LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) - - # it is possible that the block device might exist, but the kernel - # have not yet read the partition table and sent events. we udevadm settle - # to hope to resolve that. Better here would probably be to test and see, - # and then settle if we didn't find anything and try again. - if util.which("udevadm"): - util.subp(["udevadm", "settle"]) + LOG.debug('Resolving realpath of %s -> %s', devpath, + os.path.realpath(devpath)) # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource # where partitions are "1" or "-part1" or "p1" - part1path = None - for suff in ("-part", "p", ""): - cand = devpath + suff + "1" - if os.path.exists(cand): - if os.path.exists(devpath + suff + "2"): - msg = ('device %s had more than 1 partition: %s, %s' % - devpath, cand, devpath + suff + "2") - return False, msg - part1path = cand - break - - if part1path is None: + partitions = _partitions_on_device(devpath) + if len(partitions) == 0: return False, 'device %s was not partitioned' % devpath + elif len(partitions) > 2: + msg = ('device %s had 3 or more partitions: %s' % + (devpath, ' '.join([p[1] for p in partitions]))) + return False, msg + elif len(partitions) == 2: + cand_part, cand_path = partitions[1] + else: + cand_part, cand_path = partitions[0] - real_part1path = os.path.realpath(part1path) - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) - LOG.debug('ntfs_devices 
found = %s', ntfs_devices) - if real_part1path not in ntfs_devices: - msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % - (part1path, real_part1path, devpath)) + if not _has_ntfs_filesystem(cand_path): + msg = ('partition %s (%s) on device %s was not ntfs formatted' % + (cand_part, cand_path, devpath)) return False, msg def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) - bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % - (part1path, real_part1path, devpath)) + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % + (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(part1path, count_files) + file_count = util.mount_cb(cand_path, count_files) except util.MountFailedError as e: - return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: return False, bmsg + ' but had %d files on it.' % file_count diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e6b0dcb4..67cddeb9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file -from cloudinit.sources import DataSourceAzure +from cloudinit.util import b64e, decode_binary, load_file, write_file +from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd -from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest +from ..helpers import (CiTestCase, TestCase, populate_dir, mock, + ExitStack, PY26, SkipTest) import crypt import os @@ -98,7 +99,6 @@ class TestAzureDataSource(TestCase): self.patches.enter_context(mock.patch.object(module, name, new)) def _get_mockds(self): - mod = DataSourceAzure sysctl_out = "dev.storvsc.3.%pnpinfo: "\ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" @@ -123,14 +123,14 @@ scbus-1 on xpt0 bus 0 at scbus3 target 1 lun 0 (da1,pass2) """ self.apply_patches([ - (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( return_value=sysctl_out)), - (mod, 'get_camcontrol_dev_bus', mock.MagicMock( + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( return_value=camctl_devbus)), - (mod, 'get_camcontrol_dev', mock.MagicMock( + (dsaz, 'get_camcontrol_dev', mock.MagicMock( return_value=camctl_dev)) ]) - return mod + return dsaz def _get_ds(self, data, agent_command=None): @@ -152,8 +152,7 @@ scbus-1 on xpt0 bus 0 populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) - mod = DataSourceAzure - mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.get_metadata_from_fabric = mock.MagicMock(return_value={ 'public-keys': [], @@ -162,19 +161,19 @@ scbus-1 on xpt0 bus 0 self.instance_id = 'test-instance-id' self.apply_patches([ - (mod, 'list_possible_azure_ds_devs', dsdevs), - (mod, 'invoke_agent', _invoke_agent), - (mod, 'wait_for_files', 
_wait_for_files), - (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'perform_hostname_bounce', mock.MagicMock()), - (mod, 'get_hostname', mock.MagicMock()), - (mod, 'set_hostname', mock.MagicMock()), - (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), - (mod.util, 'read_dmi_data', mock.MagicMock( + (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'invoke_agent', _invoke_agent), + (dsaz, 'wait_for_files', _wait_for_files), + (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'read_dmi_data', mock.MagicMock( return_value=self.instance_id)), ]) - dsrc = mod.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -418,7 +417,7 @@ fdescfs /dev/fd fdescfs rw 0 0 cfg = dsrc.get_config_obj() self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - DataSourceAzure.RESOURCE_DISK_PATH) + dsaz.RESOURCE_DISK_PATH) assert 'disk_setup' in cfg assert 'fs_setup' in cfg self.assertIsInstance(cfg['disk_setup'], dict) @@ -468,14 +467,13 @@ fdescfs /dev/fd fdescfs rw 0 0 # Make sure that the redacted password on disk is not used by CI self.assertNotEqual(dsrc.cfg.get('password'), - DataSourceAzure.DEF_PASSWD_REDACTION) + dsaz.DEF_PASSWD_REDACTION) # Make sure that the password was really encrypted et = ET.fromstring(on_disk_ovf) for elem in et.iter(): if 'UserPassword' in elem.tag: - self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION, - elem.text) + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") @@ -524,17 +522,17 @@ class TestAzureBounce(TestCase): def 
mock_out_azure_moving_parts(self): self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'invoke_agent')) + mock.patch.object(dsaz, 'invoke_agent')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'wait_for_files')) + mock.patch.object(dsaz, 'wait_for_files')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', + mock.patch.object(dsaz, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', + mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'read_dmi_data', + mock.patch.object(dsaz.util, 'read_dmi_data', mock.MagicMock(return_value='test-instance-id'))) def setUp(self): @@ -543,13 +541,13 @@ class TestAzureBounce(TestCase): self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.addCleanup(shutil.rmtree, self.tmp) - DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() self.get_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_hostname')) + mock.patch.object(dsaz, 'get_hostname')) self.set_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'set_hostname')) + mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) @@ -560,7 +558,7 @@ class TestAzureBounce(TestCase): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - dsrc = DataSourceAzure.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( {}, distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ 
-673,7 +671,7 @@ class TestAzureBounce(TestCase): def test_default_bounce_command_used_by_default(self): cmd = 'default-bounce-command' - DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) self._get_ds(data, agent_command=['not', '__builtin__']).get_data() @@ -701,15 +699,208 @@ class TestAzureBounce(TestCase): class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) - self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + self.assertRaises(dsaz.BrokenAzureDataSource, + dsaz.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) - (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) + (_md, _ud, cfg) = dsaz.read_azure_ovf(content) for mypk in mypklist: self.assertIn(mypk, cfg['_pubkeys']) + +class TestCanDevBeReformatted(CiTestCase): + warning_file = 'dataloss_warning_readme.txt' + + def _domock(self, mockpath, sattr=None): + patcher = mock.patch(mockpath) + setattr(self, sattr, patcher.start()) + self.addCleanup(patcher.stop) + + def setUp(self): + super(TestCanDevBeReformatted, self).setUp() + + def patchup(self, devs): + bypath = {} + for path, data in devs.items(): + bypath[path] = data + if 'realpath' in data: + bypath[data['realpath']] = data + for ppath, pdata in data.get('partitions', {}).items(): + bypath[ppath] = pdata + if 'realpath' in data: + bypath[pdata['realpath']] = pdata + + def realpath(d): + return bypath[d].get('realpath', d) + + def partitions_on_device(devpath): + parts = bypath.get(devpath, {}).get('partitions', {}) + ret = [] + for 
path, data in parts.items(): + ret.append((data.get('num'), realpath(path))) + # return sorted by partition number + return sorted(ret, key=lambda d: d[0]) + + def mount_cb(device, callback): + p = self.tmp_dir() + for f in bypath.get(device).get('files', []): + write_file(os.path.join(p, f), content=f) + return callback(p) + + def has_ntfs_fs(device): + return bypath.get(device, {}).get('fs') == 'ntfs' + + p = 'cloudinit.sources.DataSourceAzure' + self._domock(p + "._partitions_on_device", 'm_partitions_on_device') + self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') + self._domock(p + ".util.mount_cb", 'm_mount_cb') + self._domock(p + ".os.path.realpath", 'm_realpath') + self._domock(p + ".os.path.exists", 'm_exists') + + self.m_exists.side_effect = lambda p: p in bypath + self.m_realpath.side_effect = realpath + self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs + self.m_mount_cb.side_effect = mount_cb + self.m_partitions_on_device.side_effect = partitions_on_device + + def test_three_partitions_is_false(self): + """A disk with 3 partitions can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2}, + '/dev/sda3': {'num': 3}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("3 or more", msg.lower()) + + def test_no_partitions_is_false(self): + """A disk with no partitions can not be formatted.""" + self.patchup({'/dev/sda': {}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not partitioned", msg.lower()) + + def test_two_partitions_not_ntfs_false(self): + """2 partitions and 2nd not ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("not 
ntfs", msg.lower()) + + def test_two_partitions_ntfs_populated_false(self): + """2 partitions and populated ntfs fs on 2nd can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', + 'files': ['secret.txt']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("files on it", msg.lower()) + + def test_two_partitions_ntfs_empty_is_true(self): + """2 partitions and empty ntfs fs on 2nd can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_not_ntfs_false(self): + """1 partition witih fs other than ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'zfs'}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_one_partition_ntfs_populated_false(self): + """1 mountable ntfs partition with many files can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['file1.txt', 'file2.exe']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("files on it", msg.lower()) + + def test_one_partition_ntfs_empty_is_true(self): + """1 mountable ntfs partition and no files can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): + """1 
mountable ntfs partition and only warn file can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(False, value) + self.assertIn("3 or more", msg.lower()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 9f00d46a..68fc6aae 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -151,6 +151,22 @@ class TestUpdateFsSetupDevices(TestCase): 'filesystem': 'xfs' }, fs_setup) + def test_dotted_devname_populates_partition(self): + fs_setup = { + 'device': 'ephemeral0.1', + 'label': 'test2', + 'filesystem': 'xfs' + } + 
cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + self.assertEqual({ + '_origname': 'ephemeral0.1', + 'device': 'ephemeral0', + 'partition': '1', + 'label': 'test2', + 'filesystem': 'xfs' + }, fs_setup) + @mock.patch('cloudinit.config.cc_disk_setup.find_device_node', return_value=('/dev/xdb1', False)) -- cgit v1.2.3 From afdddf8eea34866b43d1fc92624f9ac175802f36 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 18 May 2017 15:07:55 -0400 Subject: cloudstack: fix tests to avoid accessing /var/lib/NetworkManager on centos/fedora/rhel, /var/lib/NetworkManager has mode 700, causing the cloudstack unit tests to fail when run as a non-root user. This mocks out get_latest_lease so that we no longer try to read dhcp lease information during the unit tests. --- tests/unittests/test_datasource/test_cloudstack.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 1d3d2f19..e94aad61 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -16,10 +16,15 @@ class TestCloudStackPasswordFetching(TestCase): self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) default_gw = "192.201.20.0" - mod_name = 'cloudinit.sources.DataSourceCloudStack.get_default_gateway' + get_latest_lease = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_latest_lease', + get_latest_lease)) + get_default_gw = mock.MagicMock(return_value=default_gw) - self.patches.enter_context( - mock.patch(mod_name, get_default_gw)) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_default_gateway', + get_default_gw)) def 
_set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) -- cgit v1.2.3 From 3d97b29bd71b9de5fb14d8bd320c20545b88a81b Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Wed, 26 Apr 2017 14:07:53 -0600 Subject: DigitalOcean: remove routes except for the public interface. Previously, the datasource for DigitalOcean allowed for a gateway on each NIC. As a result, on Ubuntu 16.04, networking.service was broken. For 17.04 and later, Ubuntu _replaces_ the default gateway with the second gateway on 'ifup' after reboot. DigitalOcean is looking at changing the meta-data, however, this will result in another version of the meta-data JSON. LP: #1681531. --- cloudinit/sources/helpers/digitalocean.py | 2 +- .../unittests/test_datasource/test_digitalocean.py | 25 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 257989e8..693f8d5c 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -162,7 +162,7 @@ def convert_network_configuration(config, dns_servers): continue sub_part = _get_subnet_part(raw_subnet) - if netdef in ('private', 'anchor_ipv4', 'anchor_ipv6'): + if nic_type != "public" or "anchor" in netdef: del sub_part['gateway'] subnets.append(sub_part) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index a11166a9..e97a679a 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -1,6 +1,8 @@ # Copyright (C) 2014 Neal Shrader # # Author: Neal Shrader +# Author: Ben Howard +# Author: Scott Moser # # This file is part of cloud-init. See LICENSE file for license information. 
@@ -262,6 +264,29 @@ class TestNetworkConvert(TestCase): print(json.dumps(subn, indent=3)) return subn + def test_correct_gateways_defined(self): + """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" + netcfg = self._get_networking() + gateways = [] + for nic_def in netcfg.get('config'): + if nic_def.get('type') != 'physical': + continue + for subn in nic_def.get('subnets'): + if 'gateway' in subn: + gateways.append(subn.get('gateway')) + + # we should have two gateways, one ipv4 and ipv6 + self.assertEqual(len(gateways), 2) + + # make that the ipv6 gateway is there + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIn(ipv4_def.get('gateway'), gateways) + + # make sure the the ipv6 gateway is there + ipv6_def = meta_def.get('ipv6') + self.assertIn(ipv6_def.get('gateway'), gateways) + def test_public_interface_defined(self): """test that the public interface is defined as eth0""" (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') -- cgit v1.2.3 From 2825a917e5fa130818c0d77219f32961b99a057f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 May 2017 13:09:26 -0400 Subject: flake8: move the pinned version of flake8 up to 3.3.0 This just moves flake8 and related tools up to newer versions and fixes the complaints associated with that. We added to the list of flake8 ignores: H102: do not put vim info in source files H304: no relative imports Also updates and pins the following in the flake8 environment: pep8: 1.7.0 => drop (although hacking still pulls it in). 
pyflakes 1.1.0 => 1.5.0 hacking 0.10.2 => 0.13.0 flake8 2.5.4 => 3.3.0 pycodestyle none => 2.3.1 --- cloudinit/sources/DataSourceAzure.py | 1 + test-requirements.txt | 8 ++++---- tests/unittests/test_datasource/test_altcloud.py | 2 +- tests/unittests/test_datasource/test_azure.py | 2 +- tests/unittests/test_datasource/test_maas.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 4 ++-- tests/unittests/test_datasource/test_openstack.py | 4 ++-- tests/unittests/test_datasource/test_ovf.py | 2 +- tests/unittests/test_distros/test_resolv.py | 2 +- tests/unittests/test_handler/test_handler_power_state.py | 4 ++-- tests/unittests/test_handler/test_handler_snappy.py | 4 ++-- tests/unittests/test_helpers.py | 2 +- tests/unittests/test_net.py | 2 +- tests/unittests/test_util.py | 10 +++++----- tools/hacking.py | 5 +++-- tools/mock-meta.py | 2 +- tools/net-convert.py | 2 +- tox.ini | 2 +- 18 files changed, 31 insertions(+), 29 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44857c09..b9458ffa 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -163,6 +163,7 @@ def get_resource_disk_on_freebsd(port_id): return devname return None + # update the FreeBSD specific information if util.is_FreeBSD(): DEFAULT_PRIMARY_NIC = 'hn0' diff --git a/test-requirements.txt b/test-requirements.txt index 0e7fc8fb..1b39ea5c 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -13,7 +13,7 @@ contextlib2 setuptools # Used for syle checking -pep8==1.7.0 -pyflakes==1.1.0 -flake8==2.5.4 -hacking==0.10.2 +pycodestyle==2.3.1 +pyflakes==1.5.0 +flake8==3.3.0 +hacking==0.13.0 diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index 63a2b04d..b6d4a453 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ 
-419,7 +419,7 @@ class TestReadUserDataCallback(TestCase): '''Test read_user_data_callback() no files are found.''' _remove_user_data_files(self.mount_dir) - self.assertEqual(None, dsac.read_user_data_callback(self.mount_dir)) + self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) def force_arch(arch=None): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 67cddeb9..852ec703 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -239,7 +239,7 @@ fdescfs /dev/fd fdescfs rw 0 0 with mock.patch.object(os.path, 'exists', return_value=True): res = get_path_dev_freebsd('/etc', mnt_list) - self.assertNotEqual(res, None) + self.assertIsNotNone(res) def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 693882d2..c1911bf4 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -44,7 +44,7 @@ class TestMAASDataSource(TestCase): # verify that 'userdata' is not returned as part of the metadata self.assertFalse(('user-data' in md)) - self.assertEqual(vd, None) + self.assertIsNone(vd) def test_seed_dir_valid_extra(self): """Verify extra files do not affect seed_dir validity.""" diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index bce66125..b0f8e435 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -126,14 +126,14 @@ class TestOpenNebulaDataSource(TestCase): populate_dir(self.seed_dir, {'context.sh': ''}) results = ds.read_context_disk_dir(self.seed_dir) - self.assertEqual(results['userdata'], None) + self.assertIsNone(results['userdata']) self.assertEqual(results['metadata'], {}) def 
test_seed_dir_empty2_context(self): populate_context_dir(self.seed_dir, {}) results = ds.read_context_disk_dir(self.seed_dir) - self.assertEqual(results['userdata'], None) + self.assertIsNone(results['userdata']) self.assertEqual(results['metadata'], {}) def test_seed_dir_broken_context(self): diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 7bf55084..c2905d1a 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -242,7 +242,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) - self.assertEqual(ds_os.vendordata_raw, None) + self.assertIsNone(ds_os.vendordata_raw) @hp.activate def test_bad_datasource_meta(self): @@ -318,7 +318,7 @@ class TestVendorDataLoading(test_helpers.TestCase): self.assertEqual(self.cvj(data), data) def test_vd_load_dict_no_ci(self): - self.assertEqual(self.cvj({'foo': 'bar'}), None) + self.assertIsNone(self.cvj({'foo': 'bar'})) def test_vd_load_dict_ci_dict(self): self.assertRaises(ValueError, self.cvj, diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 3e09510c..477cf8ed 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -68,6 +68,6 @@ class TestReadOvfEnv(test_helpers.TestCase): md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) self.assertEqual({'password': "passw0rd"}, cfg) - self.assertEqual(None, ud) + self.assertIsNone(ud) # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py index c9d03475..97168cf9 100644 --- a/tests/unittests/test_distros/test_resolv.py +++ b/tests/unittests/test_distros/test_resolv.py @@ -30,7 +30,7 
@@ class TestResolvHelper(TestCase): def test_local_domain(self): rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertEqual(None, rp.local_domain) + self.assertIsNone(rp.local_domain) rp.local_domain = "bob" self.assertEqual('bob', rp.local_domain) diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 3fd0069d..e382210d 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -15,12 +15,12 @@ class TestLoadPowerState(t_help.TestCase): def test_no_config(self): # completely empty config should mean do nothing (cmd, _timeout, _condition) = psc.load_power_state({}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_irrelevant_config(self): # no power_state field in config should return None for cmd (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_invalid_mode(self): cfg = {'power_state': {'mode': 'gibberish'}} diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index edb73d6d..e4d07622 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -419,7 +419,7 @@ class TestSnapConfig(FilesystemMockingTestCase): def test_snap_config_add_snap_user_no_config(self): usercfg = add_snap_user(cfg=None) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) def test_snap_config_add_snap_user_not_dict(self): cfg = ['foobar'] @@ -428,7 +428,7 @@ class TestSnapConfig(FilesystemMockingTestCase): def test_snap_config_add_snap_user_no_email(self): cfg = {'assertions': [], 'known': True} usercfg = add_snap_user(cfg=cfg) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) @mock.patch('cloudinit.config.cc_snap_config.util') def test_snap_config_add_snap_user_email_only(self, 
mock_util): diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index 955f8dfa..f1979e89 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -32,6 +32,6 @@ class TestPaths(test_helpers.ResourceUsingTestCase): myds._instance_id = None mypaths = self.getCloudPaths(myds) - self.assertEqual(None, mypaths.get_ipath()) + self.assertIsNone(mypaths.get_ipath()) # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index feeab908..7104d00e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1301,7 +1301,7 @@ class TestCmdlineReadKernelConfig(CiTestCase): files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) - self.assertEqual(found, None) + self.assertIsNone(found) def test_ip_cmdline_both_ip_ip6(self): content = {'net-eth0.conf': DHCP_CONTENT_1, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 490760d1..014aa6a3 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -44,7 +44,7 @@ class TestGetCfgOptionListOrStr(helpers.TestCase): """None is returned if key is not found and no default given.""" config = {} result = util.get_cfg_option_list(config, "key") - self.assertEqual(None, result) + self.assertIsNone(result) def test_not_found_with_default(self): """Default is returned if key is not found.""" @@ -432,13 +432,13 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): def test_none_returned_if_neither_source_has_data(self): self.patch_mapping({}) self._configure_dmidecode_return('key', 'value') - self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_none_returned_if_dmidecode_not_in_path(self): self.patched_funcs.enter_context( mock.patch.object(util, 'which', lambda _: False)) self.patch_mapping({}) - 
self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_dots_returned_instead_of_foxfox(self): # uninitialized dmi values show as \xff, return those as . @@ -626,8 +626,8 @@ class TestSubp(helpers.TestCase): def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) - self.assertEqual(err, None) - self.assertEqual(out, None) + self.assertIsNone(err) + self.assertIsNone(out) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", diff --git a/tools/hacking.py b/tools/hacking.py index 6c320935..e6a05136 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -165,7 +165,8 @@ if __name__ == "__main__": pep8._main() finally: if len(_missingImport) > 0: - print >> sys.stderr, ("%i imports missing in this test environment" - % len(_missingImport)) + sys.stderr.write( + "%i imports missing in this test environment\n" % + len(_missingImport)) # vi: ts=4 expandtab diff --git a/tools/mock-meta.py b/tools/mock-meta.py index 82816e8a..f185dbf2 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -21,8 +21,8 @@ import functools import json import logging import os -import socket import random +import socket import string import sys import yaml diff --git a/tools/net-convert.py b/tools/net-convert.py index 870da639..b2db8adf 100755 --- a/tools/net-convert.py +++ b/tools/net-convert.py @@ -9,8 +9,8 @@ import yaml from cloudinit.sources.helpers import openstack from cloudinit.net import eni -from cloudinit.net import network_state from cloudinit.net import netplan +from cloudinit.net import network_state from cloudinit.net import sysconfig diff --git a/tox.ini b/tox.ini index fce07740..03bb5f19 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,7 @@ setenv = [flake8] #H102 Apache 2.0 license header not found -ignore=H404,H405,H105,H301,H104,H403,H101,H102 +ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304 exclude = 
.venv,.tox,dist,doc,*egg,.git,build,tools [testenv:doc] -- cgit v1.2.3 From e5b2c011440aefe036c71a8c5e8ec547cc80f270 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 May 2017 21:33:30 -0400 Subject: python2.6: fix unit tests usage of assertNone and format. python2.6 unittest.TestCase does not have the assertIsNone or assertIsNotNone. We just have to explicitly use the unittest2 version, which we get from helpers. The desire to use assertIsNone comes from flake8 (through hacking, I believe). Also, fix "{}.format('foo')" which is not valid in python2.6. --- tests/unittests/test_datasource/test_altcloud.py | 3 ++- tests/unittests/test_handler/test_handler_ntp.py | 32 ++++++++++++------------ 2 files changed, 18 insertions(+), 17 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index b6d4a453..9c46abc1 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -17,7 +17,8 @@ import tempfile from cloudinit import helpers from cloudinit import util -from unittest import TestCase + +from ..helpers import TestCase import cloudinit.sources.DataSourceAltCloud as dsac diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 21f2ab19..bc4277b7 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -59,7 +59,7 @@ class TestNtp(FilesystemMockingTestCase): with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() self.assertFalse(os.path.exists(ntpconf)) - self.assertTrue(os.path.exists("{}.dist".format(ntpconf))) + self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) def test_ntp_rename_ntp_conf_skip_missing(self): """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" @@ -67,7 +67,7 @@ class 
TestNtp(FilesystemMockingTestCase): self.assertFalse(os.path.exists(ntpconf)) with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() - self.assertFalse(os.path.exists("{}.dist".format(ntpconf))) + self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) self.assertFalse(os.path.exists(ntpconf)) def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): @@ -84,7 +84,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template(cfg, mycloud) @@ -107,10 +107,10 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl which isn't read - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(b'NOT READ: ntp.conf..tmpl is primary') # Create ntp.conf.tmpl. 
- with open('{}.{}.tmpl'.format(ntp_conf, distro), 'wb') as stream: + with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template(cfg, mycloud) @@ -129,19 +129,19 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template({}, mycloud) content = util.read_file_or_url('file://' + ntp_conf).contents default_pools = [ - "{}.{}.pool.ntp.org".format(x, distro) + "{0}.{1}.pool.ntp.org".format(x, distro) for x in range(0, cc_ntp.NR_POOL_SERVERS)] self.assertEqual( - "servers []\npools {}\n".format(default_pools), + "servers []\npools {0}\n".format(default_pools), content.decode()) self.assertIn( - "Adding distro default ntp pool servers: {}".format( + "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), self.logs.getvalue()) @@ -158,7 +158,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud('ubuntu') ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): with mock.patch.object(util, 'which', return_value=None): @@ -166,7 +166,7 @@ class TestNtp(FilesystemMockingTestCase): content = util.read_file_or_url('file://' + ntp_conf).contents self.assertEqual( - 'servers {}\npools {}\n'.format(servers, pools), + 'servers {0}\npools {1}\n'.format(servers, pools), content.decode()) def 
test_ntp_handler_real_distro_templates(self): @@ -184,7 +184,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) root_dir = dirname(dirname(os.path.realpath(util.__file__))) tmpl_file = os.path.join( - '{}/templates/ntp.conf.{}.tmpl'.format(root_dir, distro)) + '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro)) # Create a copy in our tmp_dir shutil.copy( tmpl_file, @@ -195,15 +195,15 @@ class TestNtp(FilesystemMockingTestCase): content = util.read_file_or_url('file://' + ntp_conf).contents expected_servers = '\n'.join([ - 'server {} iburst'.format(server) for server in servers]) + 'server {0} iburst'.format(server) for server in servers]) self.assertIn( expected_servers, content.decode(), - 'failed to render ntp.conf for distro:{}'.format(distro)) + 'failed to render ntp.conf for distro:{0}'.format(distro)) expected_pools = '\n'.join([ - 'pool {} iburst'.format(pool) for pool in pools]) + 'pool {0} iburst'.format(pool) for pool in pools]) self.assertIn( expected_pools, content.decode(), - 'failed to render ntp.conf for distro:{}'.format(distro)) + 'failed to render ntp.conf for distro:{0}'.format(distro)) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" -- cgit v1.2.3 From d27c49391df343d25bd2e24045d2be6bf39c30d2 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 25 May 2017 10:21:21 -0800 Subject: GCE: Update the attribute used to find instance SSH keys. Per the documentation at https://cloud.google.com/compute/docs/storing-retrieving-metadata The instance-level SSH key was named 'sshKeys' and now is 'ssh-keys'. The project-level SSH key attribute has not changed so is intentionally not changed here. 
LP: #1693582 --- cloudinit/sources/DataSourceGCE.py | 2 +- tests/unittests/test_datasource/test_gce.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'tests/unittests/test_datasource') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index e9afda9c..684eac86 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -71,7 +71,7 @@ class DataSourceGCE(sources.DataSource): ('availability-zone', ('instance/zone',), True, True), ('local-hostname', ('instance/hostname',), True, True), ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/sshKeys'), False, True), + 'instance/attributes/ssh-keys'), False, True), ('user-data', ('instance/attributes/user-data',), False, False), ('user-data-encoding', ('instance/attributes/user-data-encoding',), False, True), diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 3eaa58e3..6fd1341d 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -140,7 +140,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_instance_level_ssh_keys_are_used(self): key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) _set_mock_metadata(meta) self.ds.get_data() @@ -150,7 +150,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_instance_level_keys_replace_project_level_keys(self): key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) _set_mock_metadata(meta) self.ds.get_data() -- cgit v1.2.3