From 4f0f171c29bb9abb5cbb6f9adbe68015089aeed9 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 2 May 2017 22:26:50 +0000 Subject: fs_setup: if cmd is specified, use shell interpretation. If 'cmd' is provided to a fs_setup entry, then cloud-init was trying to execute the rendered string as a single name, rather than splitting the string. The change here will pass the string to shell for interpretation so that it is split there. Also fix some documentation errors and warn when fs_opts or overwrite is provided along with 'cmd'. LP: #1687712 --- cloudinit/config/cc_disk_setup.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'cloudinit/config/cc_disk_setup.py') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index f49386e3..6f827ddc 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -910,12 +910,23 @@ def mkfs(fs_cfg): "must be set.", label) # Create the commands + shell = False if fs_cmd: fs_cmd = fs_cfg['cmd'] % { 'label': label, 'filesystem': fs_type, 'device': device, } + shell = True + + if overwrite: + LOG.warning( + "fs_setup:overwrite ignored because cmd was specified: %s", + fs_cmd) + if fs_opts: + LOG.warning( + "fs_setup:extra_opts ignored because cmd was specified: %s", + fs_cmd) else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) @@ -936,14 +947,14 @@ def mkfs(fs_cfg): if overwrite or device_type(device) == "disk": fs_cmd.append(lookup_force_flag(fs_type)) - # Add the extends FS options - if fs_opts: - fs_cmd.extend(fs_opts) + # Add the extends FS options + if fs_opts: + fs_cmd.extend(fs_opts) LOG.debug("Creating file system %s on %s", label, device) - LOG.debug(" Using cmd: %s", " ".join(fs_cmd)) + LOG.debug(" Using cmd: %s", str(fs_cmd)) try: - util.subp(fs_cmd) + util.subp(fs_cmd, shell=shell) except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) -- cgit v1.2.3 From 31b6f173280fcc8e9be2732ae2e9b6f6c89679d4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Apr 2017 09:23:25 -0400 Subject: Azure: fix reformatting of ephemeral disks on resize to large types. Large instance types have a different disk format on the newly partitioned ephemeral drive. So we have to adjust the logic in the Azure datasource to recognize that a disk with 2 partitions and an empty ntfs filesystem on the second one is acceptable. This also adjusts the datasources's builtin fs_setup config to remove the 'replace_fs' entry. This entry was previously ignored, and confusing. I've clarified the doc on that also. LP: #1686514 --- cloudinit/config/cc_disk_setup.py | 19 +- cloudinit/sources/DataSourceAzure.py | 84 ++++--- tests/unittests/test_datasource/test_azure.py | 265 ++++++++++++++++++--- .../test_handler/test_handler_disk_setup.py | 16 ++ 4 files changed, 307 insertions(+), 77 deletions(-) (limited to 'cloudinit/config/cc_disk_setup.py') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 6f827ddc..29eb5dd8 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -68,6 +68,9 @@ specified using ``filesystem``. Using ``overwrite: true`` for filesystems is dangerous and can lead to data loss, so double check the entry in ``fs_setup``. +.. note:: + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. 
+ **Internal name:** ``cc_disk_setup`` **Module frequency:** per instance @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): - log.warn("Invalid disk definition for %s" % disk) + log.warning("Invalid disk definition for %s" % disk) continue try: @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): update_fs_setup_devices(fs_setup, cloud.device_name_to_device) for definition in fs_setup: if not isinstance(definition, dict): - log.warn("Invalid file system definition: %s" % definition) + log.warning("Invalid file system definition: %s" % definition) continue try: @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): definition['_origname'] = origname definition['device'] = tformed - if part and 'partition' in definition: - definition['_partition'] = definition['partition'] + if part: + # In origname with .N, N overrides 'partition' key. + if 'partition' in definition: + LOG.warning("Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", part, origname, + definition) + definition['_partition'] = definition['partition'] definition['partition'] = part @@ -849,7 +857,8 @@ def mkfs(fs_cfg): # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", + device, check_label, check_fstype) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5254e18a..44857c09 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -196,8 +196,7 @@ BUILTIN_CLOUD_CONFIG = { 'overwrite': True}, }, 'fs_setup': [{'filesystem': DEFAULT_FS, - 'device': 'ephemeral0.1', - 'replace_fs': 'ntfs'}], + 'device': 'ephemeral0.1'}], } DS_CFG_PATH = ['datasource', DS_NAME] @@ -413,56 +412,71 @@ class DataSourceAzureNet(sources.DataSource): return +def _partitions_on_device(devpath, maxnum=16): + # return a list of tuples (ptnum, path) for each part on devpath + for suff in ("-part", "p", ""): + found = [] + for pnum in range(1, maxnum): + ppath = devpath + suff + str(pnum) + if os.path.exists(ppath): + found.append((pnum, os.path.realpath(ppath))) + if found: + return found + return [] + + +def _has_ntfs_filesystem(devpath): + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) + LOG.debug('ntfs_devices found = %s', ntfs_devices) + return os.path.realpath(devpath) in ntfs_devices + + def can_dev_be_reformatted(devpath): - # determine if the ephemeral block device path devpath - # is newly formatted after a resize. + """Determine if block device devpath is newly formatted ephemeral. + + A newly formatted disk will: + a.) have a partition table (dos or gpt) + b.) have 1 partition that is ntfs formatted, or + have 2 partitions with the second partition ntfs formatted. + (larger instances with >2TB ephemeral disk have gpt, and will + have a microsoft reserved partition as part 1. LP: #1686514) + c.) 
the ntfs partition will have no files other than possibly + 'dataloss_warning_readme.txt'""" if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath - realpath = os.path.realpath(devpath) - LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) - - # it is possible that the block device might exist, but the kernel - # have not yet read the partition table and sent events. we udevadm settle - # to hope to resolve that. Better here would probably be to test and see, - # and then settle if we didn't find anything and try again. - if util.which("udevadm"): - util.subp(["udevadm", "settle"]) + LOG.debug('Resolving realpath of %s -> %s', devpath, + os.path.realpath(devpath)) # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource # where partitions are "1" or "-part1" or "p1" - part1path = None - for suff in ("-part", "p", ""): - cand = devpath + suff + "1" - if os.path.exists(cand): - if os.path.exists(devpath + suff + "2"): - msg = ('device %s had more than 1 partition: %s, %s' % - devpath, cand, devpath + suff + "2") - return False, msg - part1path = cand - break - - if part1path is None: + partitions = _partitions_on_device(devpath) + if len(partitions) == 0: return False, 'device %s was not partitioned' % devpath + elif len(partitions) > 2: + msg = ('device %s had 3 or more partitions: %s' % + (devpath, ' '.join([p[1] for p in partitions]))) + return False, msg + elif len(partitions) == 2: + cand_part, cand_path = partitions[1] + else: + cand_part, cand_path = partitions[0] - real_part1path = os.path.realpath(part1path) - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) - LOG.debug('ntfs_devices found = %s', ntfs_devices) - if real_part1path not in ntfs_devices: - msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % - (part1path, real_part1path, devpath)) + if not _has_ntfs_filesystem(cand_path): + msg = ('partition %s (%s) on device %s was not ntfs formatted' % + (cand_part, cand_path, devpath)) return False, msg def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) - bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % - (part1path, real_part1path, devpath)) + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % + (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(part1path, count_files) + file_count = util.mount_cb(cand_path, count_files) except util.MountFailedError as e: - return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: return False, bmsg + ' but had %d files on it.' % file_count diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e6b0dcb4..67cddeb9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file -from cloudinit.sources import DataSourceAzure +from cloudinit.util import b64e, decode_binary, load_file, write_file +from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd -from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest +from ..helpers import (CiTestCase, TestCase, populate_dir, mock, + ExitStack, PY26, SkipTest) import crypt import os @@ -98,7 +99,6 @@ class TestAzureDataSource(TestCase): self.patches.enter_context(mock.patch.object(module, name, new)) def _get_mockds(self): - mod = DataSourceAzure sysctl_out = "dev.storvsc.3.%pnpinfo: "\ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" @@ -123,14 +123,14 @@ scbus-1 on xpt0 bus 0 at scbus3 target 1 lun 0 (da1,pass2) """ self.apply_patches([ - (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( return_value=sysctl_out)), - (mod, 'get_camcontrol_dev_bus', mock.MagicMock( + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( return_value=camctl_devbus)), - (mod, 'get_camcontrol_dev', mock.MagicMock( + (dsaz, 'get_camcontrol_dev', mock.MagicMock( return_value=camctl_dev)) ]) - return mod + return dsaz def _get_ds(self, data, agent_command=None): @@ -152,8 +152,7 @@ scbus-1 on xpt0 bus 0 populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) - mod = DataSourceAzure - mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.get_metadata_from_fabric = mock.MagicMock(return_value={ 'public-keys': [], @@ -162,19 +161,19 @@ scbus-1 on xpt0 bus 0 self.instance_id = 'test-instance-id' self.apply_patches([ - (mod, 'list_possible_azure_ds_devs', dsdevs), - (mod, 'invoke_agent', _invoke_agent), - (mod, 'wait_for_files', _wait_for_files), - (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'perform_hostname_bounce', mock.MagicMock()), - (mod, 'get_hostname', mock.MagicMock()), - (mod, 'set_hostname', mock.MagicMock()), - (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), - (mod.util, 'read_dmi_data', mock.MagicMock( + (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'invoke_agent', _invoke_agent), + (dsaz, 'wait_for_files', _wait_for_files), + (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'read_dmi_data', mock.MagicMock( return_value=self.instance_id)), ]) - dsrc = mod.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -418,7 +417,7 @@ fdescfs /dev/fd fdescfs rw 0 0 cfg = dsrc.get_config_obj() self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - DataSourceAzure.RESOURCE_DISK_PATH) + dsaz.RESOURCE_DISK_PATH) assert 'disk_setup' in cfg assert 'fs_setup' in cfg self.assertIsInstance(cfg['disk_setup'], dict) @@ -468,14 +467,13 @@ fdescfs /dev/fd fdescfs rw 0 0 # Make sure that the redacted password on disk is not used by CI self.assertNotEqual(dsrc.cfg.get('password'), - DataSourceAzure.DEF_PASSWD_REDACTION) + dsaz.DEF_PASSWD_REDACTION) # Make sure 
that the password was really encrypted et = ET.fromstring(on_disk_ovf) for elem in et.iter(): if 'UserPassword' in elem.tag: - self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION, - elem.text) + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") @@ -524,17 +522,17 @@ class TestAzureBounce(TestCase): def mock_out_azure_moving_parts(self): self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'invoke_agent')) + mock.patch.object(dsaz, 'invoke_agent')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'wait_for_files')) + mock.patch.object(dsaz, 'wait_for_files')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', + mock.patch.object(dsaz, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', + mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'read_dmi_data', + mock.patch.object(dsaz.util, 'read_dmi_data', mock.MagicMock(return_value='test-instance-id'))) def setUp(self): @@ -543,13 +541,13 @@ class TestAzureBounce(TestCase): self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.addCleanup(shutil.rmtree, self.tmp) - DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() self.get_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_hostname')) + mock.patch.object(dsaz, 'get_hostname')) self.set_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'set_hostname')) + mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) @@ -560,7 +558,7 @@ class TestAzureBounce(TestCase): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - dsrc = DataSourceAzure.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( {}, distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -673,7 +671,7 @@ class TestAzureBounce(TestCase): def test_default_bounce_command_used_by_default(self): cmd = 'default-bounce-command' - DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) self._get_ds(data, agent_command=['not', '__builtin__']).get_data() @@ -701,15 +699,208 @@ class TestAzureBounce(TestCase): class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) - self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + self.assertRaises(dsaz.BrokenAzureDataSource, + dsaz.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) - (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) + (_md, _ud, cfg) = 
dsaz.read_azure_ovf(content) for mypk in mypklist: self.assertIn(mypk, cfg['_pubkeys']) + +class TestCanDevBeReformatted(CiTestCase): + warning_file = 'dataloss_warning_readme.txt' + + def _domock(self, mockpath, sattr=None): + patcher = mock.patch(mockpath) + setattr(self, sattr, patcher.start()) + self.addCleanup(patcher.stop) + + def setUp(self): + super(TestCanDevBeReformatted, self).setUp() + + def patchup(self, devs): + bypath = {} + for path, data in devs.items(): + bypath[path] = data + if 'realpath' in data: + bypath[data['realpath']] = data + for ppath, pdata in data.get('partitions', {}).items(): + bypath[ppath] = pdata + if 'realpath' in data: + bypath[pdata['realpath']] = pdata + + def realpath(d): + return bypath[d].get('realpath', d) + + def partitions_on_device(devpath): + parts = bypath.get(devpath, {}).get('partitions', {}) + ret = [] + for path, data in parts.items(): + ret.append((data.get('num'), realpath(path))) + # return sorted by partition number + return sorted(ret, key=lambda d: d[0]) + + def mount_cb(device, callback): + p = self.tmp_dir() + for f in bypath.get(device).get('files', []): + write_file(os.path.join(p, f), content=f) + return callback(p) + + def has_ntfs_fs(device): + return bypath.get(device, {}).get('fs') == 'ntfs' + + p = 'cloudinit.sources.DataSourceAzure' + self._domock(p + "._partitions_on_device", 'm_partitions_on_device') + self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') + self._domock(p + ".util.mount_cb", 'm_mount_cb') + self._domock(p + ".os.path.realpath", 'm_realpath') + self._domock(p + ".os.path.exists", 'm_exists') + + self.m_exists.side_effect = lambda p: p in bypath + self.m_realpath.side_effect = realpath + self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs + self.m_mount_cb.side_effect = mount_cb + self.m_partitions_on_device.side_effect = partitions_on_device + + def test_three_partitions_is_false(self): + """A disk with 3 partitions can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2}, + '/dev/sda3': {'num': 3}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("3 or more", msg.lower()) + + def test_no_partitions_is_false(self): + """A disk with no partitions can not be formatted.""" + self.patchup({'/dev/sda': {}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not partitioned", msg.lower()) + + def test_two_partitions_not_ntfs_false(self): + """2 partitions and 2nd not ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_two_partitions_ntfs_populated_false(self): + """2 partitions and populated ntfs fs on 2nd can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', + 'files': ['secret.txt']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("files on it", msg.lower()) + + def test_two_partitions_ntfs_empty_is_true(self): + """2 partitions and empty ntfs fs on 2nd can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, + 
}}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_not_ntfs_false(self): + """1 partition witih fs other than ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'zfs'}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_one_partition_ntfs_populated_false(self): + """1 mountable ntfs partition with many files can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['file1.txt', 'file2.exe']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("files on it", msg.lower()) + + def test_one_partition_ntfs_empty_is_true(self): + """1 mountable ntfs partition and no files can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): + """1 mountable ntfs partition and only warn file can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(False, value) + self.assertIn("3 or more", msg.lower()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 9f00d46a..68fc6aae 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -151,6 +151,22 @@ class TestUpdateFsSetupDevices(TestCase): 'filesystem': 'xfs' }, fs_setup) + def test_dotted_devname_populates_partition(self): + fs_setup = { + 'device': 'ephemeral0.1', + 'label': 'test2', + 'filesystem': 'xfs' + } + cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + self.assertEqual({ + '_origname': 'ephemeral0.1', + 'device': 'ephemeral0', + 'partition': '1', + 'label': 'test2', + 'filesystem': 'xfs' + }, fs_setup) + 
@mock.patch('cloudinit.config.cc_disk_setup.find_device_node', return_value=('/dev/xdb1', False)) -- cgit v1.2.3 From 3507b59eaa4914ba041f9c7ae987a2cfb036d8b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 19 May 2017 14:57:04 -0400 Subject: disk_setup: fix several issues with gpt disk partitions. This fixes several shortcomings of disk_setup with gpt disks. * 'sgdisk -p' was being used to determine the size of a disk. this can fail if it believes there is a bad gpt partition table. Instead we just use blockdev now for both mbr or gpt disks. * parsing of sgdisk -p output assumed that the 'name' of the partition type would not have any spaces (Microsoft basic data) * interaction with sgdisk did not realize that sgdisk wants input of '8300' rather than '83' and will output the same. LP: #1692087 --- cloudinit/config/cc_disk_setup.py | 80 ++++++++++++---------- .../test_handler/test_handler_disk_setup.py | 2 +- 2 files changed, 46 insertions(+), 36 deletions(-) (limited to 'cloudinit/config/cc_disk_setup.py') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 29eb5dd8..e1505b39 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -431,7 +431,7 @@ def get_dyn_func(*args): raise Exception("No such function %s to call!" % func_name) -def get_mbr_hdd_size(device): +def get_hdd_size(device): try: size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) @@ -441,22 +441,6 @@ def get_mbr_hdd_size(device): return int(size_in_bytes) / int(sector_size) -def get_gpt_hdd_size(device): - out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV) - for line in out.splitlines(): - if line.startswith("Disk"): - return line.split()[2] - raise Exception("Failed to get %s size from sgdisk" % (device)) - - -def get_hdd_size(table_type, device): - """ - Returns the hard disk size. - This works with any disk type, including GPT. - """ - return get_dyn_func("get_%s_hdd_size", table_type, device) - - def check_partition_mbr_layout(device, layout): """ Returns true if the partition layout matches the one on the disk @@ -504,12 +488,35 @@ def check_partition_gpt_layout(device, layout): device, e)) out_lines = iter(out.splitlines()) - # Skip header + # Skip header. Output looks like: + # *************************************************************** + # Found invalid GPT and valid MBR; converting MBR to GPT format + # in memory. + # *************************************************************** + # + # Disk /dev/vdb: 83886080 sectors, 40.0 GiB + # Logical sector size: 512 bytes + # Disk identifier (GUID): 8A7F11AD-3953-491B-8051-077E01C8E9A7 + # Partition table holds up to 128 entries + # First usable sector is 34, last usable sector is 83886046 + # Partitions will be aligned on 2048-sector boundaries + # Total free space is 83476413 sectors (39.8 GiB) + # + # Number Start (sector) End (sector) Size Code Name + # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: if line.strip().startswith('Number'): break - return [line.strip().split()[-1] for line in out_lines] + codes = [line.strip().split()[5] for line in out_lines] + cleaned = [] + + # user would expect a code '83' to be Linux, but sgdisk outputs 8300. 
+ for code in codes: + if len(code) == 4 and code.endswith("00"): + code = code[0:2] + cleaned.append(code) + return cleaned def check_partition_layout(table_type, device, layout): @@ -523,6 +530,8 @@ def check_partition_layout(table_type, device, layout): found_layout = get_dyn_func( "check_partition_%s_layout", table_type, device, layout) + LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", + table_type, device, layout, found_layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -530,18 +539,17 @@ def check_partition_layout(table_type, device, layout): return True return False - else: - if len(found_layout) != len(layout): - return False - else: - # This just makes sure that the number of requested - # partitions and the type labels are right - for x in range(1, len(layout) + 1): - if isinstance(layout[x - 1], tuple): - _, part_type = layout[x] - if int(found_layout[x]) != int(part_type): - return False - return True + elif len(found_layout) == len(layout): + # This just makes sure that the number of requested + # partitions and the type labels are right + layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None + for x in layout] + LOG.debug("Layout types=%s. Found types=%s", + layout_types, found_layout) + for itype, ftype in zip(layout_types, found_layout): + if itype is not None and str(ftype) != str(itype): + return False + return True return False @@ -704,9 +712,11 @@ def exec_mkpart_gpt(device, layout): util.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: + # convert to a 4 char (or more) string right padded with 0 + # 82 -> 8200. 'Linux' -> 'Linux' + pinput = str(partition_type).ljust(4, "0") util.subp( - [SGDISK_CMD, - '-t', '{}:{}'.format(index, partition_type), device]) + [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -777,8 +787,8 @@ def mkpart(device, definition): LOG.debug("Skipping partitioning on configured device %s", device) return - LOG.debug("Checking for device size") - device_size = get_hdd_size(table_type, device) + LOG.debug("Checking for device size of %s", device) + device_size = get_hdd_size(device) LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 35dff3ee..e3226978 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -66,7 +66,7 @@ class TestGetMbrHddSize(TestCase): size_in_sectors = size_in_bytes / sector_size self._configure_subp_mock(size_in_bytes, sector_size) self.assertEqual(size_in_sectors, - cc_disk_setup.get_mbr_hdd_size('/dev/sda1')) + cc_disk_setup.get_hdd_size('/dev/sda1')) def test_size_for_512_byte_sectors(self): self._test_for_sector_size(512) -- cgit v1.2.3 From 1815c6d801933c47a01f1a94a8e689824f6797b4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 May 2017 13:06:08 -0400 Subject: disk_setup: udev settle before attempting partitioning or fs creation. This attempts to use udevadm settle to wait until devices have been fully "realized". If a device exists, there may still be events in the udev queue that would create its partition table entries. We need to wait until those have been processed also. 
LP: #1692093 --- cloudinit/config/cc_disk_setup.py | 26 +++++++++++++++++++--- .../test_handler/test_handler_disk_setup.py | 2 ++ 2 files changed, 25 insertions(+), 3 deletions(-) (limited to 'cloudinit/config/cc_disk_setup.py') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index e1505b39..c2b83aea 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -680,14 +680,14 @@ def read_parttbl(device): reliable way to probe the partition table. """ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udev_cmd = [UDEVADM_CMD, 'settle'] + udevadm_settle() try: - util.subp(udev_cmd) util.subp(blkdev_cmd) - util.subp(udev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) + udevadm_settle() + def exec_mkpart_mbr(device, layout): """ @@ -737,6 +737,24 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) +def udevadm_settle(): + util.subp(['udevadm', 'settle']) + + +def assert_and_settle_device(device): + """Assert that device exists and settle so it is fully recognized.""" + if not os.path.exists(device): + udevadm_settle() + if not os.path.exists(device): + raise RuntimeError("Device %s did not exist and was not created " + "with a udevamd settle." % device) + + # Whether or not the device existed above, it is possible that udev + # events that would populate udev database (for reading by lsdname) have + # not yet finished. So settle again. + udevadm_settle() + + def mkpart(device, definition): """ Creates the partition table. @@ -752,6 +770,7 @@ def mkpart(device, definition): device: the device to work on. """ # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) @@ -852,6 +871,7 @@ def mkfs(fs_cfg): overwrite = fs_cfg.get('overwrite', False) # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) # This allows you to define the default ephemeral or swap diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index e3226978..916a0d7a 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -168,6 +168,8 @@ class TestUpdateFsSetupDevices(TestCase): }, fs_setup) +@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', + return_value=None) @mock.patch('cloudinit.config.cc_disk_setup.find_device_node', return_value=('/dev/xdb1', False)) @mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) -- cgit v1.2.3
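
Editor's note: as an illustration of the settle-then-check pattern that the final commit above describes ("udev settle before attempting partitioning or fs creation"), here is a minimal standalone sketch. It is hedged: it uses the standard-library subprocess module and an example device path for self-containment rather than cloud-init's util.subp, and it is not the module's actual code, only the shape of the logic the patch adds.

import os
import subprocess


def udevadm_settle():
    # Block until the udev event queue has been fully processed.
    subprocess.check_call(['udevadm', 'settle'])


def assert_and_settle_device(device):
    """Assert that device exists and settle so it is fully recognized."""
    if not os.path.exists(device):
        udevadm_settle()
        if not os.path.exists(device):
            raise RuntimeError("Device %s did not exist and was not created "
                               "by a udevadm settle." % device)

    # Even when the device node already exists, udev events that would
    # create its partition entries may still be queued, so settle once
    # more before partitioning or mkfs is attempted.
    udevadm_settle()


if __name__ == '__main__':
    # Example device path only; use a block device the instance actually has.
    assert_and_settle_device('/dev/sdb')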