diff --git a/0014-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch b/0014-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch new file mode 100644 index 0000000000000000000000000000000000000000..44d60a37e860417dbcb3069f49a28dbf98a7bf84 --- /dev/null +++ b/0014-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch @@ -0,0 +1,103 @@ +From 30782ea4482e8118996ffa69f967531515761179 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 20 Jan 2025 13:02:50 +0100 +Subject: [PATCH] Do not remove PVs from devices file if disabled or doesn't + exists + +When the file doesn't exists the 'lvmdevices --deldev' call will +fail but it will still create the devices file. This means we now +have an empty devices file and all subsequent LVM calls will fail. + +Resolves: RHEL-65846 +--- + blivet/formats/lvmpv.py | 10 +++++++ + tests/unit_tests/formats_tests/lvmpv_test.py | 28 ++++++++++++++++++++ + 2 files changed, 38 insertions(+) + +diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py +index 982233878..aa5cc0a5a 100644 +--- a/blivet/formats/lvmpv.py ++++ b/blivet/formats/lvmpv.py +@@ -171,6 +171,16 @@ def lvmdevices_remove(self): + if not lvm.HAVE_LVMDEVICES: + raise PhysicalVolumeError("LVM devices file feature is not supported") + ++ if not os.path.exists(lvm.LVM_DEVICES_FILE): ++ log.debug("Not removing %s from devices file: %s doesn't exist", ++ self.device, lvm.LVM_DEVICES_FILE) ++ return ++ ++ if not flags.lvm_devices_file: ++ log.debug("Not removing %s from devices file: 'lvm_devices_file' flag is set to False", ++ self.device) ++ return ++ + try: + blockdev.lvm.devices_delete(self.device) + except blockdev.LVMError as e: +diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py +index 8d410f4fd..890e3cb19 100644 +--- a/tests/unit_tests/formats_tests/lvmpv_test.py ++++ b/tests/unit_tests/formats_tests/lvmpv_test.py +@@ -38,6 +38,11 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_not_called() + ++ # LVM devices file not enabled/supported -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + with self.patches() as mock: + # LVM devices file enabled and devices file exists -> devices_add should be called + mock["lvm"].HAVE_LVMDEVICES = True +@@ -47,6 +52,11 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test") + ++ # LVM devices file enabled and devices file exists -> devices_delete should be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_called_with("/dev/test") ++ + with self.patches() as mock: + # LVM devices file enabled and devices file doesn't exist + # and no existing VGs present -> devices_add should be called +@@ -58,6 +68,12 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test") + ++ # LVM devices file enabled but devices file doesn't exist ++ # -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + with self.patches() as mock: + # LVM devices file enabled and devices file doesn't exist + # and existing VGs present -> devices_add should not be called +@@ -69,6 +85,12 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_not_called() + ++ # LVM devices file enabled but devices file doesn't exist ++ # -> devices_delete should not be called ++ fmt._destroy() ++ ++ 
mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + with self.patches() as mock: + # LVM devices file enabled and devices file exists + # but flag set to false -> devices_add should not be called +@@ -81,5 +103,11 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_not_called() + ++ # LVM devices file enabled and devices file exists ++ # but flag set to false -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + # reset the flag back + flags.lvm_devices_file = True diff --git a/0015-iscsi-Use-node-startup-onboot-option-for-Login.patch b/0015-iscsi-Use-node-startup-onboot-option-for-Login.patch new file mode 100644 index 0000000000000000000000000000000000000000..eaad4bad8b9f91334673649f6744bfe32a380371 --- /dev/null +++ b/0015-iscsi-Use-node-startup-onboot-option-for-Login.patch @@ -0,0 +1,23 @@ +From c16a44b6627a6b4c1cb178f4c2127f21a53344ec Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 3 Mar 2025 12:33:34 +0100 +Subject: [PATCH] iscsi: Use node.startup=onboot option for Login + +Resolves: RHEL-53719 +--- + blivet/iscsi.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/iscsi.py b/blivet/iscsi.py +index 95674665b..f66c38934 100644 +--- a/blivet/iscsi.py ++++ b/blivet/iscsi.py +@@ -278,7 +278,7 @@ def _login(self, node_info, extra=None): + + if extra is None: + extra = dict() +- extra["node.startup"] = GLib.Variant("s", "automatic") ++ extra["node.startup"] = GLib.Variant("s", "onboot") + extra["node.session.auth.chap_algs"] = GLib.Variant("s", "SHA1,MD5") + + args = GLib.Variant("(sisisa{sv})", node_info.conn_info + (extra,)) diff --git a/0017-LVMPV-format-size-fix.patch b/0017-LVMPV-format-size-fix.patch new file mode 100644 index 0000000000000000000000000000000000000000..8b6308e0f911a8720447ea7c6b3bc1b4968ae415 --- /dev/null +++ b/0017-LVMPV-format-size-fix.patch @@ -0,0 +1,516 @@ +From 6373572308111c154c323a099103fabaaeace792 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 10:03:17 +0100 +Subject: [PATCH 1/6] Use pvs info from static data to get PV size in PVSize + +No need for a special code for this, we can reuse the existing +code from LVM static data. +--- + blivet/tasks/pvtask.py | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py +index b6f1896a3..3bbab7cbc 100644 +--- a/blivet/tasks/pvtask.py ++++ b/blivet/tasks/pvtask.py +@@ -27,6 +27,7 @@ + + from ..errors import PhysicalVolumeError + from ..size import Size, B ++from ..static_data import pvs_info + + from . import availability + from . 
import task +@@ -55,13 +56,12 @@ def do_task(self): # pylint: disable=arguments-differ + :raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained + """ + +- try: +- pv_info = blockdev.lvm.pvinfo(self.pv.device) +- pv_size = pv_info.pv_size +- except blockdev.LVMError as e: +- raise PhysicalVolumeError(e) ++ pvs_info.drop_cache() ++ pv_info = pvs_info.cache.get(self.pv.device) ++ if pv_info is None: ++ raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device) + +- return Size(pv_size) ++ return Size(pv_info.pv_size) + + + class PVResize(task.BasicApplication, dfresize.DFResizeTask): + +From cc0ad43477e201c8da8f7bffd04c845ea9e57f1c Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 10:05:13 +0100 +Subject: [PATCH 2/6] Get the actual PV format size for LVMPV format + +--- + blivet/formats/lvmpv.py | 2 ++ + blivet/populator/helpers/lvm.py | 2 ++ + tests/unit_tests/populator_test.py | 2 ++ + 3 files changed, 6 insertions(+) + +diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py +index aa5cc0a5a..70f4697fc 100644 +--- a/blivet/formats/lvmpv.py ++++ b/blivet/formats/lvmpv.py +@@ -102,6 +102,8 @@ def __init__(self, **kwargs): + # when set to True, blivet will try to resize the PV to fill all available space + self._grow_to_fill = False + ++ self._target_size = self._size ++ + def __repr__(self): + s = DeviceFormat.__repr__(self) + s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s" +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index 0cf47ba43..e22c52088 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -114,6 +114,8 @@ def _get_kwargs(self): + log.warning("PV %s has no pe_start", name) + if pv_info.pv_free: + kwargs["free"] = Size(pv_info.pv_free) ++ if pv_info.pv_size: ++ kwargs["size"] = Size(pv_info.pv_size) + + return kwargs + +diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py +index 2d8175f2a..0429e8d44 100644 +--- a/tests/unit_tests/populator_test.py ++++ b/tests/unit_tests/populator_test.py +@@ -981,6 +981,7 @@ def test_run(self, *args): + pv_info.vg_uuid = sentinel.vg_uuid + pv_info.pe_start = 0 + pv_info.pv_free = 0 ++ pv_info.pv_size = "10g" + + vg_device = Mock() + vg_device.id = 0 +@@ -1012,6 +1013,7 @@ def test_run(self, *args): + pv_info.vg_extent_count = 2500 + pv_info.vg_free_count = 0 + pv_info.vg_pv_count = 1 ++ pv_info.pv_size = "10g" + + with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache: + mock_pvs_cache.return_value = {device.path: pv_info} + +From 99fc0b2e9c8c42a894eee7bc6c850364ed85d313 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 13:35:38 +0100 +Subject: [PATCH 3/6] Update PV format size after adding/removing the PV + to/from the VG + +Unfortunately LVM substracts VG metadata from the reported PV size +so we need to make sure to update the size after the vgextend and +vgreduce operation. 
+--- + blivet/devices/lvm.py | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 661dc6e06..93f3ccbe7 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -343,12 +343,24 @@ def _remove(self, member): + if lv.status and not status: + lv.teardown() + ++ # update LVMPV format size --> PV format has different size when in VG ++ try: ++ fmt._size = fmt._target_size = fmt._size_info.do_task() ++ except errors.PhysicalVolumeError as e: ++ log.warning("Failed to obtain current size for device %s: %s", fmt.device, e) ++ + def _add(self, member): + try: + blockdev.lvm.vgextend(self.name, member.path) + except blockdev.LVMError as err: + raise errors.LVMError(err) + ++ # update LVMPV format size --> PV format has different size when in VG ++ try: ++ member.format._size = member.format._target_size = member.format._size_info.do_task() ++ except errors.PhysicalVolumeError as e: ++ log.warning("Failed to obtain current size for device %s: %s", member.path, e) ++ + def _add_log_vol(self, lv): + """ Add an LV to this VG. """ + if lv in self._lvs: + +From b6a9d661cd99e6973d8555a1ac587da49fd6d3df Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 14:22:07 +0100 +Subject: [PATCH 4/6] Use LVMPV format size when calculating VG size and free + space + +For existing PVs we need to check the format size instead of +simply expecting the format is fully resized to match the size of +the underlying block device. +--- + blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++----------------- + 1 file changed, 39 insertions(+), 24 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 93f3ccbe7..d0b0b2b9c 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -534,40 +534,55 @@ def reserved_percent(self, value): + + self._reserved_percent = value + +- def _get_pv_usable_space(self, pv): ++ def _get_pv_metadata_space(self, pv): ++ """ Returns how much space will be used by VG metadata in given PV ++ This depends on type of the PV, PE size and PE start. ++ """ + if isinstance(pv, MDRaidArrayDevice): +- return self.align(pv.size - 2 * pv.format.pe_start) ++ return 2 * pv.format.pe_start ++ else: ++ return pv.format.pe_start ++ ++ def _get_pv_usable_space(self, pv): ++ """ Return how much space can be actually used on given PV. 
++ This takes into account: ++ - VG metadata that is/will be stored in this PV ++ - the actual PV format size (which might differ from ++ the underlying block device size) ++ """ ++ ++ if pv.format.exists and pv.format.size and self.exists: ++ # PV format exists, we got its size and VG also exists ++ # -> all metadata is already accounted in the PV format size ++ return pv.format.size ++ elif pv.format.exists and pv.format.size and not self.exists: ++ # PV format exists, we got its size, but the VG doesn't exist ++ # -> metadata size is not accounted in the PV format size ++ return self.align(pv.format.size - self._get_pv_metadata_space(pv)) + else: +- return self.align(pv.size - pv.format.pe_start) ++ # something else -> either the PV format is not yet created or ++ # we for some reason failed to get size of the format, either way ++ # lets use the underlying block device size and calculate the ++ # metadata size ourselves ++ return self.align(pv.size - self._get_pv_metadata_space(pv)) + + @property + def lvm_metadata_space(self): +- """ The amount of the space LVM metadata cost us in this VG's PVs """ +- # NOTE: we either specify data alignment in a PV or the default is used +- # which is both handled by pv.format.pe_start, but LVM takes into +- # account also the underlying block device which means that e.g. +- # for an MD RAID device, it tries to align everything also to chunk +- # size and alignment offset of such device which may result in up +- # to a twice as big non-data area +- # TODO: move this to either LVMPhysicalVolume's pe_start property once +- # formats know about their devices or to a new LVMPhysicalVolumeDevice +- # class once it exists +- diff = Size(0) +- for pv in self.pvs: +- diff += pv.size - self._get_pv_usable_space(pv) +- +- return diff ++ """ The amount of the space LVM metadata cost us in this VG's PVs ++ Note: we either specify data alignment in a PV or the default is used ++ which is both handled by pv.format.pe_start, but LVM takes into ++ account also the underlying block device which means that e.g. 
++ for an MD RAID device, it tries to align everything also to chunk ++ size and alignment offset of such device which may result in up ++ to a twice as big non-data area ++ """ ++ return sum(self._get_pv_metadata_space(pv) for pv in self.pvs) + + @property + def size(self): + """ The size of this VG """ + # TODO: just ask lvm if isModified returns False +- +- # sum up the sizes of the PVs, subtract the unusable (meta data) space +- size = sum(pv.size for pv in self.pvs) +- size -= self.lvm_metadata_space +- +- return size ++ return sum(self._get_pv_usable_space(pv) for pv in self.pvs) + + @property + def extents(self): + +From cd4ce45b78aae26424294c3e4dd8d082eb985af6 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 14:28:56 +0100 +Subject: [PATCH 5/6] Add more tests for PV and VG size and free space + +--- + tests/storage_tests/devices_test/lvm_test.py | 101 +++++++++++++++++++ + 1 file changed, 101 insertions(+) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index f64af8943..2217eeb63 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -25,6 +25,18 @@ def setUp(self): + self.assertIsNone(disk.format.type) + self.assertFalse(disk.children) + ++ def _get_pv_size(self, pv): ++ out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv]) ++ return blivet.size.Size(out.decode().strip()) ++ ++ def _get_vg_size(self, vg): ++ out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg]) ++ return blivet.size.Size(out.decode().strip()) ++ ++ def _get_vg_free(self, vg): ++ out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg]) ++ return blivet.size.Size(out.decode().strip()) ++ + def _clean_up(self): + self.storage.reset() + for disk in self.storage.disks: +@@ -74,6 +86,8 @@ def test_lvm_basic(self): + self.assertIsInstance(pv, blivet.devices.PartitionDevice) + self.assertIsNotNone(pv.format) + self.assertEqual(pv.format.type, "lvmpv") ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv.format.size, pv_size) + + vg = self.storage.devicetree.get_device_by_name(self.vgname) + self.assertIsNotNone(vg) +@@ -84,6 +98,10 @@ def test_lvm_basic(self): + self.assertEqual(pv.format.vg_uuid, vg.uuid) + self.assertEqual(len(vg.parents), 1) + self.assertEqual(vg.parents[0], pv) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) + + lv = self.storage.devicetree.get_device_by_name("%s-blivetTestLV" % self.vgname) + self.assertIsNotNone(lv) +@@ -131,6 +149,13 @@ def test_lvm_thin(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + pool = self.storage.devicetree.get_device_by_name("%s-blivetTestPool" % self.vgname) + self.assertIsNotNone(pool) + self.assertTrue(pool.is_thin_pool) +@@ -177,6 +202,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ ++ vg_size = self._get_vg_size(vg.name) ++ 
self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space + vg.reserved_space) ++ + raidlv = self.storage.devicetree.get_device_by_name("%s-blivetTestRAIDLV" % self.vgname) + self.assertIsNotNone(raidlv) + self.assertTrue(raidlv.is_raid_lv) +@@ -233,6 +266,13 @@ def test_lvm_cache(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname) + self.assertIsNotNone(cachedlv) + self.assertTrue(cachedlv.cached) +@@ -272,6 +312,13 @@ def test_lvm_cache_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname) + self.assertIsNotNone(cachedlv) + cachepool = self.storage.devicetree.get_device_by_name("%s-blivetTestFastLV" % self.vgname) +@@ -327,6 +374,13 @@ def test_lvm_cache_create_and_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname) + self.assertIsNotNone(cachedlv) + +@@ -342,6 +396,13 @@ def test_lvm_cache_create_and_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname) + self.assertIsNotNone(cachedlv) + self.assertTrue(cachedlv.cached) +@@ -371,6 +432,13 @@ def test_lvm_pvs_add_remove(self): + + self.storage.do_it() + ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + # create a second PV + disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1]) + self.assertIsNotNone(disk2) +@@ -385,6 +453,17 @@ def test_lvm_pvs_add_remove(self): + self.storage.do_it() + self.storage.reset() + ++ pv1 = self.storage.devicetree.get_device_by_name(pv1.name) ++ pv1_size = self._get_pv_size(pv1.path) ++ self.assertEqual(pv1.format.size, pv1_size) ++ ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + # add the PV to the existing VG + vg = self.storage.devicetree.get_device_by_name(self.vgname) + pv2 = 
self.storage.devicetree.get_device_by_name(pv2.name) +@@ -393,6 +472,17 @@ def test_lvm_pvs_add_remove(self): + self.storage.devicetree.actions.add(ac) + self.storage.do_it() + ++ pv2 = self.storage.devicetree.get_device_by_name(pv2.name) ++ pv2_size = self._get_pv_size(pv2.path) ++ self.assertEqual(pv2.format.size, pv2_size) ++ ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + self.assertEqual(pv2.format.vg_name, vg.name) + + self.storage.reset() +@@ -414,6 +504,17 @@ def test_lvm_pvs_add_remove(self): + + self.storage.do_it() + ++ pv2 = self.storage.devicetree.get_device_by_name(pv2.name) ++ pv2_size = self._get_pv_size(pv2.path) ++ self.assertEqual(pv2.format.size, pv2_size) ++ ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + self.assertIsNone(pv1.format.type) + + self.storage.reset() + +From a4a7791a150e190089c8f935c7a5aae7fa9bc5a5 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 15:16:29 +0100 +Subject: [PATCH 6/6] Add a separate test case for LVMPV smaller than the block + device + +--- + tests/storage_tests/devices_test/lvm_test.py | 50 ++++++++++++++++++++ + 1 file changed, 50 insertions(+) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index 2217eeb63..25d9d71bb 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -524,3 +524,53 @@ def test_lvm_pvs_add_remove(self): + self.assertIsNotNone(vg) + self.assertEqual(len(vg.pvs), 1) + self.assertEqual(vg.pvs[0].name, pv2.name) ++ ++ def test_lvm_pv_size(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.storage.initialize_disk(disk) ++ ++ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv", ++ parents=[disk]) ++ self.storage.create_device(pv) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ ++ pv.format.update_size_info() ++ self.assertTrue(pv.format.resizable) ++ ++ ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB")) ++ self.storage.devicetree.actions.add(ac) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ self.assertEqual(pv.format.size, blivet.size.Size("50 MiB")) ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv_size, pv.format.size) ++ ++ vg = self.storage.new_vg(name=self.vgname, parents=[pv]) ++ self.storage.create_device(vg) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv_size, pv.format.size) ++ ++ vg = self.storage.devicetree.get_device_by_name(self.vgname) ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) 
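Editor's note on the cross-check performed by the new storage tests in 0017: they compare blivet's idea of PV/VG size and free space against what the lvm2 CLI itself reports. Below is a minimal standalone sketch of the same comparison, assuming root privileges and the pvs/vgs tools from lvm2 on PATH; the helper name, the example device path and the VG name are illustrative only and are not part of blivet or of these patches.

import subprocess
import blivet
from blivet.size import Size

def lvm_reported_size(kind, name):
    # kind is "pv" or "vg"; ask LVM directly for the size in bytes without a unit
    # suffix, the same way the test helpers in 0017 do (pvs/vgs -o <field>
    # --noheadings --nosuffix --units=b <name>).
    tool, field = ("pvs", "pv_size") if kind == "pv" else ("vgs", "vg_size")
    out = subprocess.check_output(
        [tool, "-o", field, "--noheadings", "--nosuffix", "--units=b", name])
    return Size(out.decode().strip())

# Illustrative usage (hypothetical device path and VG name): after a populate,
# blivet's sizes should now match LVM's own reporting.
b = blivet.Blivet()
b.reset()
pv = b.devicetree.get_device_by_path("/dev/sda1")   # illustrative path
vg = b.devicetree.get_device_by_name("testvg")      # illustrative VG name
if pv is not None:
    assert pv.format.size == lvm_reported_size("pv", pv.path)
if vg is not None:
    assert vg.size == lvm_reported_size("vg", vg.name)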
diff --git a/0018-Include-additional-information-in-PartitioningError.patch b/0018-Include-additional-information-in-PartitioningError.patch new file mode 100644 index 0000000000000000000000000000000000000000..805b80f93e91ecd173c236603284097cc446911f --- /dev/null +++ b/0018-Include-additional-information-in-PartitioningError.patch @@ -0,0 +1,85 @@ +From 964ad0ab491678ad73adb4c894d38619bdcfd1b2 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 22 Jan 2025 13:16:43 +0100 +Subject: [PATCH] Include additional information in PartitioningError + +The generic 'Unable to allocate requested partition scheme' is not +very helpful, we should try to include additional information if +possible. + +Resolves: RHEL-84686 +--- + blivet/partitioning.py | 25 ++++++++++++++++++++++--- + 1 file changed, 22 insertions(+), 3 deletions(-) + +diff --git a/blivet/partitioning.py b/blivet/partitioning.py +index ec9918d41..86841152b 100644 +--- a/blivet/partitioning.py ++++ b/blivet/partitioning.py +@@ -34,7 +34,7 @@ + from .flags import flags + from .devices import Device, PartitionDevice, device_path_to_name + from .size import Size +-from .i18n import _ ++from .i18n import _, N_ + from .util import compare + + import logging +@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags): + return [disk for disk in disks if any(tag in disk.tags for tag in tags)] + + ++class PartitioningErrors: ++ NO_PRIMARY = N_("no primary partition slots available") ++ NO_SLOTS = N_("no free partition slots") ++ ++ + def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None): + """ Allocate partitions based on requested features. + +@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None): + part_type = None + growth = 0 # in sectors + # loop through disks ++ errors = {} + for _disk in req_disks: + try: + disklabel = disklabels[_disk.path] +@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None): + if new_part_type is None: + # can't allocate any more partitions on this disk + log.debug("no free partition slots on %s", _disk.name) ++ if PartitioningErrors.NO_SLOTS in errors.keys(): ++ errors[PartitioningErrors.NO_SLOTS].append(_disk.name) ++ else: ++ errors[PartitioningErrors.NO_SLOTS] = [_disk.name] + continue + + if _part.req_primary and new_part_type != parted.PARTITION_NORMAL: +@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None): + new_part_type = parted.PARTITION_NORMAL + else: + # we need a primary slot and none are free on this disk +- log.debug("no primary slots available on %s", _disk.name) ++ log.debug("no primary partition slots available on %s", _disk.name) ++ if PartitioningErrors.NO_PRIMARY in errors.keys(): ++ errors[PartitioningErrors.NO_PRIMARY].append(_disk.name) ++ else: ++ errors[PartitioningErrors.NO_PRIMARY] = [_disk.name] + continue + elif _part.req_part_type is not None and \ + new_part_type != _part.req_part_type: +@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None): + break + + if free is None: +- raise PartitioningError(_("Unable to allocate requested partition scheme.")) ++ if not errors: ++ msg = _("Unable to allocate requested partition scheme.") ++ else: ++ errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items()) ++ msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk) ++ raise PartitioningError(msg) + + _disk = 
use_disk + disklabel = _disk.format diff --git a/0019-Make-ActionDestroyFormat-optional.patch b/0019-Make-ActionDestroyFormat-optional.patch new file mode 100644 index 0000000000000000000000000000000000000000..006531df6368e5250828e0ac03717c42374d9d5d --- /dev/null +++ b/0019-Make-ActionDestroyFormat-optional.patch @@ -0,0 +1,310 @@ +From 8368cab41a1f34452b4c624768245517391ce400 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 16 May 2025 17:15:17 +0200 +Subject: [PATCH 1/5] Allow ActionDestroyFormat to be marked as optional + +When we are also planning to remove the device, failing to remove +the format is not critical so we can ignore it in these cases. + +Resolves: RHEL-84685 +Resolves: RHEL-84663 +--- + blivet/deviceaction.py | 37 +++++++++++++++++++++++-------------- + 1 file changed, 23 insertions(+), 14 deletions(-) + +diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py +index b22e00c36..2e6a8489f 100644 +--- a/blivet/deviceaction.py ++++ b/blivet/deviceaction.py +@@ -734,12 +734,13 @@ class ActionDestroyFormat(DeviceAction): + obj = ACTION_OBJECT_FORMAT + type_desc_str = N_("destroy format") + +- def __init__(self, device): ++ def __init__(self, device, optional=False): + if device.format_immutable: + raise ValueError("this device's formatting cannot be modified") + + DeviceAction.__init__(self, device) + self.orig_format = self.device.format ++ self.optional = optional + + if not device.format.destroyable: + raise ValueError("resource to destroy this format type %s is unavailable" % device.format.type) +@@ -758,21 +759,29 @@ def execute(self, callbacks=None): + """ wipe the filesystem signature from the device """ + # remove any flag if set + super(ActionDestroyFormat, self).execute(callbacks=callbacks) +- status = self.device.status +- self.device.setup(orig=True) +- if hasattr(self.device, 'set_rw'): +- self.device.set_rw() + +- self.format.destroy() +- udev.settle() +- if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported: +- if self.format.parted_flag: +- self.device.unset_flag(self.format.parted_flag) +- self.device.disk.original_format.commit_to_disk() +- udev.settle() ++ try: ++ status = self.device.status ++ self.device.setup(orig=True) ++ if hasattr(self.device, 'set_rw'): ++ self.device.set_rw() + +- if not status: +- self.device.teardown() ++ self.format.destroy() ++ udev.settle() ++ if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported: ++ if self.format.parted_flag: ++ self.device.unset_flag(self.format.parted_flag) ++ self.device.disk.original_format.commit_to_disk() ++ udev.settle() ++ ++ if not status: ++ self.device.teardown() ++ except Exception as e: # pylint: disable=broad-except ++ if self.optional: ++ log.error("Ignoring error when executing optional action: Failed to destroy format on %s: %s.", ++ self.device.name, str(e)) ++ else: ++ raise + + def cancel(self): + if not self._applied: + +From 94e0ec7f24129159ac5f4fe455f37b85ceb9a004 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 16 May 2025 17:28:40 +0200 +Subject: [PATCH 2/5] Make ActionDestroyFormat optional when device is also + removed + +In both destroy_device and recursive_remove we try to remove both +the device and its format. In these cases the format destroy can +be considered to be optional and we don't need to fail just +because we failed to remove the format. 
+ +Resolves: RHEL-84685 +Resolves: RHEL-84663 +--- + blivet/blivet.py | 2 +- + blivet/devicetree.py | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/blivet/blivet.py b/blivet/blivet.py +index 399992a41..53206d973 100644 +--- a/blivet/blivet.py ++++ b/blivet/blivet.py +@@ -915,7 +915,7 @@ def destroy_device(self, device): + if device.format.exists and device.format.type and \ + not device.format_immutable: + # schedule destruction of any formatting while we're at it +- self.devicetree.actions.add(ActionDestroyFormat(device)) ++ self.devicetree.actions.add(ActionDestroyFormat(device, optional=True)) + + action = ActionDestroyDevice(device) + self.devicetree.actions.add(action) +diff --git a/blivet/devicetree.py b/blivet/devicetree.py +index 6a27b1e71..4ec955002 100644 +--- a/blivet/devicetree.py ++++ b/blivet/devicetree.py +@@ -261,7 +261,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T + if actions: + if leaf.format.exists and not leaf.protected and \ + not leaf.format_immutable: +- self.actions.add(ActionDestroyFormat(leaf)) ++ self.actions.add(ActionDestroyFormat(leaf, optional=True)) + + self.actions.add(ActionDestroyDevice(leaf)) + else: +@@ -273,7 +273,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T + + if not device.format_immutable: + if actions: +- self.actions.add(ActionDestroyFormat(device)) ++ self.actions.add(ActionDestroyFormat(device, optional=True)) + else: + device.format = None + + +From 610b65450fa00a9b8b129ef733536ca080edc6fe Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 19 May 2025 14:24:06 +0200 +Subject: [PATCH 3/5] tests: Add a simple test case for optional format destroy + action + +Related: RHEL-84685 +Related: RHEL-84663 +--- + tests/unit_tests/devices_test/lvm_test.py | 28 +++++++++++++++++++++++ + 1 file changed, 28 insertions(+) + +diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py +index ed30772fd..7ec3ed0ae 100644 +--- a/tests/unit_tests/devices_test/lvm_test.py ++++ b/tests/unit_tests/devices_test/lvm_test.py +@@ -1172,3 +1172,31 @@ def test_vdo_compression_deduplication_change(self): + with patch("blivet.devices.lvm.blockdev.lvm") as lvm: + self.b.do_it() + lvm.vdo_enable_deduplication.assert_called_with(vg.name, vdopool.lvname) ++ ++ ++@patch("blivet.devices.lvm.LVMLogicalVolumeDevice._external_dependencies", new=[]) ++@patch("blivet.devices.lvm.LVMLogicalVolumeBase._external_dependencies", new=[]) ++@patch("blivet.devices.dm.DMDevice._external_dependencies", new=[]) ++class BlivetLVMOptionalDestroyTest(BlivetLVMUnitTest): ++ ++ def test_optional_format_destroy(self, *args): # pylint: disable=unused-argument ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], exists=True, size=Size("5 GiB"), ++ fmt=blivet.formats.get_format("xfs", exists=True)) ++ ++ for dev in (pv, vg, lv): ++ self.b.devicetree._add_device(dev) ++ ++ self.b.destroy_device(lv) ++ fmt_ac = self.b.devicetree.actions.find(action_type="destroy", object_type="format") ++ self.assertTrue(fmt_ac) ++ self.assertTrue(fmt_ac[0].optional) ++ ++ with patch("blivet.devices.lvm.blockdev.lvm") as lvm: ++ lvm.lvactivate.side_effect = RuntimeError() ++ try: ++ self.b.do_it() ++ except RuntimeError: ++ self.fail("Optional format destroy action is not optional") + +From 
d5c9b690f702d38a9db5bed5d728a1a25fe31077 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 20 May 2025 13:02:00 +0200 +Subject: [PATCH 4/5] tests: Add test case for removing broken thin pool + +Related: RHEL-84685 +Related: RHEL-84663 +--- + tests/storage_tests/devices_test/lvm_test.py | 51 ++++++++++++++++++++ + 1 file changed, 51 insertions(+) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index 25d9d71bb..aae9da8b5 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -1,6 +1,7 @@ + import os + import shutil + import subprocess ++import tempfile + + from ..storagetestcase import StorageTestCase + +@@ -574,3 +575,53 @@ def test_lvm_pv_size(self): + self.assertEqual(vg_size, vg.size) + vg_free = self._get_vg_free(vg.name) + self.assertEqual(vg_free, vg.free_space) ++ ++ def _break_thin_pool(self): ++ os.system("vgchange -an %s >/dev/null 2>&1" % self.vgname) ++ ++ # changing transaction_id for the pool prevents it from being activated ++ with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp: ++ os.system("vgcfgbackup -f %s %s >/dev/null 2>&1" % (temp.name, self.vgname)) ++ os.system("sed -i 's/transaction_id =.*/transaction_id = 123456/' %s >/dev/null 2>&1" % temp.name) ++ os.system("vgcfgrestore -f %s %s --force >/dev/null 2>&1" % (temp.name, self.vgname)) ++ ++ def test_lvm_broken_thin(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.initialize_disk(disk) ++ ++ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv", ++ parents=[disk]) ++ self.storage.create_device(pv) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ vg = self.storage.new_vg(name=self.vgname, parents=[pv]) ++ self.storage.create_device(vg) ++ ++ pool = self.storage.new_lv(thin_pool=True, size=blivet.size.Size("50 MiB"), ++ parents=[vg], name="blivetTestPool") ++ self.storage.create_device(pool) ++ ++ self.storage.do_it() ++ ++ # intentionally break the thin pool created above ++ self._break_thin_pool() ++ ++ self.storage.reset() ++ ++ pool = self.storage.devicetree.get_device_by_name("%s-blivetTestPool" % self.vgname) ++ self.assertIsNotNone(pool) ++ ++ # check that the pool cannot be activated ++ try: ++ pool.setup() ++ except Exception: # pylint: disable=broad-except ++ pass ++ else: ++ self.fail("Failed to break thinpool for tests") ++ ++ # verify that the pool can be destroyed even if it cannot be activated ++ self.storage.recursive_remove(pool) ++ self.storage.do_it() + +From 6f0625e06a2ea69be8042cf5e76048b97a1025e1 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 29 Apr 2025 08:09:06 +0200 +Subject: [PATCH 5/5] Fix expected exception type when activating devices in + populor + +We are no longer raising libblockdev exceptions in our public API +calls (see #1014) so when calling setup() ourselves we need to +catch our exceptions instead of libblockdev ones as well. 
+ +Related: RHEL-84685 +Related: RHEL-84663 +--- + blivet/populator/helpers/luks.py | 2 +- + blivet/populator/helpers/lvm.py | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py +index 72da248ed..0b72920e3 100644 +--- a/blivet/populator/helpers/luks.py ++++ b/blivet/populator/helpers/luks.py +@@ -161,7 +161,7 @@ def run(self): + self.device.format.passphrase = passphrase + try: + self.device.format.setup() +- except blockdev.BlockDevError: ++ except LUKSError: + self.device.format.passphrase = None + else: + break +diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py +index e22c52088..cdf97e405 100644 +--- a/blivet/populator/helpers/lvm.py ++++ b/blivet/populator/helpers/lvm.py +@@ -29,7 +29,7 @@ + from ... import udev + from ...devicelibs import lvm + from ...devices.lvm import LVMVolumeGroupDevice, LVMLogicalVolumeDevice, LVMInternalLVtype +-from ...errors import DeviceTreeError, DuplicateVGError ++from ...errors import DeviceTreeError, DuplicateVGError, LVMError + from ...flags import flags + from ...size import Size + from ...storage_log import log_method_call +@@ -289,7 +289,7 @@ def add_lv(lv): + if flags.auto_dev_updates: + try: + lv_device.setup() +- except blockdev.LVMError: ++ except LVMError: + log.warning("failed to activate lv %s", lv_device.name) + lv_device.controllable = False + diff --git a/0020-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch b/0020-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch new file mode 100644 index 0000000000000000000000000000000000000000..1f0261213425b9995733dd57c9612c98a663f265 --- /dev/null +++ b/0020-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch @@ -0,0 +1,385 @@ +From 025cc54c04132a056ba863ecdb1d05f3465632a5 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 27 May 2025 15:21:23 +0200 +Subject: [PATCH 1/3] Add some basic partitioning storage tests + +This supplements the existing tests which use sparse files. These +new test cases actually run do_it() and check the result after +reset. More test cases will follow. 
+ +Related: RHEL-93967 +--- + .../devices_test/partition_test.py | 148 ++++++++++++++++++ + 1 file changed, 148 insertions(+) + +diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py +index 73da87b43..30e6a0151 100644 +--- a/tests/storage_tests/devices_test/partition_test.py ++++ b/tests/storage_tests/devices_test/partition_test.py +@@ -5,6 +5,7 @@ + + from unittest.mock import patch + ++import blivet + from blivet.devices import DiskFile + from blivet.devices import PartitionDevice + from blivet.devicelibs.gpt import gpt_part_uuid_for_mountpoint +@@ -13,6 +14,8 @@ + from blivet.size import Size + from blivet.util import sparsetmpfile + ++from ..storagetestcase import StorageTestCase ++ + + class PartitionDeviceTestCase(unittest.TestCase): + +@@ -266,3 +269,148 @@ def test_dev_part_type_gpt_autodiscover(self): + flags.gpt_discoverable_partitions = True + self.assertEqual(device.part_type_uuid, + gpt_part_uuid_for_mountpoint("/home")) ++ ++ ++class PartitionTestCase(StorageTestCase): ++ ++ def setUp(self): ++ super().setUp() ++ ++ disks = [os.path.basename(vdev) for vdev in self.vdevs] ++ self.storage = blivet.Blivet() ++ self.storage.exclusive_disks = disks ++ self.storage.reset() ++ ++ # make sure only the targetcli disks are in the devicetree ++ for disk in self.storage.disks: ++ self.assertTrue(disk.path in self.vdevs) ++ self.assertIsNone(disk.format.type) ++ self.assertFalse(disk.children) ++ ++ def _clean_up(self): ++ self.storage.reset() ++ for disk in self.storage.disks: ++ if disk.path not in self.vdevs: ++ raise RuntimeError("Disk %s found in devicetree but not in disks created for tests" % disk.name) ++ self.storage.recursive_remove(disk) ++ ++ self.storage.do_it() ++ ++ def test_msdos_basic(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos")) ++ ++ for i in range(4): ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk], ++ primary=True) ++ self.storage.create_device(part) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.assertEqual(disk.format.type, "disklabel") ++ self.assertEqual(disk.format.label_type, "msdos") ++ self.assertIsNotNone(disk.format.parted_disk) ++ self.assertIsNotNone(disk.format.parted_device) ++ self.assertEqual(len(disk.format.partitions), 4) ++ self.assertEqual(len(disk.format.primary_partitions), 4) ++ self.assertEqual(len(disk.children), 4) ++ ++ for i in range(4): ++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1)) ++ self.assertIsNotNone(part) ++ self.assertEqual(part.type, "partition") ++ self.assertEqual(part.disk, disk) ++ self.assertEqual(part.size, Size("100 MiB")) ++ self.assertTrue(part.is_primary) ++ self.assertFalse(part.is_extended) ++ self.assertFalse(part.is_logical) ++ self.assertIsNotNone(part.parted_partition) ++ ++ def test_msdos_extended(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos")) ++ ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk]) ++ self.storage.create_device(part) ++ ++ part = self.storage.new_partition(size=Size("1 GiB"), 
parents=[disk], ++ part_type=parted.PARTITION_EXTENDED) ++ self.storage.create_device(part) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ for i in range(4): ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk], ++ part_type=parted.PARTITION_LOGICAL) ++ self.storage.create_device(part) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.assertEqual(disk.format.type, "disklabel") ++ self.assertEqual(disk.format.label_type, "msdos") ++ self.assertIsNotNone(disk.format.parted_disk) ++ self.assertIsNotNone(disk.format.parted_device) ++ self.assertEqual(len(disk.format.partitions), 6) ++ self.assertEqual(len(disk.format.primary_partitions), 1) ++ self.assertEqual(len(disk.children), 6) ++ ++ for i in range(4, 8): ++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1)) ++ self.assertIsNotNone(part) ++ self.assertEqual(part.type, "partition") ++ self.assertEqual(part.disk, disk) ++ self.assertEqual(part.size, Size("100 MiB")) ++ self.assertFalse(part.is_primary) ++ self.assertFalse(part.is_extended) ++ self.assertTrue(part.is_logical) ++ self.assertIsNotNone(part.parted_partition) ++ ++ def test_gpt_basic(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt")) ++ ++ for i in range(4): ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],) ++ self.storage.create_device(part) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.assertEqual(disk.format.type, "disklabel") ++ self.assertEqual(disk.format.label_type, "gpt") ++ self.assertIsNotNone(disk.format.parted_disk) ++ self.assertIsNotNone(disk.format.parted_device) ++ self.assertEqual(len(disk.format.partitions), 4) ++ self.assertEqual(len(disk.format.primary_partitions), 4) ++ self.assertEqual(len(disk.children), 4) ++ ++ for i in range(4): ++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1)) ++ self.assertIsNotNone(part) ++ self.assertEqual(part.type, "partition") ++ self.assertEqual(part.disk, disk) ++ self.assertEqual(part.size, Size("100 MiB")) ++ self.assertTrue(part.is_primary) ++ self.assertFalse(part.is_extended) ++ self.assertFalse(part.is_logical) ++ self.assertIsNotNone(part.parted_partition) + +From ab6261adbdfedc26c6b0712a42a3dd9169cabb38 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 27 May 2025 14:10:49 +0200 +Subject: [PATCH 2/3] Wipe end partition before creating it as well as the + start + +We are currently overwritting start of the newly created partition +with zeroes to remove any filesystem metadata that might occupy +the space. This extends this functionality to end of the partition +to remove 1.0 MD metadata that might be there. + +Resolves: RHEL-93967 +--- + blivet/devices/partition.py | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) + +diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py +index 89470d9fb..fc9a97be7 100644 +--- a/blivet/devices/partition.py ++++ b/blivet/devices/partition.py +@@ -659,7 +659,7 @@ def _wipe(self): + """ Wipe the partition metadata. 
+ + Assumes that the partition metadata is located at the start +- of the partition and occupies no more than 1 MiB. ++ and end of the partition and occupies no more than 1 MiB. + + Erases in block increments. Erases the smallest number of blocks + such that at least 1 MiB is erased or the whole partition is +@@ -692,6 +692,24 @@ def _wipe(self): + # things to settle. + udev.settle() + ++ if count >= part_len: ++ # very small partition, we wiped it completely already ++ return ++ ++ # now do the end of the partition as well (RAID 1.0 metadata) ++ end = self.parted_partition.geometry.end ++ cmd = ["dd", "if=/dev/zero", "of=%s" % device, "bs=%d" % bs, ++ "seek=%d" % (end - count), "count=%d" % count] ++ try: ++ util.run_program(cmd) ++ except OSError as e: ++ log.error(str(e)) ++ finally: ++ # If a udev device is created with the watch option, then ++ # a change uevent is synthesized and we need to wait for ++ # things to settle. ++ udev.settle() ++ + def _create(self): + """ Create the device. """ + log_method_call(self, self.name, status=self.status) + +From 936cccdf67e3ee612399bd3f0f8b383ca118ce9b Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 28 May 2025 11:01:14 +0200 +Subject: [PATCH 3/3] tests: Add tests for wiping stale metadata from new + partitions + +Related: RHEL-93967 +--- + .../devices_test/partition_test.py | 119 ++++++++++++++++++ + 1 file changed, 119 insertions(+) + +diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py +index 30e6a0151..87e4b1155 100644 +--- a/tests/storage_tests/devices_test/partition_test.py ++++ b/tests/storage_tests/devices_test/partition_test.py +@@ -1,6 +1,7 @@ + import os + import unittest + from uuid import UUID ++import blivet.deviceaction + import parted + + from unittest.mock import patch +@@ -414,3 +415,121 @@ def test_gpt_basic(self): + self.assertFalse(part.is_extended) + self.assertFalse(part.is_logical) + self.assertIsNotNone(part.parted_partition) ++ ++ def _partition_wipe_check(self): ++ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1") ++ self.assertIsNotNone(part1) ++ self.assertIsNone(part1.format.type) ++ ++ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "1"]) ++ self.assertEqual(out.strip(), "") ++ ++ part2 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "2") ++ self.assertIsNotNone(part2) ++ self.assertEqual(part2.format.type, "ext4") ++ ++ try: ++ part2.format.do_check() ++ except blivet.errors.FSError as e: ++ self.fail("Partition wipe corrupted filesystem on an adjacent partition: %s" % str(e)) ++ ++ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "2"]) ++ self.assertEqual(out.strip(), "ext4") ++ ++ def test_partition_wipe_ext(self): ++ """ Check that any stray filesystem metadata are removed before creating a partition """ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt")) ++ ++ # create two partitions with ext4 ++ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk], ++ fmt=blivet.formats.get_format("ext4")) ++ self.storage.create_device(part1) ++ ++ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True, ++ fmt=blivet.formats.get_format("ext4")) ++ self.storage.create_device(part2) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ 
self.storage.do_it() ++ self.storage.reset() ++ ++ # remove the first partition (only the partition without removing the format) ++ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1") ++ ac = blivet.deviceaction.ActionDestroyDevice(part1) ++ self.storage.devicetree.actions.add(ac) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ # create the first partition again (without ext4) ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk]) ++ self.storage.create_device(part1) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that ++ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call ++ part1._post_create = lambda: None ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ # make sure the ext4 signature is not present on part1 (and untouched on part2) ++ self._partition_wipe_check() ++ ++ def test_partition_wipe_mdraid(self): ++ """ Check that any stray RAID metadata are removed before creating a partition """ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt")) ++ ++ # create two partitions, one empty, one with ext4 ++ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk]) ++ self.storage.create_device(part1) ++ ++ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True, ++ fmt=blivet.formats.get_format("ext4")) ++ self.storage.create_device(part2) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ # create MD RAID with metadata 1.0 on the first partition ++ ret = blivet.util.run_program(["mdadm", "--create", "blivetMDTest", "--level=linear", ++ "--metadata=1.0", "--raid-devices=1", "--force", part1.path]) ++ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test") ++ ret = blivet.util.run_program(["mdadm", "--stop", "/dev/md/blivetMDTest"]) ++ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test") ++ ++ # now remove the partition without removing the array first ++ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1") ++ ac = blivet.deviceaction.ActionDestroyDevice(part1) ++ self.storage.devicetree.actions.add(ac) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ # create the first partition again (without format) ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk]) ++ self.storage.create_device(part1) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that ++ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call ++ part1._post_create = lambda: None ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ # make sure the mdmember signature is not present on part1 (and ext4 is untouched on part2) ++ self._partition_wipe_check() diff --git a/0021-Tell-LVM-DBus-to-refresh-its-internal-status-during-reset.patch b/0021-Tell-LVM-DBus-to-refresh-its-internal-status-during-reset.patch new file mode 100644 index 0000000000000000000000000000000000000000..0db1138965c521136b41cf4f71ab8b88b632fee0 --- /dev/null +++ 
b/0021-Tell-LVM-DBus-to-refresh-its-internal-status-during-reset.patch @@ -0,0 +1,65 @@ +From ee19e665276fd7cd6477da9bee59641b1de1a916 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 27 Jun 2025 11:28:09 +0200 +Subject: [PATCH] Tell LVM DBus to refresh it's internal status during reset + +Unfortunately some users run wipefs thinking it's enough +to remove all devices on top of the disk cleanly. +In cases where the PV is not directly on the disk, LVM DBus +doesn't get a udev event and doesn't remove the VG and LVs from +DBus so we think these still exist. + +Resolves: RHEL-93967 +--- + blivet/devicelibs/lvm.py | 19 +++++++++++++++++++ + blivet/populator/populator.py | 3 +++ + 2 files changed, 22 insertions(+) + +diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py +index 38e1bc1bc..47cc3e5d7 100644 +--- a/blivet/devicelibs/lvm.py ++++ b/blivet/devicelibs/lvm.py +@@ -36,6 +36,7 @@ + log = logging.getLogger("blivet") + + from . import raid ++from .. import safe_dbus + from ..size import Size + from ..i18n import N_ + from ..flags import flags +@@ -284,3 +285,21 @@ def recommend_thpool_chunk_size(thpool_size): + + def is_valid_cache_md_size(md_size): + return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE ++ ++ ++def lvm_dbusd_refresh(): ++ lvm_soname = blockdev.get_plugin_soname(blockdev.Plugin.LVM) ++ if 'dbus' not in lvm_soname: ++ return ++ ++ try: ++ rc = safe_dbus.call_sync("com.redhat.lvmdbus1", ++ "/com/redhat/lvmdbus1/Manager", ++ "com.redhat.lvmdbus1.Manager", ++ "Refresh", ++ None) ++ except safe_dbus.DBusCallError as e: ++ log.error("Exception occurred when calling LVM DBusD refresh: %s", str(e)) ++ else: ++ if rc[0] != 0: ++ log.error("Failed to call LVM DBusD refresh: %s", rc) +diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py +index 2ddea6618..da3b33cac 100644 +--- a/blivet/populator/populator.py ++++ b/blivet/populator/populator.py +@@ -453,6 +453,9 @@ def _populate(self): + disklib.update_volume_info() + self.drop_device_info_cache() + ++ # force LVM DBusD to refresh its internal state ++ lvm.lvm_dbusd_refresh() ++ + if flags.auto_dev_updates and availability.BLOCKDEV_MPATH_PLUGIN.available: + blockdev.mpath.set_friendly_names(flags.multipath_friendly_names) + diff --git a/0022-Change-expected-Stratis-metadata-size.patch b/0022-Change-expected-Stratis-metadata-size.patch new file mode 100644 index 0000000000000000000000000000000000000000..93cbef14008fa12979bfd3ac7a52adbfe933406f --- /dev/null +++ b/0022-Change-expected-Stratis-metadata-size.patch @@ -0,0 +1,36 @@ +From e5ad3beb216d25a601d8f35c2b2a97d15cbb0d39 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 3 Sep 2024 10:40:27 +0200 +Subject: [PATCH] tests: Change expected Stratis metadata size for stratisd + 3.7.0 + +Stratis changes its metadata and the way "stratis-predict-usage" +predicts its size so we need to change our expectations too. 
+ +Resolves: RHEL-102299 +--- + tests/unit_tests/devicefactory_test.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/tests/unit_tests/devicefactory_test.py b/tests/unit_tests/devicefactory_test.py +index 0a5e6a839..89ca34a33 100644 +--- a/tests/unit_tests/devicefactory_test.py ++++ b/tests/unit_tests/devicefactory_test.py +@@ -942,7 +942,7 @@ def _get_size_delta(self, devices=None): + :keyword devices: list of factory-managed devices or None + :type devices: list(:class:`blivet.devices.StorageDevice`) or NoneType + """ +- return Size("550 MiB") # huge stratis pool metadata ++ return Size("1.3 GiB") # huge stratis pool metadata + + def _validate_factory_device(self, *args, **kwargs): + device = args[0] +@@ -968,7 +968,7 @@ def _validate_factory_device(self, *args, **kwargs): + else: + self.assertAlmostEqual(device.pool.size, + device.size, +- delta=Size("600 MiB")) ++ delta=Size("1.3 GiB")) + + self.assertEqual(device.pool.encrypted, kwargs.get("container_encrypted", False)) + diff --git a/0023-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch b/0023-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch new file mode 100644 index 0000000000000000000000000000000000000000..84520763ad337d77d1cead7afb20474c37950eca --- /dev/null +++ b/0023-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch @@ -0,0 +1,65 @@ +From 598902388a09e2dd60b0b0f1e556c4661899be68 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 1 Aug 2025 15:03:09 +0200 +Subject: [PATCH] Add a pre-wipe fixup function for LVM logical volumes + +LVs scheduled to be removed are always activated to remove the +format during installation. If a read-only LV with the skip +activation flag contains MD metadata, activating the LV to remove +the format causes the MD array to be auto-assembled by udev, +preventing us from removing it. For this special case, we simply +stop the array before removing the format.
+ +Resolves: RHEL-93966 +--- + blivet/deviceaction.py | 3 +++ + blivet/devices/lvm.py | 19 +++++++++++++++++++ + 2 files changed, 22 insertions(+) + +diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py +index 2e6a8489..6590898f 100644 +--- a/blivet/deviceaction.py ++++ b/blivet/deviceaction.py +@@ -766,6 +766,9 @@ class ActionDestroyFormat(DeviceAction): + if hasattr(self.device, 'set_rw'): + self.device.set_rw() + ++ if hasattr(self.device, 'pre_format_destroy'): ++ self.device.pre_format_destroy() ++ + self.format.destroy() + udev.settle() + if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported: +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index d0b0b2b9..10ed2c94 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -2791,6 +2791,25 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin + except blockdev.LVMError as err: + raise errors.LVMError(err) + ++ def pre_format_destroy(self): ++ """ Fixup needed to run before wiping this device """ ++ if self.ignore_skip_activation > 0: ++ # the LV was not activated during the initial scan, so if there is an MD array on it ++ # it will now also get activated and we need to stop it to be able to remove the LV ++ try: ++ info = blockdev.md.examine(self.path) ++ except blockdev.MDRaidError: ++ pass ++ else: ++ # give udev a bit of time to activate the array so we can deactivate it again ++ time.sleep(5) ++ log.info("MD metadata found on LV with skip activation, stopping the array %s", ++ info.device) ++ try: ++ blockdev.md.deactivate(info.device) ++ except blockdev.MDRaidError as err: ++ log.info("failed to deactivate %s: %s", info.device, str(err)) ++ + @type_specific + def _pre_create(self): + LVMLogicalVolumeBase._pre_create(self) +-- +2.50.1 + diff --git a/python-blivet.spec b/python-blivet.spec index 39ed3efbe27c9c7d7b62996a0ef6ff1f739a20ae..62c6bd5eb620ae256c3157071f4eafc9a3af412e 100644 --- a/python-blivet.spec +++ b/python-blivet.spec @@ -1,16 +1,16 @@ -%define anolis_release 2 +%define anolis_release 3 Summary: A python module for system storage configuration Name: python-blivet Url: https://storageapis.wordpress.com/projects/blivet -Version: 3.10.0 +Source0: http://github.com/storaged-project/blivet/archive/blivet-3.10.0.tar.gz +Source1: http://github.com/storaged-project/blivet/archive/blivet-3.10.0-tests.tar.gz +Version: 3.10.0 Release: %{anolis_release}%{?dist} Epoch: 1 License: LGPL-2.1-or-later %global realname blivet %global realversion %{version} -Source0: https://github.com/storaged-project/%{realname}/releases/download/%{realname}-%{realversion}/%{realname}-%{realversion}.tar.gz -Source1: https://github.com/storaged-project/%{realname}/releases/download/%{realname}-%{realversion}/%{realname}-%{realversion}-tests.tar.gz Patch0: 0001-remove-btrfs-plugin.patch @@ -23,6 +23,15 @@ Patch6: 0009-mod_pass_in_stratis_test.patch Patch7: 0010-Fix_running_tests_in_FIPS_mode.patch Patch8: 0011-Make-GPT-default-label-type-on-all-architectures.patch Patch9: 0012-Add-sw_64-support.patch +Patch10: 0014-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch +Patch11: 0017-LVMPV-format-size-fix.patch +Patch12: 0020-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch +Patch13: 0015-iscsi-Use-node-startup-onboot-option-for-Login.patch +Patch14: 0022-Change-expected-Stratis-metadata-size.patch +Patch15: 0018-Include-additional-information-in-PartitioningError.patch +Patch16: 
0023-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch +Patch17: 0019-Make-ActionDestroyFormat-optional.patch +Patch18: 0021-Tell-LVM-DBus-to-refresh-its-internal-status-during-reset.patch # Versions of required components (done so we make sure the buildrequires # match the requires versions of things). @@ -130,6 +139,25 @@ make PYTHON=%{__python3} DESTDIR=%{buildroot} install %doc README.md ChangeLog examples %changelog +* Thu Dec 04 2025 wency_cn - 3.10.0-3 +- Prevent LVM failures by avoiding creation of an empty devices file during PV removal. + Resolves: RHEL-65846 +- Ensure accurate PV size reporting and correct VG space calculations after LVM operations. + Resolves: RHEL-93967 +- Ensure stale metadata is wiped from both the start and end of partitions to prevent detection issues. + Resolves: RHEL-93967 +- Ensure iSCSI devices are available during boot by logging in at startup. + Resolves: RHEL-53719 +- Update test expectations to prevent false failures with stratisd 3.7.0 metadata changes. + Resolves: RHEL-102299 +- Provide clearer partitioning error messages to improve troubleshooting during disk allocation. + Resolves: RHEL-84686 +- Prevent MD array conflicts during installation by proactively stopping unintentionally assembled MD arrays on LVM logical volumes. + Resolves: RHEL-93966 +- Fix format removal failures during device deletion and improve resilience when handling broken storage devices. + Resolves: RHEL-65846 +- Ensure LVM state is refreshed after wipefs to prevent stale device detection. + Resolves: RHEL-93967 * Mon Mar 31 2025 fenghui - 3.10.0-2 - arch: Support sw_64 arch