From 802e9ef8db8d3f80da5d851e5cda2f7a0a32c2d3 Mon Sep 17 00:00:00 2001 From: Olivier Gayot Date: Fri, 12 Jan 2024 15:14:08 +0100 Subject: [PATCH] storage: add property to tell if FS objects are "on" remote storage Signed-off-by: Olivier Gayot --- subiquity/models/filesystem.py | 30 ++++++ subiquity/models/tests/test_filesystem.py | 114 ++++++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git a/subiquity/models/filesystem.py b/subiquity/models/filesystem.py index 77ff8f17..223a651f 100644 --- a/subiquity/models/filesystem.py +++ b/subiquity/models/filesystem.py @@ -693,6 +693,9 @@ class _Device(_Formattable, ABC): part.number = next_num next_num += 1 + def on_remote_storage(self) -> bool: + raise NotImplementedError + @fsobj("dasd") class Dasd: @@ -820,6 +823,11 @@ class Disk(_Device): return None return id.encode("utf-8").decode("unicode_escape").strip() + def on_remote_storage(self) -> bool: + if self.nvme_controller and self.nvme_controller.transport == "tcp": + return True + return False + @fsobj("partition") class Partition(_Formattable): @@ -933,6 +941,9 @@ class Partition(_Formattable): # check the partition number. return self.device.ptable == "msdos" and self.number > 4 + def on_remote_storage(self) -> bool: + return self.device.on_remote_storage() + @fsobj("raid") class Raid(_Device): @@ -1017,6 +1028,13 @@ class Raid(_Device): # What is a device that makes up this device referred to as? component_name = "component" + def on_remote_storage(self) -> bool: + for dev in self.devices: + if dev.on_remote_storage(): + return True + + return False + @fsobj("lvm_volgroup") class LVM_VolGroup(_Device): @@ -1042,6 +1060,12 @@ class LVM_VolGroup(_Device): # What is a device that makes up this device referred to as? 
component_name = "PV" + def on_remote_storage(self) -> bool: + for dev in self.devices: + if dev.on_remote_storage(): + return True + return False + @fsobj("lvm_partition") class LVM_LogicalVolume(_Formattable): @@ -1073,6 +1097,9 @@ class LVM_LogicalVolume(_Formattable): ok_for_raid = False ok_for_lvm_vg = False + def on_remote_storage(self) -> bool: + return self.volgroup.on_remote_storage() + LUKS_OVERHEAD = 16 * (2**20) @@ -1152,6 +1179,9 @@ class DM_Crypt: def size(self): return self.volume.size - LUKS_OVERHEAD + def on_remote_storage(self) -> bool: + return self.volume.on_remote_storage() + @fsobj("device") class ArbitraryDevice(_Device): diff --git a/subiquity/models/tests/test_filesystem.py b/subiquity/models/tests/test_filesystem.py index cac31d0c..97cf00fe 100644 --- a/subiquity/models/tests/test_filesystem.py +++ b/subiquity/models/tests/test_filesystem.py @@ -15,6 +15,7 @@ import pathlib import unittest +from typing import Optional from unittest import mock import attr @@ -31,6 +32,7 @@ from subiquity.models.filesystem import ( Filesystem, FilesystemModel, NotFinalPartitionError, + NVMeController, Partition, RecoveryKeyHandler, ZPool, @@ -263,6 +265,20 @@ def make_zfs(model, *, pool, **kw): return zfs + +def make_nvme_controller( + model, + *, + transport: str, + tcp_addr: Optional[str] = None, + tcp_port: Optional[int] = None, +) -> NVMeController: + ctrler = NVMeController( + m=model, transport=transport, tcp_addr=tcp_addr, tcp_port=tcp_port + ) + model._actions.append(ctrler) + return ctrler + + class TestFilesystemModel(unittest.TestCase): def _test_ok_for_xxx(self, model, make_new_device, attr, test_partitions=True): # Newly formatted devs are ok_for_raid @@ -1617,3 +1633,101 @@ class TestRecoveryKeyHandler(SubiTestCase): ), expected, ) + + +class TestOnRemoteStorage(SubiTestCase): + def test_disk__on_local_storage(self): + m, d = make_model_and_disk(name="sda", serial="sata0") + self.assertFalse(d.on_remote_storage()) + + d = 
make_disk(name="nvme0n1", serial="pcie0") + self.assertFalse(d.on_remote_storage()) + + ctrler = make_nvme_controller(model=m, transport="pcie") + + d = make_disk( + fs_model=m, name="nvme1n1", nvme_controller=ctrler, serial="pcie1" + ) + self.assertFalse(d.on_remote_storage()) + + def test_disk__on_remote_storage(self): + m = make_model() + + ctrler = make_nvme_controller( + model=m, transport="tcp", tcp_addr="172.16.82.78", tcp_port=4420 + ) + + d = make_disk(fs_model=m, name="nvme0n1", nvme_controller=ctrler, serial="tcp0") + self.assertTrue(d.on_remote_storage()) + + def test_partition(self): + m, d = make_model_and_disk(name="sda", serial="sata0") + p = make_partition(model=m, device=d) + + # For partitions, this is directly dependent on the underlying device. + with mock.patch.object(d, "on_remote_storage", return_value=False): + self.assertFalse(p.on_remote_storage()) + with mock.patch.object(d, "on_remote_storage", return_value=True): + self.assertTrue(p.on_remote_storage()) + + def test_raid(self): + m, raid = make_model_and_raid() + + d0, d1 = list(raid.devices) + + d0_local = mock.patch.object(d0, "on_remote_storage", return_value=False) + d1_local = mock.patch.object(d1, "on_remote_storage", return_value=False) + d0_remote = mock.patch.object(d0, "on_remote_storage", return_value=True) + d1_remote = mock.patch.object(d1, "on_remote_storage", return_value=True) + + # If at least one of the underlying disk is on remote storage, the raid + # should be considered on remote storage too. + with d0_local, d1_local: + self.assertFalse(raid.on_remote_storage()) + with d0_local, d1_remote: + self.assertTrue(raid.on_remote_storage()) + with d0_remote, d1_local: + self.assertTrue(raid.on_remote_storage()) + with d0_remote, d1_remote: + self.assertTrue(raid.on_remote_storage()) + + def test_lvm_volgroup(self): + m, vg = make_model_and_vg() + + # make_vg creates a VG with a single PV (i.e., a disk). 
+ d0 = vg.devices[0] + + with mock.patch.object(d0, "on_remote_storage", return_value=False): + self.assertFalse(vg.on_remote_storage()) + with mock.patch.object(d0, "on_remote_storage", return_value=True): + self.assertTrue(vg.on_remote_storage()) + + d1 = make_disk(fs_model=m) + + vg.devices.append(d1) + + d0_local = mock.patch.object(d0, "on_remote_storage", return_value=False) + d1_local = mock.patch.object(d1, "on_remote_storage", return_value=False) + d0_remote = mock.patch.object(d0, "on_remote_storage", return_value=True) + d1_remote = mock.patch.object(d1, "on_remote_storage", return_value=True) + + # Just like RAIDs, if at least one of the underlying PV is on remote + # storage, the VG should be considered on remote storage too. + with d0_local, d1_local: + self.assertFalse(vg.on_remote_storage()) + with d0_local, d1_remote: + self.assertTrue(vg.on_remote_storage()) + with d0_remote, d1_local: + self.assertTrue(vg.on_remote_storage()) + with d0_remote, d1_remote: + self.assertTrue(vg.on_remote_storage()) + + def test_lvm_logical_volume(self): + m, lv = make_model_and_lv() + + vg = lv.volgroup + # For LVs, this is directly dependent on the underlying VG. + with mock.patch.object(vg, "on_remote_storage", return_value=False): + self.assertFalse(lv.on_remote_storage()) + with mock.patch.object(vg, "on_remote_storage", return_value=True): + self.assertTrue(lv.on_remote_storage())