Merge pull request #1731 from dbungert/zfs-fixes

Zfs fixes
Dan Bungert 2023-07-19 19:14:01 -06:00, committed by GitHub
commit 3dfaa9ccea
6 changed files with 49 additions and 13 deletions


@@ -288,7 +288,7 @@ Storage configuration is a complex topic and the description of the desired configuration
 #### Supported layouts
-The two supported layouts at the time of writing are "lvm" and "direct".
+The three supported layouts at the time of writing are "lvm", "direct", and "zfs".
 ```yaml
 storage:
   layout:
@@ -296,6 +296,9 @@ storage:
 storage:
   layout:
     name: direct
+storage:
+  layout:
+    name: zfs
 ```
 By default these will install to the largest disk in a system, but you can supply a match spec (see below) to indicate which disk to use:
@@ -308,7 +311,7 @@ storage:
       serial: CT*
 storage:
   layout:
-    name: disk
+    name: direct
     match:
       ssd: true
 ```
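Since match specs are handled independently of the layout name, the new "zfs" layout should compose with disk selection in the same way; a minimal sketch, assuming that behaviour is unchanged for zfs (the `ssd: true` criterion is reused from the example above, not something this change adds):

```yaml
storage:
  layout:
    name: zfs
    match:
      ssd: true
```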


@@ -46,11 +46,10 @@ autoinstall:
         flag: boot, wipe: superblock, grub_device: true}
     - {type: format, id: d1p1_format, label: efi, fstype: fat32,
        volume: d1p1}
+    - {type: mount, id: d1p1_mount, device: d1p1_format, path: /boot/efi}
     - {type: partition, id: d1p2, number: 2, size: -1, device: d1}
     - {type: zpool, id: d1_rpool, pool: rpool, vdevs: [d1p2], mountpoint: /,
       pool_properties: {ashift: 12},
       fs_properties: {acltype: posixacl, relatime: on, canmount: on,
       compression: gzip, devices: off, xattr: sa}}
-    - {type: mount, id: d1p1_mount, device: d1p1_format, path: /boot/efi}


@@ -46,6 +46,7 @@ autoinstall:
         flag: boot, wipe: superblock, grub_device: true}
     - {type: format, id: d1p1_format, label: efi, fstype: fat32,
       volume: d1p1}
+    - {type: mount, id: d1p1_mount, device: d1p1_format, path: /boot/efi}
     - {type: partition, id: d1p2, number: 2, size: -1, device: d1}
     - {type: zpool, id: d1_rpool, pool: rpool, vdevs: [d1p2], mountpoint: /,
@@ -78,6 +79,3 @@ autoinstall:
       properties: {canmount: off, mountpoint: /}}
     - {type: zfs, id: d1_rpool_USERDATA_home, pool: d1_rpool,
       volume: /USERDATA/home}
-    # workaround target exist problem
-    - {type: mount, id: d1p1_mount, device: d1p1_format, path: /boot/efi}
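Both example configs receive the same structural fix: the ESP mount action moves up to sit directly after its format action, and the "workaround target exist problem" comment that justified putting it last is dropped. A condensed sketch of the resulting action order (the ESP's size and the zpool property lists are omitted here; see the full examples above):

```yaml
storage:
  config:
    # ESP first: partition, format, mount
    - {type: partition, id: d1p1, number: 1, device: d1,
       flag: boot, wipe: superblock, grub_device: true}
    - {type: format, id: d1p1_format, label: efi, fstype: fat32, volume: d1p1}
    - {type: mount, id: d1p1_mount, device: d1p1_format, path: /boot/efi}
    # then the root pool on the remainder of the disk
    - {type: partition, id: d1p2, number: 2, size: -1, device: d1}
    - {type: zpool, id: d1_rpool, pool: rpool, vdevs: [d1p2], mountpoint: /}
```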


@@ -0,0 +1,38 @@
+#cloud-config
+autoinstall:
+  version: 1
+  identity:
+    realname: ''
+    hostname: ubuntu
+    username: ubuntu
+    password: '$6$wdAcoXrU039hKYPd$508Qvbe7ObUnxoj15DRCkzC3qO7edjH0VV7BPNRDYK4QR8ofJaEEF2heacn0QgD.f8pO8SNp83XNdWG6tocBM1'
+  source:
+    id: ubuntu-server-minimal
+  early-commands:
+    - apt-get install -y zfsutils-linux
+  late-commands:
+    # Let's avoid recreating LP: #1993318
+    - zpool set cachefile= rpool
+    - cp /etc/zfs/zpool.cache "/target/etc/zfs/"
+    - mkdir -p "/etc/zfs/zfs-list.cache" "/target/etc/zfs/zfs-list.cache"
+    - truncate -s 0 /etc/zfs/zfs-list.cache/rpool
+    - >-
+      env -i
+      ZEVENT_POOL=rpool
+      ZED_ZEDLET_DIR=/etc/zfs/zed.d
+      ZEVENT_SUBCLASS=history_event
+      ZFS=zfs
+      ZEVENT_HISTORY_INTERNAL_NAME=create
+      /etc/zfs/zed.d/history_event-zfs-list-cacher.sh
+    - >-
+      sh -c
+      'sed -E "s|\t/target/?|\t/|g" "/etc/zfs/zfs-list.cache/rpool" > "/target/etc/zfs/zfs-list.cache/rpool"'
+    - rm -f "/etc/zfs/zfs-list.cache/rpool"
+  storage:
+    layout:
+      name: zfs
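These late-commands keep the installed system from coming up with ZFS mount caches that still point at the installer's /target tree, the failure mode tracked in LP: #1993318: the pool is pointed back at the default cachefile location, that cache plus a freshly regenerated zfs-list.cache are copied into the target, and the cached mountpoints are rewritten from /target to /. A hypothetical extra late-command, not part of this change, that would abort the install if the rewrite missed anything:

```yaml
late-commands:
  # hypothetical sanity check: the rewritten cache must not mention /target
  - sh -c '! grep -q /target /target/etc/zfs/zfs-list.cache/rpool'
```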


@@ -1095,9 +1095,6 @@ class ZPool:
     def mount(self):
         return self.mountpoint
 
-    async def pre_shutdown(self, command_runner):
-        await command_runner.run(['zpool', 'export', self.pool])
-
 
 @fsobj("zfs")
 class ZFS:
@@ -1521,6 +1518,7 @@ class FilesystemModel(object):
             if c is None:
                 # Ignore any action we do not know how to process yet
                 # (e.g. bcache)
+                log.debug(f'ignoring unknown action type {action["type"]}')
                 continue
             kw = {}
             field_names = set()


@@ -477,8 +477,8 @@ class FilesystemController(SubiquityController, FilesystemManipulator):
         bpool_part = self.create_partition(device, gap_boot, dict(fstype=None))
         rpool_part = self.create_partition(device, gap_rest, dict(fstype=None))
-        self.create_zpool(rpool_part, 'rpool', '/')
         self.create_zpool(bpool_part, 'bpool', '/boot')
+        self.create_zpool(rpool_part, 'rpool', '/')
 
     @functools.singledispatchmethod
     def start_guided(self, target: GuidedStorageTarget,
@@ -1324,5 +1324,5 @@ class FilesystemController(SubiquityController, FilesystemManipulator):
         if not self.reset_partition_only:
             await self.app.command_runner.run(
                 ['umount', '--recursive', '/target'])
-            for pool in self.model._all(type='zpool'):
-                await pool.pre_shutdown(self.app.command_runner)
+            if len(self.model._all(type='zpool')) > 0:
+                await self.app.command_runner.run(['zpool', 'export', '-a'])