fdisk /dev/sdb
zpool create -f -o ashift=12 ssd-zpool /dev/sdb2
zfs set compression=lz4 atime=off ssd-zpool
zpool list
root@TP-PVE-249:~# zpool list
NAME        SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
rpool       928G   169G   759G         -     8%    18%  1.00x  ONLINE  -
ssd-zpool   448G   444K   448G         -     0%     0%  1.00x  ONLINE  -
root@aac:~# echo "$[3*1024*1024*1024]"
3221225472
vi /etc/modprobe.d/zfs.conf
: options zfs zfs_arc_max=3221225472
echo "$((3 * 1024 * 1024 * 1024))" >/sys/module/zfs/parameters/zfs_arc_max
update-initramfs -u -k all
reboot
因系統碟毀損重新安裝, 將原本放在 zfs 資料碟加回重新安裝的主機內
root@nuc:/etc/postfix# fdisk -l /dev/nvme0n1 Disk /dev/nvme0n1: 953.9 GiB, 1024209543168 bytes, 2000409264 sectors Disk model: PLEXTOR PX-1TM9PeGN Units: sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 512 bytes / 512 bytes Disklabel type: gpt Disk identifier: 31E1D4CF-870D-E741-B28E-2305FDF86533 Device Start End Sectors Size Type /dev/nvme0n1p1 2048 2000392191 2000390144 953.9G Solaris /usr & Apple ZFS /dev/nvme0n1p9 2000392192 2000408575 16384 8M Solaris reserved 1
root@nuc:/etc/postfix# zdb -l /dev/nvme0n1p1 ------------------------------------ LABEL 0 ------------------------------------ version: 5000 name: 'local-zfs' state: 0 txg: 5514533 pool_guid: 1902468729180364296 errata: 0 hostid: 585158084 hostname: 'nuc' top_guid: 1144094455533164821 guid: 1144094455533164821 vdev_children: 1 vdev_tree: type: 'disk' id: 0 guid: 1144094455533164821 path: '/dev/nvme0n1p1' devid: 'nvme-PLEXTOR_PX-1TM9PeGN_P02927100475-part1' phys_path: 'pci-0000:6d:00.0-nvme-1' whole_disk: 1 metaslab_array: 65 metaslab_shift: 33 ashift: 12 asize: 1024195035136 is_log: 0 DTL: 25995 create_txg: 4 features_for_read: com.delphix:hole_birth com.delphix:embedded_data labels = 0 1 2 3
root@nuc:/etc/postfix# zpool import -d /dev/nvme0n1p1
   pool: local-zfs
     id: 1902468729180364296
  state: ONLINE
 status: The pool was last accessed by another system.
 action: The pool can be imported using its name or numeric identifier and
        the '-f' flag.
   see: http://zfsonlinux.org/msg/ZFS-8000-EY
 config:

        local-zfs   ONLINE
          nvme0n1   ONLINE
zpool import -f local-zfs
zpool export ssd-zfs
zpool import ssd-zfs ssd-zpool
zpool destroy ssd2-zfs
Message: cannot import 'rpool': more than one matching pool
         import by numeric ID instead
Error: 1
就可以使用 zpool import 指令來處理
/sbin/zpool import -N 13396254673059535051
zpool status pbs-zpool
  pool: pbs-zpool
 state: DEGRADED
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
  scan: resilver in progress since Thu Oct 29 15:43:26 2020
        355G scanned at 49.8M/s, 96.9G issued at 13.6M/s, 1.55T total
        97.1G resilvered, 6.09% done, 1 days 07:14:16 to go
config:

        NAME        STATE     READ WRITE CKSUM
        pbs-zpool   DEGRADED     0     0     0
          sdb1      DEGRADED    11    52     0  too many errors

errors: No known data errors
zpool attach pbs-zpool sdb1 sdf1
如果這指令執行幾秒沒有出現錯誤訊息, 就可以透過
zpool status pbs-zpool
來查看修復與同步狀況
  pool: pbs-zpool
 state: DEGRADED
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
  scan: resilver in progress since Thu Oct 29 15:43:26 2020
        355G scanned at 46.4M/s, 101G issued at 13.2M/s, 1.55T total
        102G resilvered, 6.34% done, 1 days 08:09:31 to go
config:

        NAME          STATE     READ WRITE CKSUM
        pbs-zpool     DEGRADED     0     0     0
          mirror-0    DEGRADED     0     0     0
            sdb1      DEGRADED    11    52     0  too many errors
            sdf1      ONLINE       0     0     0  (resilvering)

errors: No known data errors
zpool clear pbs-zpool
zpool detach pbs-zpool sdb1
zpool status pbs-zpool
應該可看到如下的訊息
  pool: pbs-zpool
 state: ONLINE
  scan: resilvered 1.35T in 1 days 04:24:31 with 0 errors on Mon Nov  2 10:25:43 2020
config:

        NAME        STATE     READ WRITE CKSUM
        pbs-zpool   ONLINE       0     0     0
          sdf1      ONLINE       0     0     0

errors: No known data errors
zpool set autoexpand=on pbs-zpool
parted /dev/sdd
  resizepart 1 [X.XGB]? quit
zpool online -e pbs-zpool sdd1
df -h
實際處理訊息如下
root@TP-PVE-252:/# parted /dev/sdd GNU Parted 3.2 Using /dev/sdd Welcome to GNU Parted! Type 'help' to view a list of commands. (parted) resizepart Partition number? 1 End? [8002GB]? Error: Partition(s) 1 on /dev/sdd have been written, but we have been unable to inform the kernel of the change, probably because it/they are in use. As a result, the old partition(s) will remain in use. You should reboot now before making further changes. Ignore/Cancel? Ignore/Cancel? I (parted) quit Information: You may need to update /etc/fstab. root@TP-PVE-252:/# zpool online -e pbs-zpool sdd1 root@TP-PVE-252:/# df -h Filesystem Size Used Avail Use% Mounted on udev 3.9G 0 3.9G 0% /dev tmpfs 796M 28M 768M 4% /run : : tmpfs 796M 0 796M 0% /run/user/0 pbs-zpool 7.1T 1.4T 5.7T 20% /pbs-zpool root@TP-PVE-252:/#
root@pve-1:~# zpool status pool: rpool state: ONLINE scan: scrub repaired 0B in 0 days 00:00:10 with 0 errors on Fri Dec 3 17:24:52 2066 config: NAME STATE READ WRITE CKSUM rpool ONLINE 0 0 0 raidz2-0 ONLINE 0 0 0 scsi-35000c50099b187df-part3 ONLINE 0 0 0 scsi-35000c50095c609f7-part3 ONLINE 0 0 0 scsi-35000c50099b18c6b-part3 ONLINE 0 0 0 scsi-35000c50099b185e3-part3 ONLINE 0 0 0 scsi-35000c50099b18453-part3 ONLINE 0 0 0 scsi-35000c50099b18ebb-part3 ONLINE 0 0 0
zpool add rpool cache nvme0n1
root@pve1:~# zpool status pool: rpool state: ONLINE scan: none requested config: NAME STATE READ WRITE CKSUM rpool ONLINE 0 0 0 raidz2-0 ONLINE 0 0 0 scsi-35000c50099b187df-part3 ONLINE 0 0 0 scsi-35000c50095c609f7-part3 ONLINE 0 0 0 scsi-35000c50099b18c6b-part3 ONLINE 0 0 0 scsi-35000c50099b185e3-part3 ONLINE 0 0 0 scsi-35000c50099b18453-part3 ONLINE 0 0 0 scsi-35000c50099b18ebb-part3 ONLINE 0 0 0 cache nvme0n1 ONLINE 0 0 0
zpool iostat -v
root@pve1:~# zpool iostat -v capacity operations bandwidth pool alloc free read write read write -------------------------------- ----- ----- ----- ----- ----- ----- rpool 180G 5.28T 0 243 2.62K 5.17M raidz2 180G 5.28T 0 243 2.62K 5.17M scsi-35000c50099b187df-part3 - - 0 39 447 860K scsi-35000c50095c609f7-part3 - - 0 40 352 889K scsi-35000c50099b18c6b-part3 - - 0 42 536 905K scsi-35000c50099b185e3-part3 - - 0 40 456 879K scsi-35000c50099b18453-part3 - - 0 40 360 882K scsi-35000c50099b18ebb-part3 - - 0 40 530 878K cache - - - - - - nvme0n1 64.9G 308G 0 15 2 1.56M -------------------------------- ----- ----- ----- ----- ----- -----
zpool remove zfs2TB sdc
# zpool status zfs2TB
  pool: zfs2TB
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:55:00 with 0 errors on Sun Dec 13 01:19:07 2020
config:

        NAME                                         STATE     READ WRITE CKSUM
        zfs2TB                                       ONLINE       0     0     0
          ata-WDC_WD2002FAEX-007BA0_WD-WMAY03424496  ONLINE       0     0     0
        cache
          sdc                                        ONLINE       0     0     0
# zpool status zfs2TB
  pool: zfs2TB
 state: ONLINE
  scan: scrub repaired 0B in 0 days 00:55:00 with 0 errors on Sun Dec 13 01:19:07 2020
config:

        NAME                                         STATE     READ WRITE CKSUM
        zfs2TB                                       ONLINE       0     0     0
          ata-WDC_WD2002FAEX-007BA0_WD-WMAY03424496  ONLINE       0     0     0
zpool add pbs-zpool special mirror /dev/nvme0n1 /dev/nvme1n1
加入之後
root@h470:~# zpool status pbs-zpool
  pool: pbs-zpool
 state: ONLINE
  scan: scrub repaired 0B in 02:55:43 with 0 errors on Sun Nov 12 03:19:52 2023
config:

        NAME         STATE     READ WRITE CKSUM
        pbs-zpool    ONLINE       0     0     0
          sda1       ONLINE       0     0     0
        special
          nvme0n1    ONLINE       0     0     0
          nvme1n1    ONLINE       0     0     0

errors: No known data errors
watch zpool iostat -v pbs-zpool