# Mdadm Raid 1 Setup

![](0.png)

In this tutorial, we're going to set up a RAID 1 array across two hard drives.

## **Initial Setup**

On my new server I asked for two 2TB hard drives; we're going to set them up as a RAID 1 virtual disk using mdadm and cfdisk.
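
mdadm isn't always preinstalled, so if the `mdadm` command is missing you'll want to pull it in first (a minimal sketch, assuming a Debian-based server like the one used here):

```
# install the mdadm userspace tools (Debian/Ubuntu)
apt update && apt install -y mdadm
```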

```
root@Megapede ~ # lsblk
NAME        MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0   1.8T  0 disk
sdb           8:16   0   1.8T  0 disk
nvme0n1     259:0    0 476.9G  0 disk
├─nvme0n1p1 259:1    0    32G  0 part
│ └─md0       9:0    0    32G  0 raid1 [SWAP]
├─nvme0n1p2 259:2    0     1G  0 part
│ └─md1       9:1    0  1022M  0 raid1 /boot
└─nvme0n1p3 259:3    0 443.9G  0 part
  └─md2       9:2    0 443.8G  0 raid1 /
nvme1n1     259:4    0 476.9G  0 disk
├─nvme1n1p1 259:5    0    32G  0 part
│ └─md0       9:0    0    32G  0 raid1 [SWAP]
├─nvme1n1p2 259:6    0     1G  0 part
│ └─md1       9:1    0  1022M  0 raid1 /boot
└─nvme1n1p3 259:7    0 443.9G  0 part
  └─md2       9:2    0 443.8G  0 raid1 /

root@Megapede ~ # cat /proc/mdstat
Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]
md2 : active raid1 nvme0n1p3[1] nvme1n1p3[0]
      465370432 blocks super 1.2 [2/2] [UU]
      bitmap: 0/4 pages [0KB], 65536KB chunk

md0 : active (auto-read-only) raid1 nvme0n1p1[1] nvme1n1p1[0]
      33520640 blocks super 1.2 [2/2] [UU]
      resync=PENDING

md1 : active raid1 nvme0n1p2[1] nvme1n1p2[0]
      1046528 blocks super 1.2 [2/2] [UU]

unused devices: <none>
```

So here there are already three RAID 1 arrays: md0, md1 and md2. To link the disks sda and sdb together in a RAID 1 array, we're going to create md3. We first stop any leftover md3 device, wipe any old RAID superblocks from both disks and comment out any stale md3 line in /etc/fstab, then create the new array:

```
root@Megapede ~ # mdadm --stop /dev/md3
mdadm: stopped /dev/md3
root@Megapede ~ # sudo mdadm --zero-superblock /dev/sda
sudo: unable to resolve host Megapede: Name or service not known
root@Megapede ~ # sudo mdadm --zero-superblock /dev/sdb
sudo: unable to resolve host Megapede: Name or service not known

root@Megapede ~ # vim /etc/fstab
# remove/comment the fstab line that has md3

root@Megapede ~ # mdadm --create --verbose /dev/md3 --level=1 --raid-devices=2 /dev/sda /dev/sdb
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
mdadm: size set to 1953382464K
mdadm: automatically enabling write-intent bitmap on large array
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md3 started.

root@Megapede ~ # lsblk
NAME        MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1
sdb           8:16   0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1

[...]

root@Megapede ~ # cat /proc/mdstat
Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]
md3 : active raid1 sdb[1] sda[0]
      1953382464 blocks super 1.2 [2/2] [UU]
      [>....................]  resync =  0.9% (18044224/1953382464) finish=198.3min speed=162617K/sec
      bitmap: 15/15 pages [60KB], 65536KB chunk
```

This shows the synchronisation progress; it may take some time, but you can proceed with the next steps while it is ongoing.
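
If you want to keep an eye on the resync without re-running the command by hand, you can simply poll /proc/mdstat (a small sketch; the 5-second interval is an arbitrary choice):

```
# refresh the resync status every 5 seconds (Ctrl+C to quit)
watch -n 5 cat /proc/mdstat
```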

```
root@Megapede ~ # mkfs.ext4 -F /dev/md3
mke2fs 1.47.0 (5-Feb-2023)
Creating filesystem with 488345616 4k blocks and 122093568 inodes
Filesystem UUID: 6386dec8-2bc3-432b-ac14-44e39e5dfb5c
Superblock backups stored on blocks:
	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
	4096000, 7962624, 11239424, 20480000, 23887872, 71663616, 78675968,
	102400000, 214990848

Allocating group tables: done
Writing inode tables: done
Creating journal (262144 blocks): done
Writing superblocks and filesystem accounting information: done

root@Megapede ~ # mkdir -p /mnt/md3

root@megapede ~ # mount /dev/md3 /mnt/md3

root@Megapede ~ # df -h -x devtmpfs -x tmpfs
Filesystem      Size  Used Avail Use% Mounted on
/dev/md2        436G  1.7G  412G   1% /
/dev/md1        989M  117M  821M  13% /boot
/dev/md3        1.8T   28K  1.7T   1% /mnt/md3
```

We formatted the md3 RAID 1 device as ext4, created the /mnt/md3 mountpoint and mounted the device there; the df command now shows the available 1.8TB of space.

Next we need to save the configuration in /etc/mdadm/mdadm.conf so that the array is automatically assembled and active on every boot:

```
mdadm --detail --scan
ARRAY /dev/md/1 metadata=1.2 name=rescue:1 UUID=8005f92c:5ca33d93:b2a5c843:7d4500ba
ARRAY /dev/md/0 metadata=1.2 name=rescue:0 UUID=9a83d519:88dc0912:2d34658b:d9ac4a20
ARRAY /dev/md/2 metadata=1.2 name=rescue:2 UUID=082e5878:cad579b5:5b2d8dda:d41848f9
ARRAY /dev/md3 metadata=1.2 name=Megapede:3 UUID=779a3d96:8a08002c:7c5c2530:aded4af8

root@megapede ~ # vim /etc/mdadm/mdadm.conf
root@megapede ~ # cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# !NB! Run update-initramfs -u after updating this file.
# !NB! This will ensure that initramfs has an uptodate copy.
#
# Please refer to mdadm.conf(5) for information about this file.
#

# by default (built-in), scan all partitions (/proc/partitions) and all
# containers for MD superblocks. alternatively, specify devices to scan, using
# wildcards if desired.
#DEVICE partitions containers

# automatically tag new arrays as belonging to the local system
HOMEHOST <system>

# instruct the monitoring daemon where to send mail alerts
MAILADDR root

# This configuration was auto-generated on Fri, 07 Jul 2023 11:02:42 +0200 by mkconf
ARRAY /dev/md/0 metadata=1.2 name=rescue:0 UUID=9a83d519:88dc0912:2d34658b:d9ac4a20
ARRAY /dev/md/2 metadata=1.2 name=rescue:2 UUID=082e5878:cad579b5:5b2d8dda:d41848f9
ARRAY /dev/md/1 metadata=1.2 name=rescue:1 UUID=8005f92c:5ca33d93:b2a5c843:7d4500ba
ARRAY /dev/md3 metadata=1.2 name=megapede:3 UUID=6e20ae93:f1548680:56621159:b8910258
```
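
Note the two !NB! lines in that file: after adding the new ARRAY line, the initramfs should be regenerated so the array is also known early at boot. Instead of pasting the line in by hand with vim, you could also append it straight from the scan output (a minimal sketch, assuming a Debian-style system; adjust the grep pattern to your array):

```
# append only the new md3 line to mdadm.conf, then rebuild the initramfs
mdadm --detail --scan | grep '/dev/md3' >> /etc/mdadm/mdadm.conf
update-initramfs -u
```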

Then we also make sure it's written in /etc/fstab, using the filesystem UUID that blkid reports for /dev/md3:

```
root@megapede ~ # blkid
/dev/nvme0n1p3: UUID="082e5878-cad5-79b5-5b2d-8ddad41848f9" UUID_SUB="12260b20-4e48-3bda-bbaf-2d989216743e" LABEL="rescue:2" TYPE="linux_raid_member" PARTUUID="0363e1f6-03"
/dev/nvme0n1p1: UUID="9a83d519-88dc-0912-2d34-658bd9ac4a20" UUID_SUB="74300757-2675-0206-93db-1ef7986345c4" LABEL="rescue:0" TYPE="linux_raid_member" PARTUUID="0363e1f6-01"
/dev/nvme0n1p2: UUID="8005f92c-5ca3-3d93-b2a5-c8437d4500ba" UUID_SUB="1dcf6c06-41f5-9b79-1620-ae51258931d4" LABEL="rescue:1" TYPE="linux_raid_member" PARTUUID="0363e1f6-02"
/dev/sdb: UUID="6e20ae93-f154-8680-5662-1159b8910258" UUID_SUB="54392230-39fb-ab76-10ea-afda06676e1f" LABEL="megapede:3" TYPE="linux_raid_member"
/dev/md2: UUID="9718e8fc-3a45-4338-8157-a1b64c29c894" BLOCK_SIZE="4096" TYPE="ext4"
/dev/md0: UUID="10f6338c-c09c-44d5-b698-6aa8a04ef837" TYPE="swap"
/dev/nvme1n1p2: UUID="8005f92c-5ca3-3d93-b2a5-c8437d4500ba" UUID_SUB="eb98c252-4599-7b14-14f0-4360a4abad7b" LABEL="rescue:1" TYPE="linux_raid_member" PARTUUID="2306f806-02"
/dev/nvme1n1p3: UUID="082e5878-cad5-79b5-5b2d-8ddad41848f9" UUID_SUB="6e4b15b0-77e8-1635-5fc8-ab2a2aceadcb" LABEL="rescue:2" TYPE="linux_raid_member" PARTUUID="2306f806-03"
/dev/nvme1n1p1: UUID="9a83d519-88dc-0912-2d34-658bd9ac4a20" UUID_SUB="094cb30b-077d-3163-9776-73a53b2e404d" LABEL="rescue:0" TYPE="linux_raid_member" PARTUUID="2306f806-01"
/dev/sda: UUID="6e20ae93-f154-8680-5662-1159b8910258" UUID_SUB="9059d7c8-c686-5ff5-20ed-b8c14e96ca0c" LABEL="megapede:3" TYPE="linux_raid_member"
/dev/md1: UUID="d817acef-3aca-4edc-bd9a-559b50e47d20" BLOCK_SIZE="4096" TYPE="ext3"
/dev/md3: UUID="433c3cc3-8c20-426a-9b10-fb56b231cd40" BLOCK_SIZE="4096" TYPE="ext4"

root@Megapede ~ # cat /etc/fstab
proc /proc proc defaults 0 0
# /dev/md/0
UUID=10f6338c-c09c-44d5-b698-6aa8a04ef837 none swap sw 0 0
# /dev/md/1
UUID=d817acef-3aca-4edc-bd9a-559b50e47d20 /boot ext3 defaults 0 0
# /dev/md/2
UUID=9718e8fc-3a45-4338-8157-a1b64c29c894 / ext4 defaults 0 0
# /dev/md3
UUID=433c3cc3-8c20-426a-9b10-fb56b231cd40 /mnt/md3 ext4 defaults 0 0
```
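
Before rebooting, the new fstab entry can also be sanity-checked (a small sketch; findmnt ships with util-linux):

```
# parse /etc/fstab and report obvious mistakes
findmnt --verify
# or simply try to mount everything listed in fstab that isn't mounted yet
mount -a
```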

Then just reboot to check that the array and its mount come back up automatically:

```
root@Megapede ~ # lsblk
NAME        MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1 /mnt/md3
sdb           8:16   0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1 /mnt/md3
nvme0n1     259:0    0 476.9G  0 disk
├─nvme0n1p1 259:1    0    32G  0 part
│ └─md0       9:0    0    32G  0 raid1 [SWAP]
├─nvme0n1p2 259:2    0     1G  0 part
│ └─md1       9:1    0  1022M  0 raid1 /boot
└─nvme0n1p3 259:3    0 443.9G  0 part
  └─md2       9:2    0 443.8G  0 raid1 /
nvme1n1     259:4    0 476.9G  0 disk
├─nvme1n1p1 259:5    0    32G  0 part
│ └─md0       9:0    0    32G  0 raid1 [SWAP]
├─nvme1n1p2 259:6    0     1G  0 part
│ └─md1       9:1    0  1022M  0 raid1 /boot
└─nvme1n1p3 259:7    0 443.9G  0 part
  └─md2       9:2    0 443.8G  0 raid1 /

root@Megapede ~ # reboot now
```

Or you can test the fstab entry without rebooting, by unmounting the device and mounting it again by name:

```
root@megapede ~ # umount /dev/md3
root@megapede ~ # systemctl daemon-reload
root@megapede ~ # lsblk
NAME        MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1
sdb           8:16   0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1

[...]

root@megapede ~ # mount /dev/md3
root@megapede ~ # lsblk
NAME        MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda           8:0    0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1 /mnt/md3
sdb           8:16   0   1.8T  0 disk
└─md3         9:3    0   1.8T  0 raid1 /mnt/md3

[...]
```

And it mounts successfully without us having to specify the mountpoint, because the entry is in /etc/fstab.

## **Create a RAID 1 with One Disk, Then Add Another**

I have a use case where I already have data on one disk and I want to mirror it without losing it. The plan is to create a RAID 1 array with only the new, empty disk as a member, move the data onto it, and then add the original disk as the second member. So I do the following:

```
[ Wonderland ] [ /dev/pts/13 ] [~]
→ lsblk
NAME           MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda              8:0    0 232.9G  0 disk
├─sda1           8:1    0   512M  0 part  /boot/efi
├─sda2           8:2    0 231.4G  0 part  /
└─sda3           8:3    0   977M  0 part  [SWAP]
nvme0n1        259:0    0 931.5G  0 disk
└─nvme0n1p1    259:4    0 931.5G  0 part
nvme1n1        259:1    0 931.5G  0 disk
└─nvme1n1p1    259:3    0 931.5G  0 part
  └─veracrypt1 253:0    0 931.5G  0 dm    /media/veracrypt1

[ Wonderland ] [ /dev/pts/13 ] [~]
→ mdadm --zero-superblock /dev/nvme0n1
mdadm: Unrecognised md component device - /dev/nvme0n1

[ Wonderland ] [ /dev/pts/13 ] [~]
→ mdadm --create --verbose /dev/md3 --level=1 --raid-devices=1 /dev/nvme0n1 --force
mdadm: partition table exists on /dev/nvme0n1
mdadm: partition table exists on /dev/nvme0n1 but will be lost or
       meaningless after creating array
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
mdadm: size set to 976630464K
mdadm: automatically enabling write-intent bitmap on large array
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md3 started.

[ Wonderland ] [ /dev/pts/13 ] [~]
→ lsblk
NAME           MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda              8:0    0 232.9G  0 disk
├─sda1           8:1    0   512M  0 part  /boot/efi
├─sda2           8:2    0 231.4G  0 part  /
└─sda3           8:3    0   977M  0 part  [SWAP]
nvme0n1        259:0    0 931.5G  0 disk
└─md3            9:3    0 931.4G  0 raid1
nvme1n1        259:1    0 931.5G  0 disk
└─nvme1n1p1    259:3    0 931.5G  0 part
  └─veracrypt1 253:0    0 931.5G  0 dm    /media/veracrypt1
```
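
Before the disk that currently holds the data gets added to the array, that data has to be moved onto md3, otherwise it would be overwritten during the rebuild. A hypothetical sketch of that step, reusing the mountpoints from the lsblk output above (not part of the original session; adjust filesystem and paths to your setup):

```
# format the new single-disk array, mount it, and copy the data over
mkfs.ext4 /dev/md3
mkdir -p /mnt/md3
mount /dev/md3 /mnt/md3
rsync -aHAX --info=progress2 /media/veracrypt1/ /mnt/md3/
```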

And then later on we add the second device like so, first copying the partition layout over with sfdisk and then adding the disk to the array:

```
[ Wonderland ] [ /dev/pts/13 ] [~]
→ sfdisk -d /dev/nvme0n1 | sfdisk /dev/nvme1n1

[ Wonderland ] [ /dev/pts/13 ] [~]
→ mdadm --manage /dev/md3 --add /dev/nvme1n1

[ Wonderland ] [ /dev/pts/15 ] [~]
→ mdadm --detail /dev/md3
/dev/md3:
           Version : 1.2
     Creation Time : Sat Oct  7 21:03:40 2023
        Raid Level : raid1
        Array Size : 976630464 (931.39 GiB 1000.07 GB)
     Used Dev Size : 976630464 (931.39 GiB 1000.07 GB)
      Raid Devices : 1
     Total Devices : 2
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sat Oct  7 22:13:05 2023
             State : clean
    Active Devices : 1
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 1

Consistency Policy : bitmap

              Name : wonderland:3  (local to host wonderland)
              UUID : 7ad13800:d65ab2f4:4bb35e9c:f27e4c35
            Events : 2

    Number   Major   Minor   RaidDevice State
       0     259        0        0      active sync   /dev/nvme0n1

       1     259        1        -      spare   /dev/nvme1n1

[ Wonderland ] [ /dev/pts/15 ] [~]
→ cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md3 : active raid1 nvme1n1[1](S) nvme0n1[0]
      976630464 blocks super 1.2 [1/1] [U]
      bitmap: 7/8 pages [28KB], 65536KB chunk

unused devices: <none>
```
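
Note that Raid Devices is still 1 in the output above, so the newly added disk only sits in the array as a spare; nothing is mirrored to it yet. To turn it into an active mirror, the array has to be grown to two raid devices (a short sketch, not shown in the original session):

```
# promote the spare to an active mirror; the rebuild then shows up in /proc/mdstat
mdadm --grow /dev/md3 --raid-devices=2
cat /proc/mdstat
```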