如果对磁盘直接使用fdisk分区,那么这种分区,我们叫做Linux的标准分区。Linux的标准分区格式化成文件系统后,挂载使用,一旦文件系统的空间满了,是无法扩容的;如果一定要"扩容",需要将数据备份出来,再换一个更大的文件系统,再将数据还原回去。这种方法效率特别低。
由此促使了逻辑卷的诞生
逻辑卷是个抽象的概念,我们需要记很多新的专属名词
pv(physical volume)物理卷
vg(volume group)卷组
lv(logical volume)逻辑卷
disk-->pv-->vg-->lv
1
2
3
4
5
6
7
8
pv的创建,pv的删除(向pv里面加入新的块设备就相当于扩容pv,从pv里面删除块设备,就相当于缩容pv)
创建pv
[root@server ~]# pvcreate /dev/sda1
Physical volume "/dev/sda1" successfully created.
[root@server ~]# pvcreate /dev/sda2
Physical volume "/dev/sda2" successfully created.
[root@server ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 cs lvm2 a-- <79.00g 0
/dev/sda1 lvm2 --- 1.00g 1.00g
/dev/sda2 lvm2 --- 2.00g 2.00g
删除pv #如果pv已经被某个vg使用,那么是无法删除的
[root@server ~]# pvremove /dev/sda1
Labels on physical volume "/dev/sda1" successfully wiped.
[root@server ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 cs lvm2 a-- <79.00g 0
/dev/sda2 lvm2 --- 2.00g 2.00g
pv可以直接指定一块硬盘
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
vg的创建,vg的删除,vg的扩容
创建vg
[root@server ~]# vgcreate vg1 /dev/sda1
Volume group "vg1" successfully created
[root@server ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cs 1 3 0 wz--n- <79.00g 0
vg1 1 0 0 wz--n- 1020.00m 1020.00m
删除vg
[root@server ~]# vgremove vg1
Volume group "vg1" successfully removed
[root@server ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cs 1 3 0 wz--n- <79.00g 0
如果vg上有逻辑卷在使用,那么vg无法删除
vg的扩容
[root@server ~]# vgextend vg1 /dev/sda2
Volume group "vg1" successfully extended
[root@server ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cs 1 3 0 wz--n- <79.00g 0
vg1 2 0 0 wz--n- 2.99g 2.99g
[root@server ~]# vgdisplay #查看vg的详细信息
--- Volume group ---
VG Name cs
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 3
Max PV 0
Cur PV 1
Act PV 1
VG Size <79.00 GiB
PE Size 4.00 MiB
Total PE 20223
Alloc PE / Size 20223 / <79.00 GiB
Free PE / Size 0 / 0
VG UUID ZOZBIu-IWIe-Xf9T-XX5V-boRS-e1V1-wBeg6o
--- Volume group ---
VG Name vg1
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 2
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 2.99 GiB
PE Size 4.00 MiB
Total PE 766
Alloc PE / Size 0 / 0
Free PE / Size 766 / 2.99 GiB
VG UUID Emc9Rb-F3vv-lhjh-L53i-eh1w-hkWj-EqF08i
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
lv的创建,lv的删除,lv的扩容
lv的创建
-n参数表示逻辑卷的名字
-L参数表示逻辑卷的大小
vg1就表示使用vg1创建该逻辑卷
[root@server ~]# lvcreate -n lv1 -L 1G vg1
Logical volume "lv1" created.
[root@server ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
home cs -wi-ao---- 23.33g
root cs -wi-ao---- 47.79g
swap cs -wi-ao---- 7.87g
lv1 vg1 -wi-a----- 1.00g
逻辑卷创建完成之后,可以直接格式化成文件系统使用。
在卷组下面创建一个新的逻辑卷后,该逻辑卷会自动出现在设备文件路径/dev/vgname/lvname
[root@server ~]# ls /dev/vg1 -l
total 0
lrwxrwxrwx. 1 root root 7 May 2 09:18 lv1 -> ../dm-3
lrwxrwxrwx. 1 root root 7 May 2 09:21 lv2 -> ../dm-4
[root@server ~]# ls -l /dev/dm-3 /dev/dm-4
brw-rw----. 1 root disk 253, 3 May 2 09:18 /dev/dm-3
brw-rw----. 1 root disk 253, 4 May 2 09:21 /dev/dm-4
dm设备就表示逻辑卷的本尊
lv的删除
[root@server ~]# lvremove /dev/vg1/lv1
Do you really want to remove active logical volume vg1/lv1? [y/n]: y
Logical volume "lv1" successfully removed.
lv的扩容
[root@server ~]# lvextend /dev/vg1/lv1-ext4 -L 2g
Size of logical volume vg1/lv1-ext4 changed from 1.00 GiB (256 extents) to 2.00 GiB (512 extents).
Logical volume vg1/lv1-ext4 successfully resized.
[root@server ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
home cs -wi-ao---- 23.33g
root cs -wi-ao---- 47.79g
swap cs -wi-ao---- 7.87g
lv1-ext4 vg1 -wi-ao---- 2.00g
lv2-xfs vg1 -wi-ao---- 1.00g
[root@server ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 17M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/cs-root xfs 48G 2.0G 46G 5% /
/dev/nvme0n1p1 xfs 1014M 210M 805M 21% /boot
/dev/mapper/cs-home xfs 24G 199M 24G 1% /home
tmpfs tmpfs 774M 0 774M 0% /run/user/0
/dev/mapper/vg1-lv1--ext4 ext4 976M 960M 0 100% /ext4-test
/dev/mapper/vg1-lv2--xfs xfs 1014M 1014M 20K 100% /xfs-test
#因为扩容的部分没有格式化,所以在文件系统上检测不到
ext4将扩容的空间加入到文件系统操作
[root@server ~]# resize2fs /dev/vg1/lv1-ext4
resize2fs 1.45.6 (20-Mar-2020)
Filesystem at /dev/vg1/lv1-ext4 is mounted on /ext4-test; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 1
The filesystem on /dev/vg1/lv1-ext4 is now 652288 (4k) blocks long.
[root@server ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 17M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/cs-root xfs 48G 2.0G 46G 5% /
/dev/nvme0n1p1 xfs 1014M 210M 805M 21% /boot
/dev/mapper/cs-home xfs 24G 199M 24G 1% /home
tmpfs tmpfs 774M 0 774M 0% /run/user/0
/dev/mapper/vg1-lv1--ext4 ext4 2.5G 961M 1.4G 41% /ext4-test
/dev/mapper/vg1-lv2--xfs xfs 1014M 1014M 20K 100% /xfs-test
xfs将扩容的空间加入到文件系统操作
[root@server ~]# xfs_growfs /dev/vg1/lv2-xfs #指定块设备
meta-data=/dev/mapper/vg1-lv2--xfs isize=512 agcount=4, agsize=65536 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 262144 to 655360
[root@server ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 17M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/cs-root xfs 48G 2.0G 46G 5% /
/dev/nvme0n1p1 xfs 1014M 210M 805M 21% /boot
/dev/mapper/cs-home xfs 24G 199M 24G 1% /home
tmpfs tmpfs 774M 0 774M 0% /run/user/0
/dev/mapper/vg1-lv1--ext4 ext4 2.5G 961M 1.4G 41% /ext4-test
/dev/mapper/vg1-lv2--xfs xfs 2.5G 1.1G 1.5G 41% /xfs-test
[root@server ~]# xfs_growfs /xfs-test/ #指定挂载点
meta-data=/dev/mapper/vg1-lv2--xfs isize=512 agcount=10, agsize=65536 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=655360, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
格式化vg1中的lv1
[root@server ~]# mkfs.ext4 /dev/vg1/lv1
mke2fs 1.45.6 (20-Mar-2020)
Creating filesystem with 262144 4k blocks and 65536 inodes
Filesystem UUID: 3740310b-630e-4d75-947c-7b988833343a
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done
1
2
3
4
5
6
7
8
9
10
11
disk的uuid
[root@server ~]# ls -l /dev/disk/by-uuid/
total 0
lrwxrwxrwx. 1 root root 10 May 2 09:29 3740310b-630e-4d75-947c-7b988833343a -> ../../dm-3
lrwxrwxrwx. 1 root root 10 May 2 09:00 7872274e-c7b5-4dbb-b6cf-5d1cff799f36 -> ../../dm-2
lrwxrwxrwx. 1 root root 15 May 2 09:00 8e2d7a13-8485-4560-9534-273341283596 -> ../../nvme0n1p1
lrwxrwxrwx. 1 root root 10 May 2 09:00 a0326c1d-7588-4dcf-98b1-0b156d4c15d5 -> ../../dm-0
lrwxrwxrwx. 1 root root 10 May 2 09:00 f3e45e3b-65a9-4087-bfc9-7f4075c188eb -> ../../dm-1