1. Deploying a RAID array with the mdadm command

mdadm is short for "multiple devices admin"; it is the standard software RAID management tool on Linux.
If the mdadm command is not available, install it with yum:
yum install -y mdadm
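
To confirm the tool is available, an optional quick check:

#verify that mdadm is installed and check its version
rpm -q mdadm
mdadm --version
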
Main mdadm options:

-a  auto-create the device file when creating (-a yes); in manage mode, add a device to the array
-n  specify the number of devices
-l  specify the RAID level
-C  create an array
-v  verbose, show the process
-f  mark a device as faulty (simulate a disk failure)
-r  remove a device
-Q  show summary information
-D  show detailed information
-S  stop a RAID array

For more options, run man mdadm; a quick mode-by-mode reference follows.
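
For orientation, these options span mdadm's different modes (create, manage, misc). The forms used later in this article, with /dev/sdX and <disks...> as placeholders:

mdadm -Cv /dev/md0 -a yes -n 4 -l 10 <disks...>    #create mode: build a new array
mdadm /dev/md0 -f /dev/sdX                          #manage mode: mark a member faulty
mdadm /dev/md0 -r /dev/sdX                          #manage mode: remove a member
mdadm /dev/md0 -a /dev/sdX                          #manage mode: add a member back
mdadm -D /dev/md0                                   #misc mode: show detailed information
mdadm -S /dev/md0                                   #misc mode: stop the array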

2. Creating a RAID 10 array

1) Check the disks

#First, look at the current disk layout
[root@bigdata-senior01 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk 
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0   19G  0 part 
  ├─centos-root 253:0    0   32G  0 lvm  /
  └─centos-swap 253:1    0    2G  0 lvm  [SWAP]
sdb               8:16   0   20G  0 disk 
├─sdb1            8:17   0    2G  0 part /backup
├─sdb2            8:18   0    3G  0 part [SWAP]
└─sdb3            8:19   0   15G  0 part 
  └─centos-root 253:0    0   32G  0 lvm  /
sdc               8:32   0    1G  0 disk 
sdd               8:48   0    1G  0 disk 
sde               8:64   0    1G  0 disk 
sdf               8:80   0    1G  0 disk 
sr0              11:0    1  4.2G  0 rom
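
Before creating the array, it may be worth making sure the candidate disks (/dev/sdc through /dev/sdf here) carry no leftover filesystem or RAID signatures. An optional precaution using wipefs, not part of the original steps:

#optional: wipe any old filesystem/RAID signatures from the member disks
#(this destroys existing data on these disks; double-check the device names first)
wipefs -a /dev/sdc /dev/sdd /dev/sde /dev/sdf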

2) Create the array:

The -C option creates a RAID array; -v shows the creation process. The device name that follows, /dev/md0, is the name of the RAID array being created;
-a yes automatically creates the device file; -n 4 means 4 disks are used to build this RAID array;
-l 10 selects the RAID 10 scheme; the names of the 4 member disks come last.

[root@bigdata-senior01 ~]# mdadm -Cv /dev/md0 -a yes -n 4 -l 10 /dev/sdc /dev/sdd /dev/sde /dev/sdf
mdadm: layout defaults to n2
mdadm: layout defaults to n2
mdadm: chunk size defaults to 512K
mdadm: size set to 1046528K
mdadm: Fail create md0 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

[root@bigdata-senior01 ~]# file /dev/md0
/dev/md0: block special
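
RAID 10 performs an initial resync in the background after creation; if you want to watch it, /proc/mdstat shows the progress (an optional step):

#watch the initial resync progress (Ctrl-C to exit)
watch -n 1 cat /proc/mdstat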

3) Format the array

[root@bigdata-senior01 ~]# mkfs
mkfs         mkfs.btrfs   mkfs.cramfs  mkfs.ext2    mkfs.ext3    mkfs.ext4    mkfs.minix   mkfs.xfs
[root@bigdata-senior01 ~]# mkfs.ext4 /dev/md0
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=128 blocks, Stripe width=256 blocks
130816 inodes, 523264 blocks
26163 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=536870912
16 block groups
32768 blocks per group, 32768 fragments per group
8176 inodes per group
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912

Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

4) Mount the array
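
The mount point must exist before mounting; if /raid/raid10 has not been created yet, create it first:

#create the mount point if it does not already exist
mkdir -p /raid/raid10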

[root@bigdata-senior01 ~]# mount /dev/md0 /raid/raid10

[root@bigdata-senior01 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   32G  4.2G   28G   13% /
devtmpfs                 901M     0  901M    0% /dev
tmpfs                    912M     0  912M    0% /dev/shm
tmpfs                    912M  8.6M  904M    1% /run
tmpfs                    912M     0  912M    0% /sys/fs/cgroup
/dev/sda1               1014M  143M  872M   15% /boot
/dev/sdb1                2.0G   67M  2.0G    4% /backup
tmpfs                    183M     0  183M    0% /run/user/1004
/dev/md0                 2.0G  6.0M  1.9G    1% /raid/raid10

The array capacity is 2 GB, as expected: RAID 10 over four 1 GB disks yields half the raw capacity (4 × 1 GB / 2 = 2 GB).

5) View the array details

[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 16:23:44 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 16:31:05 2019
             State : clean 
    Active Devices : 4
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 1ad7fbe9:3ed88dbf:48408f25:9d49eae2
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       32        0      active sync set-A   /dev/sdc
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

6) Edit /etc/fstab to mount the array permanently

vi /etc/fstab
# /etc/fstab
# Created by anaconda on Sun Apr 29 17:25:33 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=fa75616e-a122-4c73-9fd4-b1d50a4af91a /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0
/dev/sdb1               /backup                 xfs     defaults        0 0
/dev/sdb2               swap                    swap    defaults        0 0
/dev/md0                /raid/raid10            ext4    defaults        0 0
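
Since the md device name can change across reboots (see the /dev/md127 note below), it may be more robust to reference the filesystem by UUID in fstab. A sketch; the UUID placeholder must be replaced with the value reported by blkid:

#look up the filesystem UUID of the array
blkid /dev/md0
#then use it in /etc/fstab instead of the device name (placeholder shown below)
#UUID=<uuid-from-blkid>  /raid/raid10  ext4  defaults  0 0
#finally, verify the entry without rebooting
mount -a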

7) Simulate a disk failure and repair it

#Simulate a failure of /dev/sdc
[root@bigdata-senior01 ~]# mdadm /dev/md0 -f /dev/sdc
mdadm: set /dev/sdc faulty in /dev/md0

[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 16:23:44 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 16:42:27 2019
             State : clean, degraded 
    Active Devices : 3
   Working Devices : 3
    Failed Devices : 1
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 1ad7fbe9:3ed88dbf:48408f25:9d49eae2
            Events : 19

    Number   Major   Minor   RaidDevice State
       -       0        0        0      removed
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

       0       8       32        -      faulty   /dev/sdc

#Check the RAID status: one 'U' is missing ([_UUU]), which means one of the 4 disks is down
[root@bigdata-senior01 ~]# cat /proc/mdstat 
Personalities : [raid10] 
md0 : active raid10 sdf[3] sde[2] sdd[1] sdc[0](F)
      2093056 blocks super 1.2 512K chunks 2 near-copies [4/3] [_UUU]

#Remove /dev/sdc
[root@bigdata-senior01 ~]# mdadm /dev/md0 -r /dev/sdc
mdadm: hot removed /dev/sdc from /dev/md0

#Check the result with lsblk and mdadm -D /dev/md0
[root@bigdata-senior01 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE   MOUNTPOINT
sda               8:0    0   20G  0 disk   
├─sda1            8:1    0    1G  0 part   /boot
└─sda2            8:2    0   19G  0 part   
  ├─centos-root 253:0    0   32G  0 lvm    /
  └─centos-swap 253:1    0    2G  0 lvm    [SWAP]
sdb               8:16   0   20G  0 disk   
├─sdb1            8:17   0    2G  0 part   /backup
├─sdb2            8:18   0    3G  0 part   [SWAP]
└─sdb3            8:19   0   15G  0 part   
  └─centos-root 253:0    0   32G  0 lvm    /
sdc               8:32   0    1G  0 disk   #no longer part of the array
sdd               8:48   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 /raid/raid10
sde               8:64   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 /raid/raid10
sdf               8:80   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 /raid/raid10
sr0              11:0    1  4.2G  0 rom 

#Now assume /dev/sdc has been repaired and add it back to the array
[root@bigdata-senior01 ~]# mdadm /dev/md0 -a /dev/sdc
 mdadm: added /dev/sdc

#Four 'U's ([UUUU]) means all 4 disks are healthy again
[root@bigdata-senior01 ~]# cat /proc/mdstat 
Personalities : [raid10] 
md0 : active raid10 sdc[4] sdf[3] sde[2] sdd[1]
      2093056 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
      
unused devices: <none>

You can verify this with mdadm -D /dev/md0 as well.

Finally:
#Important: at startup mdadm checks /etc/mdadm.conf and tries to auto-assemble arrays, so after configuring a RAID for the first time, export its information to /etc/mdadm.conf
[root@bigdata-senior01 ~]# mdadm -Ds /dev/md0 > /etc/mdadm.conf
[root@bigdata-senior01 ~]# cat /etc/mdadm.conf 
ARRAY /dev/md0 metadata=1.2 spares=1 name=bigdata-senior01.home.com:0 UUID=8eafb085:276f669d:c1b5cdd7:5ccd72d7

If mdadm.conf is not generated, mdadm will try to assemble the array on its own and may end up naming it /dev/md127 (not necessarily 127).
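
On CentOS 7, a copy of mdadm.conf may also live inside the initramfs, so after creating or changing /etc/mdadm.conf it can help to rebuild the initramfs as well (an optional extra step, not part of the original procedure):

#rebuild the initramfs for the running kernel so it picks up /etc/mdadm.conf
dracut -f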

3. RAID array plus hot-spare disk (4+1 layout): prepare one extra disk that normally sits idle; as soon as a disk in the RAID array fails, the spare takes over automatically

1) Create the array

[root@bigdata-senior01 ~]# cat /proc/partitions #check the disks and partitions
major minor  #blocks  name

   8        0   20971520 sda
   8        1    1048576 sda1
   8        2   19921920 sda2
   8       16   20971520 sdb
   8       17    2097152 sdb1
   8       18    3145728 sdb2
   8       19   15727616 sdb3
   8       32    1048576 sdc
   8       48    1048576 sdd
   8       64    1048576 sde
   8       80    1048576 sdf
   8       96    1048576 sdg
  11        0    4365312 sr0
 253        0   33546240 dm-0
 253        1    2097152 dm-1
[root@bigdata-senior01 ~]# cat /proc/mdstat #check whether any array already exists
Personalities : 
unused devices: <none>

#Compared with a plain array there is one extra option, -x, which sets the number of spare disks
[root@bigdata-senior01 ~]# mdadm -Cv /dev/md0 -a yes -n 4 -l 10 -x 1 /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg
mdadm: layout defaults to n2
mdadm: layout defaults to n2
mdadm: chunk size defaults to 512K
mdadm: size set to 1046528K
mdadm: Fail create md0 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

#Check the array build (resync) progress; wait for it to finish before formatting
[root@bigdata-senior01 ~]# cat /proc/mdstat 
Personalities : [raid10] 
md0 : active raid10 sdg[4](S) sdf[3] sde[2] sdd[1] sdc[0]
      2093056 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
      [=============>.......]  resync = 67.8% (1419776/2093056) finish=0.0min speed=236629K/sec
      
unused devices: <none>
[root@bigdata-senior01 ~]# cat /proc/mdstat 
Personalities : [raid10] 
md0 : active raid10 sdg[4](S) sdf[3] sde[2] sdd[1] sdc[0]
      2093056 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
      
unused devices: <none>
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 17:55:46 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 5
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 17:55:56 2019
             State : clean 
    Active Devices : 4
   Working Devices : 5
    Failed Devices : 0
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 8eafb085:276f669d:c1b5cdd7:5ccd72d7
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       32        0      active sync set-A   /dev/sdc
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

       4       8       96        -      spare   /dev/sdg


[root@bigdata-senior01 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE   MOUNTPOINT
sda               8:0    0   20G  0 disk   
├─sda1            8:1    0    1G  0 part   /boot
└─sda2            8:2    0   19G  0 part   
  ├─centos-root 253:0    0   32G  0 lvm    /
  └─centos-swap 253:1    0    2G  0 lvm    [SWAP]
sdb               8:16   0   20G  0 disk   
├─sdb1            8:17   0    2G  0 part   /backup
├─sdb2            8:18   0    3G  0 part   [SWAP]
└─sdb3            8:19   0   15G  0 part   
  └─centos-root 253:0    0   32G  0 lvm    /
sdc               8:32   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 
sdd               8:48   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 
sde               8:64   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 
sdf               8:80   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 
sdg               8:96   0    1G  0 disk   
└─md0             9:0    0    2G  0 raid10 
sr0              11:0    1  4.2G  0 rom 

#Important: as before, export the array information to /etc/mdadm.conf so mdadm can auto-assemble it at startup
[root@bigdata-senior01 ~]# mdadm -Ds /dev/md0 > /etc/mdadm.conf

2) Format the array

[root@bigdata-senior01 ~]# mkfs.ext4 /dev/md0
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=128 blocks, Stripe width=256 blocks
130816 inodes, 523264 blocks
26163 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=536870912
16 block groups
32768 blocks per group, 32768 fragments per group
8176 inodes per group
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912

Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

3) Mount:

[root@bigdata-senior01 ~]# mount /dev/md0 /raid/raid10
[root@bigdata-senior01 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   32G  4.2G   28G   13% /
devtmpfs                 901M     0  901M    0% /dev
tmpfs                    912M     0  912M    0% /dev/shm
tmpfs                    912M  8.7M  903M    1% /run
tmpfs                    912M     0  912M    0% /sys/fs/cgroup
/dev/sda1               1014M  143M  872M   15% /boot
/dev/sdb1                2.0G   67M  2.0G    4% /backup
tmpfs                    183M     0  183M    0% /run/user/1004
/dev/md0                 2.0G  6.0M  1.9G    1% /raid/raid10

4) Failure test

#Mark /dev/sdc as faulty
[root@bigdata-senior01 ~]# mdadm /dev/md0 -f /dev/sdc
mdadm: set /dev/sdc faulty in /dev/md0
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 17:55:46 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 5
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 19:09:03 2019
             State : clean, degraded, recovering 
    Active Devices : 3
   Working Devices : 4
    Failed Devices : 1
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

    Rebuild Status : 87% complete

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 8eafb085:276f669d:c1b5cdd7:5ccd72d7
            Events : 32

    Number   Major   Minor   RaidDevice State
       4       8       96        0      spare rebuilding   /dev/sdg
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

       0       8       32        -      faulty   /dev/sdc

#/dev/sdg has automatically taken over for /dev/sdc
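
The rebuild onto the spare takes a while; besides cat /proc/mdstat, you can block until it finishes, which is handy in scripts (an optional step):

#wait for the rebuild/resync on /dev/md0 to complete before continuing
mdadm --wait /dev/md0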

5) Remove the failed disk /dev/sdc, then add it back

[root@bigdata-senior01 ~]# mdadm /dev/md0 -r /dev/sdc
mdadm: hot removed /dev/sdc from /dev/md0
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 17:55:46 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 19:13:08 2019
             State : clean 
    Active Devices : 4
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 8eafb085:276f669d:c1b5cdd7:5ccd72d7
            Events : 37

    Number   Major   Minor   RaidDevice State
       4       8       96        0      active sync set-A   /dev/sdg
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

[root@bigdata-senior01 ~]# mdadm /dev/md0 -a /dev/sdc
mdadm: added /dev/sdc
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 17:55:46 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 5
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 19:14:38 2019
             State : clean 
    Active Devices : 4
   Working Devices : 5
    Failed Devices : 0
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : 8eafb085:276f669d:c1b5cdd7:5ccd72d7
            Events : 38

    Number   Major   Minor   RaidDevice State
       4       8       96        0      active sync set-A   /dev/sdg
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

       5       8       32        -      spare   /dev/sdc

6) Multiple spares can be added, and they can be hot-added after the array has been created

#As in the earlier example, the array was created with mdadm -Cv /dev/md0 -a yes -n 4 -l 10 /dev/sdc /dev/sdd /dev/sde /dev/sdf; the spare can be added afterwards
[root@bigdata-senior01 ~]# mdadm /dev/md0 --add-spare /dev/sdg
mdadm: added /dev/sdg
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jan 16 22:16:28 2019
        Raid Level : raid10
        Array Size : 2093056 (2044.00 MiB 2143.29 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 4
     Total Devices : 5
       Persistence : Superblock is persistent

       Update Time : Wed Jan 16 22:19:25 2019
             State : clean 
    Active Devices : 4
   Working Devices : 5
    Failed Devices : 0
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : bigdata-senior01.home.com:0  (local to host bigdata-senior01.home.com)
              UUID : c9d67482:d9fb7776:3384ac75:2d46763a
            Events : 18

    Number   Major   Minor   RaidDevice State
       0       8       32        0      active sync set-A   /dev/sdc
       1       8       48        1      active sync set-B   /dev/sdd
       2       8       64        2      active sync set-A   /dev/sde
       3       8       80        3      active sync set-B   /dev/sdf

       4       8       96        -      spare   /dev/sdg
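
A hot spare is most useful when failures are noticed promptly; mdadm can also monitor arrays and send mail when a disk fails. A minimal sketch, with the e-mail address as a placeholder (on CentOS 7 this is normally handled by the mdmonitor service):

#add a notification address to /etc/mdadm.conf (address is illustrative)
echo 'MAILADDR root@localhost' >> /etc/mdadm.conf
#run the monitor in the background
mdadm --monitor --scan --daemonise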

 

4. Removing an array; removing it incorrectly can cause all kinds of strange problems

1) Unmount the RAID device

[root@bigdata-senior01 ~]# umount /dev/md0

2) Stop the array

[root@bigdata-senior01 ~]# mdadm -S /dev/md0
mdadm: stopped /dev/md0

PS: to reassemble (restart) the array:
[root@bigdata-senior01 ~]# mdadm -A -s /dev/md0
mdadm: Fail create md0 when using /sys/module/md_mod/parameters/new_array
mdadm: /dev/md0 has been started with 4 drives and 1 spare.

#(state after the array has been stopped: the device no longer exists)
[root@bigdata-senior01 ~]# mdadm -D /dev/md0
mdadm: cannot open /dev/md0: No such file or directory
[root@bigdata-senior01 ~]# cat /proc/mdstat 
Personalities : [raid10] 
unused devices: <none>

3) Wipe the member disks (clear the RAID superblocks)

[root@bigdata-senior01 ~]# mdadm --misc --zero-superblock /dev/sdc
[root@bigdata-senior01 ~]# mdadm --misc --zero-superblock /dev/sdd
[root@bigdata-senior01 ~]# mdadm --misc --zero-superblock /dev/sde
[root@bigdata-senior01 ~]# mdadm --misc --zero-superblock /dev/sdf
[root@bigdata-senior01 ~]# mdadm --misc --zero-superblock /dev/sdg

4) Delete or comment out the related entries in mdadm.conf, fstab, etc.

[root@bigdata-senior01 ~]# cat /etc/mdadm.conf 
# ARRAY /dev/md0 metadata=1.2 spares=1 name=bigdata-senior01.home.com:0 UUID=8eafb085:276f669d:c1b5cdd7:5ccd72d7


[root@bigdata-senior01 ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Sun Apr 29 17:25:33 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=fa75616e-a122-4c73-9fd4-b1d50a4af91a /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0
/dev/sdb1               /backup                 xfs     defaults        0 0
/dev/sdb2               swap                    swap    defaults        0 0
# /dev/md0                /raid/raid10            ext4    defaults        0 0

Reboot and verify the result.
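
After the reboot, a quick check that nothing gets assembled any more:

#the array should no longer appear
cat /proc/mdstat
lsblk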