DRBD + Heartbeat + NFS: brief command notes

Prerequisites: add a dedicated disk to each of the two machines, and bring up one extra network link between them for heartbeat/replication traffic.

fdisk -l shows a 10.7 GB device /dev/sdb; create a logical volume on top of /dev/sdb:

[root@scj ~]# pvcreate /dev/sdb             #create the PV
  Physical volume "/dev/sdb" successfully created
[root@scj ~]# pvs
  PV         VG       Fmt  Attr PSize  PFree 
  /dev/sda2  VolGroup lvm2 a--  19.51g     0 
  /dev/sdb            lvm2 a--  10.00g 10.00g
[root@scj ~]# vgcreate drbd /dev/sdb        #create volume group "drbd" and add the PV to it
  Volume group "drbd" successfully created
[root@scj ~]# vgs
  VG       #PV #LV #SN Attr   VSize  VFree 
  VolGroup   1   2   0 wz--n- 19.51g     0 
  drbd       1   0   0 wz--n- 10.00g 10.00g
[root@scj ~]# lvcreate -n dbm -L 9G drbd     #create the LVM logical volume inside VG "drbd"
  Logical volume "dbm" created
[root@scj ~]# lvs
  LV      VG       Attr      LSize  Pool Origin Data%  Move Log Cpy%Sync Convert
  lv_root VolGroup -wi-ao--- 18.51g                                             
  lv_swap VolGroup -wi-ao---  1.00g                                             
  dbm     drbd     -wi-a----  9.00g 
[root@scj ~]# ls /dev/drbd/dbm               #check the newly created logical volume
/dev/drbd/dbm
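
The same logical volume is also reachable under the device-mapper name that the DRBD resource file below points at. A quick sanity check (paths assume the VG/LV names used above):

ls -l /dev/drbd/dbm /dev/mapper/drbd-dbm    # both are symlinks to the same dm device
lvdisplay /dev/drbd/dbm                     # confirm the LV size and state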

I. DRBD installation:

  1. Install the EPEL repository (same on both hosts)

    wget http://download.Fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm

    rpm -ivh epel-release-6-8.noarch.rpm

    rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6

    rpm -ivh http://elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm

    yum list

  2. Install DRBD

    yum -y install drbd84 kmod-drbd84

    modprobe drbd       #load the drbd module; if this errors out, reboot the machine

    lsmod | grep drbd 

    vim /etc/drbd.conf #review the main config file

    Edit the global config file (mostly left at defaults): vi /etc/drbd.d/global_common.conf
    # DRBD is the result of over a decade of development by LINBIT.
    # In case you need professional services for DRBD or have
    # feature requests visit http://www.linbit.com

    global {
        usage-count no;
        # minor-count dialog-refresh disable-ip-verification
        # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600;
    }

    common {
        handlers {
            # These are EXAMPLE handlers only.
            # They may have severe implications,
            # like hard resetting the node under certain circumstances.
            # Be careful when chosing your poison.

            # pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
            # pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
            # local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
            # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
            # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
            # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
            # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
            # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
        }

        startup {
            # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
        }

        options {
            # cpu-mask on-no-data-accessible
        }

        disk {
            # size on-io-error fencing disk-barrier disk-flushes
            # disk-drain md-flushes resync-rate resync-after al-extents
            # c-plan-ahead c-delay-target c-fill-target c-max-rate
            # c-min-rate disk-timeout
        }

        net {
            # protocol timeout max-epoch-size max-buffers unplug-watermark
            # connect-int ping-int sndbuf-size rcvbuf-size ko-count
            # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
            # after-sb-1pri after-sb-2pri always-asbp rr-conflict
            # ping-timeout data-integrity-alg tcp-cork on-congestion
            # congestion-fill congestion-extents csums-alg verify-alg
            # use-rle
            cram-hmac-alg "sha1";      #authentication algorithm for the replication link
            shared-secret "mydrbdlab"; #shared secret (key) used with it
        }
    }


  3. Add a resource

    cat /etc/drbd.d/web.res
    resource web {   
      on fuzai01 {   
        device    /dev/drbd0;   
        disk      /dev/mapper/drbd-dbm;   
        address  172.16.100.2:7789;   
        meta-disk internal;   
      }   
      on fuzai02 {   
        device    /dev/drbd0;   
        disk      /dev/mapper/drbd-dbm;   
        address  172.16.100.3:7789;   
        meta-disk internal;   
      }   
    }
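
    The hostnames after "on" must match each node's uname -n output, or DRBD will refuse to bring the resource up. A quick check on both nodes (hostnames and IPs are the ones assumed in web.res above):

    uname -n                          # must print fuzai01 (fuzai02 on the peer)
    grep -E 'fuzai0[12]' /etc/hosts   # expect 172.16.100.2 fuzai01 and 172.16.100.3 fuzai02
    drbdadm dump web                  # parses web.res and reports configuration errors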

  4. Make sure the configuration files are identical on both machines

  5. Initialize the resource on node1 and node2

    mknod /dev/drbd0 b 147 0   #create the DRBD device node

    drbdadm create-md web      #run on both nodes, once on each
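
    Before a primary can be promoted in the next step, the drbd service has to be started on both nodes so they connect to each other. A minimal sketch, assuming the init script shipped with the drbd84 packages:

    # run on both nodes
    service drbd start
    chkconfig drbd on
    cat /proc/drbd     # both sides should show cs:Connected ro:Secondary/Secondary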

  6. Set the primary node and start the initial sync: drbdadm -- --overwrite-data-of-peer primary all

    Check the status: service drbd status

  7. Format the DRBD device: mkfs.ext4 /dev/drbd0   #not needed on the secondary node

  8. Mount DRBD: mkdir /data   #create the data directory, then: mount /dev/drbd0 /data

  9. DRBD role switchover: on the primary, stop the drbd service (if it will not stop, stop heartbeat first) or unmount with umount /data

                  on the secondary: drbdadm primary web   #set it to the Primary role, then mount /dev/drbd0 /data/
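
    A minimal sketch of a clean manual switchover (resource name web and mount point /data as above; the explicit demotion step is assumed, since the peer cannot be promoted while this side is still Primary):

    # on the current primary
    umount /data
    drbdadm secondary web

    # on the other node
    drbdadm primary web
    mount /dev/drbd0 /data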

II. HEARTBEAT

  1. Install: yum -y install heartbeat

  2. The Heartbeat configuration involves the following files:
    /etc/ha.d/ha.cf          #main config file
    /etc/ha.d/haresources    #resource file
    /etc/ha.d/authkeys       #authentication keys
    /etc/ha.d/resource.d/killnfsd      #NFS restart script, managed by Heartbeat
  3. cat /etc/ha.d/ha.cf 
    debugfile /var/log/ha-debug
    logfile    /var/log/ha-log
    logfacility    local0
    keepalive 2
    deadtime 10
    warntime 6
    udpport    694
    ucast eth0 192.168.1.168
    auto_failback off
    node    fuzai01
    node    fuzai02
    ping 192.168.1.199
    respawn hacluster /usr/lib64/heartbeat/ipfail
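
    One detail the listing glosses over: with ucast, each node's ha.cf points at the other node's heartbeat address, so this single line differs between the two hosts. A sketch, where the second address is purely hypothetical:

    # on fuzai01: peer (fuzai02) heartbeat IP
    ucast eth0 192.168.1.168
    # on fuzai02: peer (fuzai01) heartbeat IP -- hypothetical value
    ucast eth0 192.168.1.167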

    vi /etc/ha.d/haresources
    fuzai01 IPaddr::192.168.1.160/24/eth0 drbddisk::web Filesystem::/dev/drbd0::/data::ext4 killnfsd
    vi /etc/ha.d/resource.d/killnfsd   #don't forget chmod 755; see the sketch after this list
    vi /etc/ha.d/authkeys
    auth 1
    1 crc
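
    The notes do not show the body of killnfsd. A commonly used minimal version simply restarts NFS so clients do not hang on stale file handles after a takeover (a sketch, not necessarily the exact script the author used). Heartbeat also refuses to start unless authkeys is mode 600:

    cat /etc/ha.d/resource.d/killnfsd
    #!/bin/bash
    # restart NFS whenever Heartbeat acquires or releases this resource
    killall -9 nfsd
    /etc/init.d/nfs restart
    exit 0

    chmod 755 /etc/ha.d/resource.d/killnfsd
    chmod 600 /etc/ha.d/authkeys    # heartbeat aborts if authkeys is readable by others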

  4. Start the service and check the VIP
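
    The start command itself is not spelled out; assuming the stock init script, on both nodes:

    service heartbeat start    # the node named in haresources (fuzai01) takes the resources first
    chkconfig heartbeat on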

    ip a | grep eth0

III. NFS installation

1. Install NFS

[root@M1 drbd]# yum install nfs-utils rpcbind -y   
[root@M2 ~]# yum install nfs-utils rpcbind -y

2. Configure the NFS shared directory

[root@M1 drbd]# cat /etc/exports
/data 192.168.1.0/24(rw,sync,no_root_squash,anonuid=0,anongid=0)
[root@M2 ~]# cat /etc/exports
/data 192.168.1.0/24(rw,sync,no_root_squash,anonuid=0,anongid=0)

3. Start the rpcbind and nfs services

/etc/init.d/rpcbind start;chkconfig rpcbind on    

/etc/init.d/nfs start;chkconfig nfs off   (after a failover to the standby, switch back manually once the primary has recovered)
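
A quick way to verify the export from a client on the LAN (the VIP 192.168.1.160 comes from haresources above; /mnt/nfs is just a hypothetical mount point):

showmount -e 192.168.1.160
mkdir -p /mnt/nfs
mount -t nfs 192.168.1.160:/data /mnt/nfs
df -h /mnt/nfs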

IV. Failover test, and how to add a script that monitors the NFS service

http://732233048.blog.51cto.com/9323668/1669417
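
A simple failover drill, assuming both nodes are healthy and heartbeat is running (the linked post covers the NFS-monitoring script itself):

# on the active node
service heartbeat stop
# on the standby, after deadtime (10s here) has passed:
ip a | grep 192.168.1.160    # the VIP should have moved here
df -h /data                  # /dev/drbd0 should now be mounted here
service drbd status          # this node should report Primary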