系统版本:centos6.4最小化

node1:192.168.31.111

node2:192.168.31.112

编译环境:yum -y groupinstall "Development tools" "Server Platform Development"

drbd版本:drbd-8.4.7-1.tar.gz、drbd-utils-8.9.5.tar.gz

前提条件:

1、节点node1、node2基于ssh互信

2、节点之间时间同步

3、节点名称与hostname(uname -n)一致,不要使用dns解析

详细看操作:

node1:

ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''

ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.31.112

node2:

ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''

ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.31.111


yum -y install rdate

rdate -s time-b.nist.gov

crontab -u root -e

*/2 * * * * /usr/bin/rdate -s time-b.nist.gov


node1上:

vim /etc/hosts

192.168.31.111    node1

192.168.31.112    node2

hostname node1

vim /etc/sysconfig/network

HOSTNAME=node1


node2上:

vim /etc/hosts

192.168.31.111    node1

192.168.31.112    node2

hostname node2

vim /etc/sysconfig/network

HOSTNAME=node2



准备操作搞定于是编译安装drbd:

为了避免编译过程遇到的错误,先安装下依赖包

yum -y install libxslt docbook-style-xsl

tar xf drbd-8.4.7-1.tar.gz

cd drbd-8.4.7-1

make && make install

cd /root

tar xf drbd-utils-8.9.5.tar.gz

cd drbd-utils-8.9.5

./configure --prefix=/usr/local/drbd --sysconfdir=/etc/

make && make install

modprobe drbd

lsmod |grep drbd


#########################################################

为我的系统加上一块5G的磁盘(node1和node2分别加上)

node1:

fdisk /dev/sdb

p,n,p,1,+3G,w

partx -a /dev/sdb

node2同理


配置drbd文件:

三个配置文件:

/etc/drbd.conf,/etc/drbd.d/global_common.conf,再就是自己创建的资源配置文件/etc/drbd.d/web.res


vi /etc/drbd.d/global_common.conf

global {

        usage-count yes;

        # minor-count dialog-refresh disable-ip-verification

        # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600;

}


common {

        handlers {

                # These are EXAMPLE handlers only.

                # They may have severe implications,

                # like hard resetting the node under certain circumstances.

                # Be careful when chosing your poison.


                 pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";

                 pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";

                 local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";

                # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";

                # split-brain "/usr/lib/drbd/notify-split-brain.sh root";

                # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";

                # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";

                # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;

        }


        startup {

                # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb

        }


        options {

                # cpu-mask on-no-data-accessible

        }


        disk {

                # size on-io-error fencing disk-barrier disk-flushes

                # disk-drain md-flushes resync-rate resync-after al-extents

                # c-plan-ahead c-delay-target c-fill-target c-max-rate

                # c-min-rate disk-timeout

                on-io-error     detach;

        }


        net {

                # protocol timeout max-epoch-size max-buffers unplug-watermark

                # connect-int ping-int sndbuf-size rcvbuf-size ko-count

                # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri

                # after-sb-1pri after-sb-2pri always-asbp rr-conflict

                # ping-timeout data-integrity-alg tcp-cork on-congestion

                # congestion-fill congestion-extents csums-alg verify-alg

                # use-rle

                cram-hmac-alg   "sha1";

                shared-secret   "mydrbdlab";

        }

}

资源配置文件:

vi /etc/drbd.d/web.res

resource web {

        on node1 {

                device  /dev/drbd0;

                disk    /dev/sdb1;

                address 192.168.31.111:7789;

                meta-disk       internal;

        }

        on node2 {

                device  /dev/drbd0;

                disk    /dev/sdb1;

                address 192.168.31.112:7789;

                meta-disk       internal;

        }

}

将在node1上配置好的文件传送到node2上

scp /etc/drbd.d/global_common.conf /etc/drbd.d/web.res node2:/etc/drbd.d/


由于两个节点现在都是初建立drbd于是进行初始化:

在node1和node2上分别执行:

drbdadm create-md web(web为资源名称)


在node1和node2上分别执行:

启动drbd

service drbd start


查看drbd状态:

cat /proc/drbd

[root@node1 drbd.d]# 

[root@node1 drbd.d]# cat /proc/drbd

version: 8.4.6 (api:1/proto:86-101)

GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@node1, 2016-05-13 20:06:11

 0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----

    ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:3156604


可以看出两个节点目前都是secondary,于是将node1改变为primary,执行以下操作:

drbdsetup /dev/drbd0 primary -o(这个命令本人亲测在自己的系统出错)改用下面的命令替换这个

drbdadm -- --overwrite-data-of-peer primary web(web为资源名称)

watch -n 1 'cat /proc/drbd':每秒刷新一次命令执行的结果,用于观察同步进度

同步数据完成后再次查看cat /proc/drbd

[root@node1 drbd.d]# cat /proc/drbd

version: 8.4.6 (api:1/proto:86-101)

GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@node1, 2016-05-13 20:06:11

 0: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r-----

    ns:1692760 nr:0 dw:0 dr:1693424 al:0 bm:0 lo:0 pe:2 ua:0 ap:0 ep:1 wo:f oos:1464956

[=========>..........] sync'ed: 53.7% (1464956/3156604)K

finish: 0:00:56 speed: 25,864 (17,804) K/sec


[root@node1 ~]# cat /proc/drbd

version: 8.4.6 (api:1/proto:86-101)

GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@node1, 2016-05-13 20:06:11

 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----

    ns:3272832 nr:0 dw:116228 dr:3157977 al:40 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0

至此安装配置已经完成

接下来挂载刚刚的分区

mkdir /mnt/drbd

mke2fs -j /dev/drbd0

mount /dev/drbd0 /mnt/drbd

cd /mnt/drbd

[root@node1 drbd]# ls

lost+found

创建一个文件:dd if=/dev/zero of=/mnt/drbd/tmp.txt bs=10K count=20

[root@node1 drbd]# ls

lost+found  tmp.txt


注意:只有primary节点才能挂载

于是为了验证从节点是否同步到了数据,该主节点必须卸载,然后让刚刚的从节点挂载上去查看


操作如下:

umount /mnt/drbd

drbdadm secondary web

cat /proc/drbd

[root@node1 ~]# cat /proc/drbd

version: 8.4.6 (api:1/proto:86-101)

GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@node1, 2016-05-13 20:06:11

 0: cs:Connected ro:Secondary/Secondary ds:UpToDate/UpToDate C r-----

    ns:3273108 nr:0 dw:116504 dr:3158338 al:41 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0


node2:

drbdadm primary web

[root@node2 mnt]# cat /proc/drbd

version: 8.4.6 (api:1/proto:86-101)

GIT-hash: 833d830e0152d1e457fa7856e71e11248ccf3f70 build by root@node2, 2016-05-13 20:12:04

 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----

    ns:0 nr:3273108 dw:3273108 dr:664 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0


mkdir /mnt/drbd

mount /dev/drbd0 /mnt/drbd

[root@node2 ~]# cd /mnt/drbd/

[root@node2 drbd]# ls

lost+found  tmp.txt


可以清晰的查看到tmp.txt文件,至此同步数据完成