一利用工具搭建redis集群

0前提条件:

执行下列步骤的前提是:已经安装了redis、配置了服务器之间的互信,并添加了域名解析(具体步骤参考前面的安装文档)。

1安装redis支持的插件(不指定版本会安装最新版的redis gem,新版与redis-trib.rb不兼容,因此这里指定安装3.3.5):

[root@db01 ~]# yum makecache fast
[root@db01 ~]# yum install rubygems
[root@db01 ~]# gem sources --remove https://rubygems.org/
[root@db01 ~]# gem sources -a http://mirrors.aliyun.com/rubygems/
[root@db01 ~]# gem update --system
[root@db01 ~]# gem install redis -v 3.3.5

2分别在三台机器中清除掉旧数据:

[root@db01 ~]# pkill redis
[root@db01 ~]# ps -ef|grep redis
[root@db01 ~]# rm -rf /opt/redis_cluster/redis_{6380,6381}/{conf,logs,pid}
[root@db01 ~]# rm -rf /data/redis_cluster/redis_{6380,6381}
[root@db01 ~]# mkdir -p /opt/redis_cluster/redis_{6380,6381}/{conf,logs,pid}
[root@db01 ~]# mkdir -p /data/redis_cluster/redis_{6380,6381}
[root@db01 ~]#

3将redis执行脚本上传至服务器家目录中:

链接:https://pan.baidu.com/s/1aaEF9wosg-UX-ufxYMOteA 
提取码:abcd
[root@db01 ~]# sh redis_shell.sh stop 6380 #停止6380
[root@db01 ~]# sh redis_shell.sh ps #查看redis进程
[root@db01 ~]# sh redis_shell.sh start 6380 #运行6380
[root@db01 ~]# sh redis_shell.sh tail 6380 #查看端口日志
[root@db01 ~]# sh redis_shell.sh restart 6380 #重启6380端口

 4编写6380和6381的配置文件并将配置文件发送到其它服务器节点上:

[root@db01 ~]# cat >/opt/redis_cluster/redis_6380/conf/redis_6380.conf<<EOF
> bind 10.0.0.201
> port 6380
> daemonize yes
> pidfile "/opt/redis_cluster/redis_6380/pid/redis_6380.pid"
> logfile "/opt/redis_cluster/redis_6380/logs/redis_6380.log"
> dbfilename "redis_6380.rdb"
> dir "/data/redis_cluster/redis_6380/"
> cluster-enabled yes
> cluster-config-file nodes_6380.conf
> cluster-node-timeout 15000
> EOF
[root@db01 ~]# cd /opt/redis_cluster/
[root@db01 redis_cluster]# cp redis_6380/conf/redis_6380.conf redis_6381/conf/redis_6381.conf
[root@db01 redis_cluster]# sed -i 's#6380#6381#g' redis_6381/conf/redis_6381.conf
[root@db01 redis_cluster]# rsync -avz /opt/redis_cluster/redis_638* db02:/opt/redis_cluster/
[root@db01 redis_cluster]# rsync -avz /opt/redis_cluster/redis_638* db03:/opt/redis_cluster/

 5修改集群中其它节点配置文件里的bind ip后,在各节点上启动所有实例:

[root@db01 ~]# sh redis_shell.sh start 6380
[root@db01 ~]# sh redis_shell.sh start 6381
[root@db01 ~]# ps -ef|grep redis

[root@db02 ~]# find /opt/redis_cluster/redis_638* -type f -name "*.conf"|xargs sed -i "/bind/s#201#202#g"

[root@db03 ~]# find /opt/redis_cluster/redis_638* -type f -name "*.conf"|xargs sed -i "/bind/s#201#203#g"

 6使用工具搭建redis集群:

[root@db01 ~]# cd /opt/redis_cluster/redis/src/
[root@db01 src]# ./redis-trib.rb create --replicas 1 10.0.0.201:6380 10.0.0.202:6380 10.0.0.203:6380 10.0.0.201:6381 10.0.0.202:6381 10.0.0.203:6381

 7修正主从复制关系(redis-trib.rb分配副本时可能把主和从放到同一台机器上,需要手动调整):

[root@db01 ~]# sh redis_shell.sh login 6380
10.0.0.201:6380> cluster nodes
ecaef5e2b992205f47de8230a47e7afcad69b1a1 10.0.0.203:6381 slave be9f4f5d37052b04c63045fc631d0d336def2fdc 0 1618416373199 6 connected
4407f27c55386e16501e28deefcb968e12502697 10.0.0.201:6381 slave 50f925532d5c3b5acc98d3ab2266aea3975c9e3f 0 1618416370175 4 connected
50f925532d5c3b5acc98d3ab2266aea3975c9e3f 10.0.0.202:6380 master - 0 1618416371184 2 connected 5461-10922
be9f4f5d37052b04c63045fc631d0d336def2fdc 10.0.0.203:6380 master - 0 1618416367150 3 connected 10923-16383
85ca53d73950ad2a6b53a1af678cdf097e7b5d06 10.0.0.202:6381 slave bedd4b0e703454112cb0596e5f568aa5296ef6a4 0 1618416372188 5 connected
bedd4b0e703454112cb0596e5f568aa5296ef6a4 10.0.0.201:6380 myself,master - 0 0 1 connected 0-5460
10.0.0.201:6380>
[root@db01 ~]# redis-cli -c -h db02 -p 6381 be9f4f5d37052b04c63045fc631d0d336def2fdc
(error) ERR unknown command 'be9f4f5d37052b04c63045fc631d0d336def2fdc'
[root@db01 ~]# redis-cli -c -h db02 -p 6381 cluster replicate be9f4f5d37052b04c63045fc631d0d336def2fdc
OK
[root@db01 ~]# redis-cli -c -h db03 -p 6381 cluster replicate bedd4b0e703454112cb0596e5f568aa5296ef6a4
OK
[root@db01 ~]# redis-cli -c -h db01 -p 6380 cluster nodes
ecaef5e2b992205f47de8230a47e7afcad69b1a1 10.0.0.203:6381 slave bedd4b0e703454112cb0596e5f568aa5296ef6a4 0 1618416612195 6 connected
4407f27c55386e16501e28deefcb968e12502697 10.0.0.201:6381 slave 50f925532d5c3b5acc98d3ab2266aea3975c9e3f 0 1618416609171 4 connected
50f925532d5c3b5acc98d3ab2266aea3975c9e3f 10.0.0.202:6380 master - 0 1618416611190 2 connected 5461-10922
be9f4f5d37052b04c63045fc631d0d336def2fdc 10.0.0.203:6380 master - 0 1618416613204 3 connected 10923-16383
85ca53d73950ad2a6b53a1af678cdf097e7b5d06 10.0.0.202:6381 slave be9f4f5d37052b04c63045fc631d0d336def2fdc 0 1618416608164 5 connected
bedd4b0e703454112cb0596e5f568aa5296ef6a4 10.0.0.201:6380 myself,master - 0 0 1 connected 0-5460

 8检查集群的完整性:

[root@db01 ~]# cd /opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb check 10.0.0.201:6380

9平衡槽位,使各主节点的槽位数量保持在合理的误差范围内:

[root@db01 src]# ./redis-trib.rb rebalance 10.0.0.201:6380

 二利用工具对redis集群进行扩容:

1添加一对主从并创建配置文件和数据目录

[root@db01 src]# mkdir -p /opt/redis_cluster/redis_{6390,6391}/{conf,logs,pid}
[root@db01 src]# mkdir -p /data/redis_cluster/redis_{6390,6391}
[root@db01 src]# cd /opt/redis_cluster/
[root@db01 redis_cluster]# cp redis_6380/conf/redis_6380.conf redis_6390/conf/redis_6390.conf
[root@db01 redis_cluster]# cp redis_6381/conf/redis_6381.conf redis_6391/conf/redis_6391.conf
[root@db01 redis_cluster]# sed -i 's#6380#6390#g' redis_6390/conf/redis_6390.conf
[root@db01 redis_cluster]# sed -i 's#6381#6391#g' redis_6391/conf/redis_6391.conf

2启动新增的主从节点:

[root@db01 ~]# sh redis_shell.sh start 6390
[root@db01 ~]# sh redis_shell.sh start 6391
[root@db01 ~]# sh redis_shell.sh ps

3将新节点添加到集群

[root@db01 ~]# cd /opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb add-node 10.0.0.201:6390 10.0.0.201:6380
[root@db01 src]# ./redis-trib.rb add-node 10.0.0.201:6391 10.0.0.201:6380

4重新分配槽位:

[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380
>>> Performing Cluster Check (using node 10.0.0.201:6380)
S: 3a9aeeeb4ac0c76a5ba3c66bb917161348582c01 10.0.0.201:6380
slots: (0 slots) slave
replicates a31deef0baf970cef6c798f1e320fb4ba5543daf
M: ae183c4181d953d1ef052e7db473951390ff9d05 10.0.0.201:6390
slots: (0 slots) master
0 additional replica(s)
M: a31deef0baf970cef6c798f1e320fb4ba5543daf 10.0.0.203:6381
slots:0-5460 (5461 slots) master
1 additional replica(s)
M: 6c4c0f7417dfc14468a73ac94a66634181546721 10.0.0.201:6391
slots: (0 slots) master
0 additional replica(s)
M: e92cd97d2c06d2cc02547bcd7ac64b9f6fb44fb5 10.0.0.203:6380
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 8cea03a478990dc7e6d21f1b671c70dd921fcdb2 10.0.0.201:6381
slots: (0 slots) slave
replicates 9e6565d8f0a8ed0d6d56437dcf4b58cbb17a3c0f
M: 9e6565d8f0a8ed0d6d56437dcf4b58cbb17a3c0f 10.0.0.202:6380
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: c37e5f309b2d07012f2efb11053c517b5f51eba0 10.0.0.202:6381
slots: (0 slots) slave
replicates e92cd97d2c06d2cc02547bcd7ac64b9f6fb44fb5
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096 #16384/4=4096因为是4个主从节点,所以除以4
What is the receiving node ID? ae183c4181d953d1ef052e7db473951390ff9d05

 5检查是否ok:

[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb check 10.0.0.201:6380

6检查扩容后的主从复制关系并用图表达出来:

[root@db01 src]# cd ~
[root@db01 ~]# sh redis_shell.sh login 6380
10.0.0.201:6380> cluster nodes
67858b60a11ea933e5dab3ca29e8bc46ea436bbe 10.0.0.201:6391 master - 0 1618418829049 0 connected
ecaef5e2b992205f47de8230a47e7afcad69b1a1 10.0.0.203:6381 slave bedd4b0e703454112cb0596e5f568aa5296ef6a4 0 1618418829050 6 connected
4407f27c55386e16501e28deefcb968e12502697 10.0.0.201:6381 slave 50f925532d5c3b5acc98d3ab2266aea3975c9e3f 0 1618418831567 4 connected
50f925532d5c3b5acc98d3ab2266aea3975c9e3f 10.0.0.202:6380 master - 0 1618418828042 2 connected 6827-10922
13fe096d888882cd5914edb6acdff809c048a082 10.0.0.201:6390 master - 0 1618418830054 7 connected 0-1364 5461-6826 10923-12287
be9f4f5d37052b04c63045fc631d0d336def2fdc 10.0.0.203:6380 master - 0 1618418832072 3 connected 12288-16383
85ca53d73950ad2a6b53a1af678cdf097e7b5d06 10.0.0.202:6381 slave be9f4f5d37052b04c63045fc631d0d336def2fdc 0 1618418831064 5 connected
bedd4b0e703454112cb0596e5f568aa5296ef6a4 10.0.0.201:6380 myself,master - 0 0 1 connected 1365-5460
10.0.0.201:6380>

 7根据图整理主从关系:

[root@db01 ~]# sh redis_shell.sh login 6380
10.0.0.201:6380> cluster nodes
67858b60a11ea933e5dab3ca29e8bc46ea436bbe 10.0.0.201:6391 master - 0 1618421082413 0 connected
ecaef5e2b992205f47de8230a47e7afcad69b1a1 10.0.0.203:6381 slave bedd4b0e703454112cb0596e5f568aa5296ef6a4 0 1618421079382 6 connected
4407f27c55386e16501e28deefcb968e12502697 10.0.0.201:6381 slave 50f925532d5c3b5acc98d3ab2266aea3975c9e3f 0 1618421082916 4 connected
50f925532d5c3b5acc98d3ab2266aea3975c9e3f 10.0.0.202:6380 master - 0 1618421078377 2 connected 6827-10922
13fe096d888882cd5914edb6acdff809c048a082 10.0.0.201:6390 master - 0 1618421080387 7 connected 0-1364 5461-6826 10923-12287
be9f4f5d37052b04c63045fc631d0d336def2fdc 10.0.0.203:6380 master - 0 1618421081397 3 connected 12288-16383
85ca53d73950ad2a6b53a1af678cdf097e7b5d06 10.0.0.202:6381 slave be9f4f5d37052b04c63045fc631d0d336def2fdc 0 1618421077370 5 connected
bedd4b0e703454112cb0596e5f568aa5296ef6a4 10.0.0.201:6380 myself,master - 0 0 1 connected 1365-5460
10.0.0.201:6380>
[root@db01 ~]# redis-cli -h db03 -p 6381
db03:6381> cluster replicate 13fe096d888882cd5914edb6acdff809c048a082
OK
db03:6381>
[root@db01 ~]# redis-cli -h db01 -p 6391
db01:6391> cluster replicate bedd4b0e703454112cb0596e5f568aa5296ef6a4
OK
db01:6391> cluster nodes
67858b60a11ea933e5dab3ca29e8bc46ea436bbe 10.0.0.201:6391 myself,slave bedd4b0e703454112cb0596e5f568aa5296ef6a4 0 0 0 connected
bedd4b0e703454112cb0596e5f568aa5296ef6a4 10.0.0.201:6380 master - 0 1618422286146 1 connected 1365-5460
50f925532d5c3b5acc98d3ab2266aea3975c9e3f 10.0.0.202:6380 master - 0 1618422289718 2 connected 6827-10922
ecaef5e2b992205f47de8230a47e7afcad69b1a1 10.0.0.203:6381 slave 13fe096d888882cd5914edb6acdff809c048a082 0 1618422291228 7 connected
85ca53d73950ad2a6b53a1af678cdf097e7b5d06 10.0.0.202:6381 slave be9f4f5d37052b04c63045fc631d0d336def2fdc 0 1618422288713 3 connected
4407f27c55386e16501e28deefcb968e12502697 10.0.0.201:6381 slave 50f925532d5c3b5acc98d3ab2266aea3975c9e3f 0 1618422287705 2 connected
13fe096d888882cd5914edb6acdff809c048a082 10.0.0.201:6390 master - 0 1618422290723 7 connected 0-1364 5461-6826 10923-12287
be9f4f5d37052b04c63045fc631d0d336def2fdc 10.0.0.203:6380 master - 0 1618422285644 3 connected 12288-16383
db01:6391>
db01:6391> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:1
cluster_stats_messages_sent:11849
cluster_stats_messages_received:11804
db01:6391>