Three masters, three slaves configuration
---First make sure Docker is running
systemctl start docker

---Pull the redis image
docker pull redis:6.0.8

---Start the redis instances
docker run -d --name redis-node-1 --net host --privileged=true -v /data/redis/share/redis-node-1:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6381
docker run -d --name redis-node-2 --net host --privileged=true -v /data/redis/share/redis-node-2:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6382
docker run -d --name redis-node-3 --net host --privileged=true -v /data/redis/share/redis-node-3:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6383
docker run -d --name redis-node-4 --net host --privileged=true -v /data/redis/share/redis-node-4:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6384
docker run -d --name redis-node-5 --net host --privileged=true -v /data/redis/share/redis-node-5:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6385
docker run -d --name redis-node-6 --net host --privileged=true -v /data/redis/share/redis-node-6:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6386
----Parameter explanation
docker run: run a container instance
--name: container name
--net host: use the host's IP and ports directly (instead of the default bridge network)
--privileged=true: run with root privileges on the host
-v: data volume, host path:path inside the container
redis:6.0.8: redis image and tag
--cluster-enabled yes: enable Redis Cluster mode
--appendonly yes: enable AOF persistence
--port: redis port
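
The six docker run commands above differ only in the node number, so they can also be launched with a small shell loop; this is just a sketch, assuming the same image, host networking and /data/redis/share layout used above:

# Sketch: start redis-node-1 .. redis-node-6 with the same options as above
for i in 1 2 3 4 5 6; do
  docker run -d --name redis-node-${i} --net host --privileged=true \
    -v /data/redis/share/redis-node-${i}:/data redis:6.0.8 \
    --cluster-enabled yes --appendonly yes --port 638${i}
done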

---Enter one of the containers and build the cluster
root@localhost:/data# docker exec -it redis-node-1 /bin/bash
root@localhost:/data# redis-cli --cluster create 192.168.49.220:6381 192.168.49.220:6382 192.168.49.220:6383 192.168.49.220:6384 192.168.49.220:6385 192.168.49.220:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.49.220:6385 to 192.168.49.220:6381
Adding replica 192.168.49.220:6386 to 192.168.49.220:6382
Adding replica 192.168.49.220:6384 to 192.168.49.220:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
replicates 38337527cd94e19a40a4c9837860caceadd788e9
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

---Connect to 6381 and check the cluster status
root@localhost:/data# redis-cli -p 6381
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:229
cluster_stats_messages_pong_sent:219
cluster_stats_messages_sent:448
cluster_stats_messages_ping_received:214
cluster_stats_messages_pong_received:229
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:448
127.0.0.1:6381> cluster nodes
e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385@16385 slave 38337527cd94e19a40a4c9837860caceadd788e9 0 1672669323274 2 connected
5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386@16386 slave 62c533cf3cfe52740aa9775be46fa46783553dc7 0 1672669325317 3 connected
62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383@16383 master - 0 1672669323000 3 connected 10923-16383
38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382@16382 master - 0 1672669324000 2 connected 5461-10922
90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381@16381 myself,master - 0 1672669323000 1 connected 0-5460
91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384@16384 slave 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 0 1672669324297 1 connected
Master/slave failover and migration
Reading and writing data
---Setting a key from a plain single-node connection produces an error when the key's hash slot belongs to another node
root@localhost:/data# redis-cli -p 6381
127.0.0.1:6381> set k1 v1
(error) MOVED 12706 192.168.49.220:6383
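
The slot number in the MOVED reply is CRC16(key) mod 16384. You can check which slot a key maps to before writing it; a quick sketch using the k1 key from above:

127.0.0.1:6381> cluster keyslot k1
(integer) 12706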

---Connecting in cluster mode (-c) instead, the client automatically follows the redirect to the node that owns the slot
root@localhost:/data# redis-cli -p 6381 -c
127.0.0.1:6381> set k1 v1
-> Redirected to slot [12706] located at 192.168.49.220:6383
OK
192.168.49.220:6383>
192.168.49.220:6383> set k2 v2
-> Redirected to slot [449] located at 192.168.49.220:6381
OK
192.168.49.220:6381>
192.168.49.220:6381> set k4 v4
-> Redirected to slot [8455] located at 192.168.49.220:6382
OK
192.168.49.220:6382>


----Check cluster info
redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 1 keys | 5461 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 5461 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 3 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Master/slave switchover
----Current cluster state
127.0.0.1:6381> cluster nodes
e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385@16385 slave 38337527cd94e19a40a4c9837860caceadd788e9 0 1672673392000 2 connected
5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386@16386 slave 62c533cf3cfe52740aa9775be46fa46783553dc7 0 1672673393000 3 connected
62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383@16383 master - 0 1672673391000 3 connected 10923-16383
38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382@16382 master - 0 1672673393000 2 connected 5461-10922
90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381@16381 myself,master - 0 1672673391000 1 connected 0-5460
91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384@16384 slave 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 0 1672673393886 1 connected

----Stop redis-node-1
docker stop redis-node-1

----Check the running containers
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4419caa2c5ac redis:6.0.8 "docker-entrypoint.s…" About an hour ago Up About an hour redis-node-6
d8b1468d8e65 redis:6.0.8 "docker-entrypoint.s…" About an hour ago Up About an hour redis-node-5
5c89d479c8dc redis:6.0.8 "docker-entrypoint.s…" About an hour ago Up About an hour redis-node-4
135cc61d4ada redis:6.0.8 "docker-entrypoint.s…" About an hour ago Up About an hour redis-node-3
d07ffe2da97a redis:6.0.8 "docker-entrypoint.s…" About an hour ago Up About an hour redis-node-2


----Enter the redis-node-2 container
docker exec -it redis-node-2 /bin/bash

----Connect to redis in cluster mode
root@localhost:/data# redis-cli -p 6382 -c
127.0.0.1:6382>

----Check the cluster state
127.0.0.1:6382> cluster nodes
38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382@16382 myself,master - 0 1672673600000 2 connected 5461-10922
e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385@16385 slave 38337527cd94e19a40a4c9837860caceadd788e9 0 1672673601346 2 connected
62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383@16383 master - 0 1672673600000 3 connected 10923-16383
5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386@16386 slave 62c533cf3cfe52740aa9775be46fa46783553dc7 0 1672673600000 3 connected
90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381@16381 master,fail - 1672673487943 1672673484886 1 disconnected
91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384@16384 master - 0 1672673599296 7 connected 0-5460

6381 is now marked master,fail
Its former slave 6384 has been promoted to the new master
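
How quickly the slave takes over is governed by cluster-node-timeout (15000 ms by default); a quick way to check the value on any node, as a sketch:

127.0.0.1:6382> config get cluster-node-timeout
1) "cluster-node-timeout"
2) "15000"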

----Read back the keys created earlier
127.0.0.1:6382> get k1
-> Redirected to slot [12706] located at 192.168.49.220:6383
"v1"
192.168.49.220:6383> get k2
-> Redirected to slot [449] located at 192.168.49.220:6384
"v2"
192.168.49.220:6384> get k3
(nil)
192.168.49.220:6384> get k4
-> Redirected to slot [8455] located at 192.168.49.220:6382
"v4"

----Restart redis-node-1
docker start redis-node-1

----Check the cluster state; 6381 has rejoined as a slave of 6384
127.0.0.1:6382> cluster nodes
38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382@16382 myself,master - 0 1672674266000 2 connected 5461-10922
e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385@16385 slave 38337527cd94e19a40a4c9837860caceadd788e9 0 1672674266672 2 connected
62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383@16383 master - 0 1672674265000 3 connected 10923-16383
5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386@16386 slave 62c533cf3cfe52740aa9775be46fa46783553dc7 0 1672674264622 3 connected
90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381@16381 slave 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 0 1672674267000 7 connected
91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384@16384 master - 0 1672674267696 7 connected 0-5460

----To restore the original master/slave roles, stop the current master 6384 and start it again, so that the master role fails back to 6381
docker stop redis-node-4
docker start redis-node-4
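
An alternative that avoids restarting the container is a manual failover initiated from the replica side; a sketch, run against 6381 while it is the slave of 6384 (it must be connected and in sync):

root@localhost:/data# redis-cli -p 6381 cluster failover
OK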

----Check the cluster state
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 1 keys | 5461 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 5462 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 5461 slots | 1 slaves.
[OK] 3 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Cluster scale-out: from three masters & three slaves to four masters & four slaves
----Create two new redis instances
docker run -d --name redis-node-7 --net host --privileged=true -v /data/redis/share/redis-node-7:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6387
docker run -d --name redis-node-8 --net host --privileged=true -v /data/redis/share/redis-node-8:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6388

----Enter the redis-node-7 container
docker exec -it redis-node-7 /bin/bash

----Add node 7 to the existing cluster as a master
redis-cli --cluster add-node 192.168.49.220:6387 192.168.49.220:6381
>>> Adding node 192.168.49.220:6387 to cluster 192.168.49.220:6381
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.49.220:6387 to make it join the cluster.
[OK] New node added correctly.

----Check the cluster state
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 1 keys | 5461 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 5462 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 5461 slots | 1 slaves.
192.168.49.220:6387 (91fb5105...) -> 0 keys | 0 slots | 0 slaves. #### the new master has no slots yet
[OK] 3 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
M: 91fb5105e5798e7af66456dae5011356ecffc525 192.168.49.220:6387
slots: (0 slots) master
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

----Reshard the hash slots
redis-cli --cluster reshard <IP>:<port>
redis-cli --cluster reshard 192.168.49.220:6381

  1. After the new node joins, the slots have to be reshuffled; split them evenly across the 4 masters (16384/4 = 4096)
  2. Assign the moved slots to the newly added node (the prompt asks for node IDs; a non-interactive sketch follows below)

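The same reshard can be driven without the interactive prompts; a sketch of moving 4096 slots onto the new node in one command, assuming the 6387 node ID shown above (--cluster-from all takes the slots evenly from the existing masters):

redis-cli --cluster reshard 192.168.49.220:6381 \
  --cluster-from all \
  --cluster-to 91fb5105e5798e7af66456dae5011356ecffc525 \
  --cluster-slots 4096 --cluster-yes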

----Check the cluster again to see the new slot distribution
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 0 keys | 4096 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6387 (91fb5105...) -> 1 keys | 4096 slots | 0 slaves.
[OK] 3 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
M: 91fb5105e5798e7af66456dae5011356ecffc525 192.168.49.220:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master #### each of the original three masters handed part of its range to 6387
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

----Attach a slave to the new master
redis-cli --cluster add-node 192.168.49.220:6388 192.168.49.220:6387 --cluster-slave --cluster-master-id 91fb5105e5798e7af66456dae5011356ecffc525   # this is 6387's node ID; substitute your own
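
If --cluster-master-id is left out, redis-cli picks a master for the new slave itself (one of the masters with the fewest slaves, which here would also be 6387); a minimal sketch of that variant:

redis-cli --cluster add-node 192.168.49.220:6388 192.168.49.220:6387 --cluster-slave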

----Check the cluster state
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 0 keys | 4096 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6387 (91fb5105...) -> 1 keys | 4096 slots | 1 slaves. #### the slave has been attached (1 slaves)
[OK] 3 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
S: d0f1927ded3a031b0f1b150486da8d53424163ef 192.168.49.220:6388
slots: (0 slots) slave
replicates 91fb5105e5798e7af66456dae5011356ecffc525
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
M: 91fb5105e5798e7af66456dae5011356ecffc525 192.168.49.220:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
1 additional replica(s)
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Cluster scale-in once traffic drops: from four masters & four slaves back to three masters & three slaves
####
Remove the slave node first
Reshard its master's slots away
Delete the master node
The cluster is back to three masters & three slaves
####

----Remove slave node 6388
redis-cli --cluster del-node <ip>:<slave port> <node ID of 6388>
root@localhost:/data# redis-cli --cluster del-node 192.168.49.220:6388 d0f1927ded3a031b0f1b150486da8d53424163ef
>>> Removing node d0f1927ded3a031b0f1b150486da8d53424163ef from cluster 192.168.49.220:6388
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.


root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 0 keys | 4096 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6387 (91fb5105...) -> 1 keys | 4096 slots | 0 slaves. # no slave anymore

----Reshard the slots
redis-cli --cluster reshard 192.168.49.220:6381
root@localhost:/data# redis-cli --cluster reshard 192.168.49.220:6381
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
M: 91fb5105e5798e7af66456dae5011356ecffc525 192.168.49.220:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? 90e7a7aaa66027caaeb3482be72d9df7f4c0478b # which node receives the freed slots; here I hand them all to node-1
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: 91fb5105e5798e7af66456dae5011356ecffc525 # which node gives up its slots: node-7
Source node #2: done

...... (review the moving plan and confirm with yes)

----Check the cluster state
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 1 keys | 8192 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6387 (91fb5105...) -> 0 keys | 0 slots | 0 slaves. # no slots and no slave left
[OK] 3 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-6826],[10923-12287] (8192 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
M: 91fb5105e5798e7af66456dae5011356ecffc525 192.168.49.220:6387
slots: (0 slots) master
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

---Delete master 6387
redis-cli --cluster del-node <ip>:<port> <node ID of 6387>
root@localhost:/data# redis-cli --cluster del-node 192.168.49.220:6387 91fb5105e5798e7af66456dae5011356ecffc525
>>> Removing node 91fb5105e5798e7af66456dae5011356ecffc525 from cluster 192.168.49.220:6387
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.


----Check the cluster state; the cluster is back to three masters & three slaves
root@localhost:/data# redis-cli --cluster check 192.168.49.220:6381
192.168.49.220:6381 (90e7a7aa...) -> 1 keys | 8192 slots | 1 slaves.
192.168.49.220:6382 (38337527...) -> 1 keys | 4096 slots | 1 slaves.
192.168.49.220:6383 (62c533cf...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 3 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.49.220:6381)
M: 90e7a7aaa66027caaeb3482be72d9df7f4c0478b 192.168.49.220:6381
slots:[0-6826],[10923-12287] (8192 slots) master
1 additional replica(s)
S: 5969947db8a94c452f8d67d9d15310b620c35169 192.168.49.220:6386
slots: (0 slots) slave
replicates 62c533cf3cfe52740aa9775be46fa46783553dc7
M: 38337527cd94e19a40a4c9837860caceadd788e9 192.168.49.220:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 62c533cf3cfe52740aa9775be46fa46783553dc7 192.168.49.220:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 91b59c22cbcc5f1cd8364e0a73bbc2309173816d 192.168.49.220:6384
slots: (0 slots) slave
replicates 90e7a7aaa66027caaeb3482be72d9df7f4c0478b
S: e7f591a80011233739fed3cfad79dfd2bff5d9e3 192.168.49.220:6385
slots: (0 slots) slave
replicates 38337527cd94e19a40a4c9837860caceadd788e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
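
Note that the slot layout is no longer even (8192/4096/4096), since all of 6387's slots were handed to 6381. If an even split matters, redis-cli has a rebalance subcommand that moves slots between masters until they are balanced; a sketch, run against any live node:

redis-cli --cluster rebalance 192.168.49.220:6381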