北京、广州两套redis集群环境
我们的需求是将北京redis集群数据同步到广州redis集群
废话不多说 直接上代码
1、导出脚本sync_export_redis30_aof.sh
sync_export_redis30_aof.sh
# Line offset into the AOF file (computed per master node below).
offset=''
# Redis port currently being processed (filled in per master node below).
port=''
# Redis install root — test path kept for reference:
#install_dir="/opt/web_app/redis-3.0.0"
# Production install root.
install_dir="/opt/supp_app/redis3.0.0"
# Staging directory for exported AOF slices awaiting transfer.
result_dir="${install_dir}/data/export"
# Scratch directory for intermediate AOF fragments.
aof_tmp_dir="${install_dir}/tmp"
# Archive directory for slices that were shipped successfully.
aof_bak_dir="${install_dir}/bak"
# Redis tooling — test paths kept for reference:
#redis_cli_command="/usr/local/bin/redis-cli"
#redis_check_aof="/usr/local/bin/redis-check-aof"
# Production tool paths.
redis_cli_command="${install_dir}/src/redis-cli"
redis_check_aof="${install_dir}/src/redis-check-aof"
# Timestamp used to name this run's export files.
cur_time=$(date "+%Y%m%d%H%M%S")
# Script execution log.
log="${install_dir}/aof.log"
# printlog LEVEL MESSAGE
# Writes "[LEVEL] <timestamp> - <port> - MESSAGE" to stdout and appends the
# same line to $log. Reads globals: port, log.
printlog()
{
    local sysdate msg
    sysdate=$(date "+%Y-%m-%d %H:%M:%S")
    msg="[$1] $sysdate - $port - $2"
    # Quote the expansion (and use printf): the original unquoted `echo $msg`
    # word-split the message and glob-expanded any '*' in it.
    printf '%s\n' "$msg"
    printf '%s\n' "$msg" >> "$log"
}
# Make sure every working directory exists before touching any data.
for needed_dir in "$result_dir" "$aof_tmp_dir" "$aof_bak_dir"; do
    if [ ! -d "$needed_dir" ]; then
        mkdir -p "$needed_dir"
    fi
done
# This host's eth0 IP, read from the interface config (IPADDR=x.x.x.x line).
hostip=$(grep IPADDR /etc/sysconfig/network-scripts/ifcfg-eth0 | cut -d '=' -f2)
# 1. All redis-server ports listening on this host (parsed from the
#    ":port" tail of each redis-server process command line).
ports=$(ps -ef | grep redis-server | grep -v grep | awk -F: '{print $NF}' | cut -d " " -f1)
printlog "info" "redis_ports : $ports"
# 2. Pick any one local port to query the cluster topology through.
port=$(echo $ports | cut -d ' ' -f1)
# Master nodes of the cluster as "ip:port slot-range" pairs.
redis_masters=$(${redis_cli_command} -c -h ${hostip} -p ${port} cluster nodes | grep master | cut -d ' ' -f2,9)
printlog "info" "redis_masters : $redis_masters"
# Flatten into an array: even indices hold addresses, odd indices slot ranges.
arr=($redis_masters)
# 3. Walk the master list in (address, slot-range) pairs: masters hosted on
#    this machine get their incremental AOF slice exported; remote ones are
#    only logged.
for(( i=0;i<${#arr[@]};i=$i+2))
do
printlog "info" "redis export aof start ###################"
ip=`echo ${arr[$i]}|cut -d ':' -f1`
ip_port=`echo ${arr[$i]}|cut -d ':' -f2`
partition=${arr[$i+1]}
printlog "info" "redis_IP : $ip , redis_port:$ip_port , redis_partition :$partition"
# Only export masters whose IP matches this host.
if [ "$ip" == "$hostip" ]
then
port=$ip_port
# Location of this node's AOF file — test layout kept for reference:
#aof_data_file="${install_dir}/redis-node-${port}/appendonly.aof"
# Production layout:
aof_data_file="${install_dir}/redis-node-${port}/appendonly-${port}.aof"
redis_cluster_type="redis30_cluster"
tmp_aof_file1="${aof_tmp_dir}/${redis_cluster_type}_${port}_tmp1"
tmp_aof_file2="${aof_tmp_dir}/${redis_cluster_type}_${port}_tmp2"
# Total number of lines currently in the AOF file.
total_line=`wc -l $aof_data_file | grep -o '[0-9]\+ '`
# Marker key for this slot range; its value is the base64-encoded name of the
# watermark key written by the previous export run.
tmp_key="REDIS:DATA:CLUSTE:"$partition
tmp_value=`${redis_cli_command} -c -h ${hostip} -p ${port} get $tmp_key `
if [ -z "$tmp_value" ]
then
redis_key=''
else
# Decode the stored watermark key name.
redis_key=`echo $tmp_value |base64 -d -i`
fi
printlog "info" "tmp_key : $tmp_key ; tmp_value : $tmp_value ; redis_key:$redis_key"
if [ -z "$redis_key" ]
then
# No watermark key yet: treat as a fresh full sync starting at line 0.
# NOTE(review): raising auto-aof-rewrite-min-size to ~3GB presumably stops
# Redis from auto-rewriting (and truncating) the AOF between runs — confirm.
echo "befor $port seting:"
echo `${redis_cli_command} -p ${port} config get auto-aof-rewrite-min-size`
echo `${redis_cli_command} -p ${port} config set auto-aof-rewrite-min-size 3000000000`
echo "after $port seting:"
echo `${redis_cli_command} -p ${port} config get auto-aof-rewrite-min-size`
echo `${redis_cli_command} -p ${port} BGREWRITEAOF`
offset=0
else
# Find the AOF line where the previous watermark key was written; everything
# after that is the incremental delta since the last export.
line_str=`cat -n $aof_data_file|grep -a $redis_key`
printlog "info" "line_str : $line_str "
line_num=`echo $line_str|cut -d ' ' -f1`
printlog "info" "line_num : $line_num"
# NOTE(review): +2 presumably skips the remaining lines of the SET record
# that wrote the watermark key — confirm against the AOF wire format.
offset=`expr $line_num + 2`
fi
printlog "info" "total_line:$total_line,offset:$offset"
if [ $total_line -le $offset ] ; then
# NOTE(review): the log message says "reset ... = 0" but the code sets
# offset to $total_line (i.e. export nothing new) — confirm which is intended.
printlog "info" "aof_data_file length is less than offset reset offsetsize = 0 "
offset=$total_line
fi
# Slice everything after the watermark line into tmp file 1, then ask
# redis-check-aof how many leading bytes of the slice form complete AOF
# commands (the tail may end mid-command).
tail -n +"$(($offset + 1))" $aof_data_file > "$tmp_aof_file1"
ok_bytes=`$redis_check_aof $tmp_aof_file1 | grep -o 'ok_up_to=[0-9]\+'`
printlog "info" "ok_bytes:$ok_bytes,offset:$offset"
if [[ -z $ok_bytes ]] ; then
printlog "info" "could't get ok bytes from temp aof file"
continue
fi
# Strip the "ok_up_to=" prefix, leaving just the byte count.
ok_bytes=${ok_bytes//ok_up_to=}
if [[ $ok_bytes -le 0 ]] ; then
printlog "info" "no ok bytes for skip line $offset"
continue
fi
# Keep only the verified-complete prefix as the exportable AOF slice.
head -c $ok_bytes $tmp_aof_file1 > $tmp_aof_file2
ok_lines=`wc -l $tmp_aof_file2 | cut -d' ' -f1`
offset=$((offset + ok_lines))
aof_bak_name="${partition}_${cur_time}.aof"
mv $tmp_aof_file2 "${result_dir}/${aof_bak_name}"
# Slot-range bounds, e.g. "0-5460" -> start=0 end=5460.
start=`echo $partition|cut -d '-' -f1`
end=`echo $partition|cut -d '-' -f2`
printlog "info" "redis_partition_start: $start,redis_partition_end:$end"
# Generate a new watermark key that hashes into this node's slot range,
# record it, then delete the old one.
new_key='REDIS:DATA:SYNC:'
while [ "1" = "1" ]
do
# Random suffix derived from a kernel UUID's cksum.
uuid=`cat /proc/sys/kernel/random/uuid| cksum | cut -f1 -d " "`
# NOTE(review): on each retry the suffix is appended to the previous
# attempt, so the key grows every iteration — confirm this is intended.
new_key="$new_key""$uuid"
# CRC16 prints caluCRC(key) % 16384, matching Redis' key->slot mapping.
result=$(java CRC16 $new_key)
#printlog "info" "$result >=$start,$result<=$end"
if [ $result -ge $start -a $result -le $end ]
then
break
fi
done
printlog "info" "new_key:$new_key,redis_key:$redis_key"
# Write the new watermark key; its AOF record marks the next export's start.
echo `${redis_cli_command} -c -h ${hostip} -p ${port} set ${new_key} "$cur_time"`
base64_encode_key=`echo $new_key |base64 -i`
printlog "info" "tmp_key:$tmp_key,base64_encode_key:$base64_encode_key"
# Record the (base64-encoded) watermark key name under the marker key.
echo `${redis_cli_command} -c -h ${hostip} -p ${port} set ${tmp_key} "$base64_encode_key"`
# Delete the previous watermark key if it existed.
if [ -n "$redis_key" ]; then
echo `${redis_cli_command} -c -h ${hostip} -p ${port} del ${redis_key}`
fi
fi
printlog "info" "redis export aof end ###################"
done;
# Compress each exported AOF slice and ship it to the Guangzhou relay host.
# (Glob instead of parsing `ls`; expansion is already lexicographically sorted.)
for one_aof_file in "${result_dir}"/*; do
    # Skip the literal pattern when the export directory is empty.
    [ -e "$one_aof_file" ] || continue
    bname=$(basename "$one_aof_file")
    dname=$(dirname "$one_aof_file")
    cur_pwd=$(pwd)
    tmp_name="/tmp/${bname}.tar.gz"
    # Tar from inside the directory so the archive holds a bare filename.
    cd "$dname"
    tar zcf "$tmp_name" "$bname"
    cd "$cur_pwd"
    # Log the exact command being run (the old log line showed a different
    # bwlimit and destination than the command actually executed).
    printlog "info" "rsync -avz --progress --bwlimit=2500 --password-file=/etc/rsyncd.secrets $tmp_name content_sync@10.230.40.54::content_sync"
    rsync -avz --progress --bwlimit=2500 --password-file=/etc/rsyncd.secrets "$tmp_name" content_sync@10.230.40.54::content_sync
    # Capture rsync's status immediately — the original checked $? AFTER the
    # printlog call below, so it always saw printlog's status and archived the
    # file even when the transfer failed.
    rsync_rv=$?
    printlog "info" " finish rsync,tmpname:$tmp_name"
    if [ $rsync_rv -eq 0 ] ; then
        full_bak_dir="${aof_bak_dir}/$(date +%Y%m%d)"
        if [ ! -d "$full_bak_dir" ]; then
            mkdir -p "$full_bak_dir"
        fi
        # NOTE(review): $aof_bak_name comes from the export loop above; if no
        # local master was exported it is empty and this regex matches every
        # file — confirm that is intended.
        if [[ $one_aof_file =~ $aof_bak_name ]] ; then
            echo "mv ${one_aof_file}/ ${full_bak_dir}"
            mv "$one_aof_file" "$full_bak_dir"
        fi
    else
        sleep 1
    fi
    rm "$tmp_name"
done
View Code
CRC16.java
/**
 * Table-driven CRC-CCITT (polynomial 0x1021, initial value 0x0000) checksum.
 * Used by the export script to map a generated key onto a Redis cluster hash
 * slot: main() prints caluCRC(arg) % 16384 (Redis has 16384 slots).
 *
 * @author amadowang
 * @version Aug 29, 2011
 * @date 2012-10-24
 */
public class CRC16 {
// 256-entry lookup table for the 0x1021 polynomial.
private char[] crc_tb = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 0x2462, 0x3443,
0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 0x3653, 0x2672, 0x1611, 0x0630,
0x76d7, 0x66f6, 0x5695, 0x46b4, 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861,
0x2802, 0x3823, 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 0xedae, 0xfd8f,
0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 0xff9f, 0xefbe, 0xdfdd, 0xcffc,
0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025,
0x7046, 0x6067, 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xa7db, 0xb7fa,
0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 0xd94c, 0xc96d, 0xf90e, 0xe92f,
0x99c8, 0x89e9, 0xb98a, 0xa9ab, 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8,
0xabbb, 0xbb9a, 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 0x6e17, 0x7e36,
0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 };
/**
 * Compute the CRC-CCITT checksum of the given bytes.
 *
 * @param pByte input bytes
 * @return 16-bit CRC value (char is used as an unsigned 16-bit holder)
 */
public char caluCRC(byte[] pByte) {
int len = pByte.length;
char crc;
byte da;
crc = 0x0;
int i = 0;
while (len-- != 0) {
// High byte of the current CRC (char is unsigned, so /256 is safe).
da = (byte) (crc / 256);
// Compound assignment implicitly truncates back to 16 bits (char).
crc <<= 8;
// XOR with next input byte; normalize to 0..255 for the table index.
int num = da ^ pByte[i];
if (num < 0)
num += 256;
crc ^= crc_tb[num];
++i;
}
return crc;
}
public static void main(String[] args) {
CRC16 crc = new CRC16();
// byte[] test = "SSO_MOBILE-SSO_APP-C487C9139AC6D1CD889853B598747222".getBytes();
byte[] test = args[0].getBytes();
char ch = crc.caluCRC(test);
// Widen to int for printing: many char values are non-printable.
int i = (int) ch;
// String str = Integer.toHexString(i);
// System.out.println("CRC hex string: " + str);
// Reduce to a Redis hash slot (0..16383) and print it for the shell caller.
int resut = i % 16384;
System.out.println(resut);
// System.exit(resut);
}
}
View Code
2、导入脚本import_redis30_aof.sh
# Redis install root — alternate path kept for reference:
#install_dir="/opt/supp_app/redis_3.0"
install_dir="/opt/supp_app/sylredis/redis-3.0.7"
# Incoming directory watched for AOF tarballs shipped from Beijing.
data_dir="${install_dir}/data/import"
# Archive directory for tarballs that were imported successfully.
bak_dir="${install_dir}/bak"
# redis-cli binary — bundled copy kept for reference:
#redis_cli_command="${install_dir}/src/redis-cli"
redis_cli_command="/usr/local/bin/redis-cli"
# Script execution log.
log="${install_dir}/aof.log"
#cur_time=`date "+%Y%m%d%H%M%S"`
# printlog LEVEL MESSAGE
# Writes "[LEVEL] <timestamp> - <port> - MESSAGE" to stdout and appends the
# same line to $log. Reads globals: port, log.
printlog()
{
    local sysdate msg
    sysdate=$(date "+%Y-%m-%d %H:%M:%S")
    msg="[$1] $sysdate - $port - $2"
    # Quote the expansion (and use printf): the original unquoted `echo $msg`
    # word-split the message and glob-expanded any '*' in it.
    printf '%s\n' "$msg"
    printf '%s\n' "$msg" >> "$log"
}
# Resolve this host's IP from the eth0 interface config (IPADDR=x.x.x.x line).
hostip=$(grep IPADDR /etc/sysconfig/network-scripts/ifcfg-eth0 | cut -d '=' -f2)
# Poll forever: import every AOF tarball that lands in $data_dir, routing
# each one to the cluster node that owns its slot range.
while true; do
for one_tar_file in `ls -1 ${data_dir}/* 2>/dev/null | sort`; do
# NOTE(review): several printlog calls below pass "info", with a trailing
# comma — the comma becomes part of the logged level tag; likely a typo.
printlog "info", "redis import aof start ####################"
# 1. First local redis-server port (used only to query cluster topology).
tmp_port=` ps -ef | grep redis-server| grep -v grep|awk -F: '{print $NF}' |cut -d " " -f1 |head -n 1`
printlog "info" "redis_port : $tmp_port"
# 2. Derive the slot range from the file name ("<slots>_<timestamp>.aof.tar.gz").
bname=`basename $one_tar_file`
aof_fname=${bname%%.tar.gz}
name_start=${aof_fname%%_*}
# 3. Ask the cluster which node currently serves that slot range.
printlog "info", "$redis_cli_command -c -h $hostip -p ${tmp_port} cluster nodes|grep ${name_start} |cut -d' ' -f2|cut -d ':' -f1| head -n 1"
ip=`$redis_cli_command -c -h $hostip -p ${tmp_port} cluster nodes|grep ${name_start} |cut -d' ' -f2|cut -d ':' -f1| head -n 1`
printlog "info", "$redis_cli_command -c -h $hostip -p ${tmp_port} cluster nodes|grep ${name_start} |cut -d' ' -f2|cut -d ':' -f2| head -n 1"
port=`$redis_cli_command -c -h $hostip -p ${tmp_port} cluster nodes|grep ${name_start} |cut -d' ' -f2|cut -d ':' -f2| head -n 1`
# Path of the AOF file once extracted under /tmp.
tname="/tmp/${aof_fname}"
printlog "info", "starting processing $one_tar_file "
printlog "info", "name_start : $name_start , ip : $ip , port : $port"
tar -zxvf $one_tar_file -C /tmp >> /tmp/tar.log
if [ $? -eq 0 ] ; then
echo "good bname:$bname, tname:$tname"
filesize=$(stat -c '%s' ${tname})
# A large slice (> ~300 MB) is treated as a full export: wipe the node
# before replaying it.
if [[ $filesize -gt 300000000 ]]; then
echo "start flushall"
$redis_cli_command -h ${ip} -p ${port} flushall
fi
# Replay the AOF commands into the owning node via redis-cli's pipe mode.
printlog "info", "cat $tname | $redis_cli_command --pipe -h ${ip} -p ${port}"
cat $tname | $redis_cli_command --pipe -h ${ip} -p ${port}
#mv $name $bak
# Archive the processed tarball under a per-day backup directory.
full_bak_dir="${bak_dir}/`date +%Y%m%d`"
if [ ! -d $full_bak_dir ]; then
mkdir -p "$full_bak_dir"
fi
mv $one_tar_file $full_bak_dir
rm $tname
printlog "info", "process $one_tar_file ok"
else
# NOTE(review): printlog only reads $1/$2, so the trailing $? is dropped —
# and at this point $? holds the status of the [ ] test, not of tar.
printlog "Error", "tar -zxvf $one_tar_file -C /tmp" $?
fi
printlog "info", "redis import aof end ####################"
done
sleep 10
done
View Code
使用步骤
(1)导出脚本 sync_export_redis30_aof.sh :
定时间隔十分钟执行 sync_export_redis30_aof.sh 该脚本 每次通过redis 分区 生成aof日志 并进行压缩传输到广州机房指定的服务器上
部署:sync_export_redis30_aof.sh CRC16.class 两个部署在一起
修改:修改该脚本中 install_dir=redis安装目录
检查:脚本中aof_data_file aof文件所在位置是否正确
注:北京机房使用,redis集群服务器 都需要安装
(2)导入脚本 import_redis30_aof :
后台执行 import_redis30_aof.sh 该脚本,每次检查执行目录是否存在aof压缩文件,存在则执行导入功能。
部署: 只部署 import_redis30_aof.sh
修改:修改该脚本中 install_dir=redis安装目录
检查:脚本中data_dir= 需要导入的文件所在位置是否正确
注: 广州机房使用, 部署在redis集群中的任意一台即可。