Table of contents

  • 1. Redis dependency and configuration
  • 2. Consumer
  • 2.1 Code shared by the producer and consumer
  • Message queue keys
  • 2.2 Redis message queue configuration
  • 1) MsgConsumer: the common consumer interface
  • 2) RedisQueueConfiguration: queue configuration JavaBean
  • 3) RedisQueueListener: queue listener
  • 4) RedisMqConsumerContainer: message dispatch
  • 5) RedisConfig: RedisTemplate and listener configuration
  • 2.3 Business logic
  • 1) Business message 1: data verification
  • 2) Business message 2: data persistence
  • 3. Producer
  • 1) RedisConfig: RedisTemplate configuration
  • 2) RedisQueueSender: sending messages
  • 4. HelloController: sending test messages in batches
  • 5. Re-consuming messages after a consumption failure
  • 1. Message queue keys
  • 2. Business message 1: improved data verification
  • 3. Re-adding data from the key_bak queue to the key queue
  • 4. Summary
  • 6. Testing notes


1. Redis dependency and configuration

Maven dependency (pom.xml):

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>

Redis connection settings (application.yml):

server:
  port: 80

spring:
  redis:
    host: 127.0.0.1
    port: 6379
    password: 123456
    # Connection timeout (milliseconds)
    timeout: 1000
    exptime: 3600
    jedis:
      pool:
        # Maximum number of idle connections in the pool
        max-idle: 8
        # Minimum number of idle connections in the pool
        min-idle: 0
        # Maximum number of connections in the pool (a negative value means no limit)
        max-active: 100
        # Maximum blocking wait time of the pool, in milliseconds (a negative value means no limit)
        max-wait: -1

2. Consumer

2.1 Code shared by the producer and consumer

Message queue keys

/**
 * Message queue keys for the business flows
 */
public class RedisBatchQueueKey {

	/** Data verification **/
	public static final String BATCH_DATA_VERIFY = "batch_data_verify";

	/** Writing data to HBase */
	public static final String BATCH_WRITE_HBASE = "batch_write_hbase";
}

2.2 Redis message queue configuration

1) MsgConsumer: the common consumer interface

public interface MsgConsumer {
	
    void onMessage(Object message);

    void onError(Object message, Exception e);
}

2) RedisQueueConfiguration: queue configuration JavaBean

public class RedisQueueConfiguration {
    /**
     * Queue name (used as the Redis list key)
     */
    private String queue;
    /**
     * Consumer that handles messages from this queue
     */
    private MsgConsumer consumer;

    private RedisQueueConfiguration() {
    }

    public static Builder builder() {
        return new Builder();
    }

    String getQueue() {
        return queue;
    }

    MsgConsumer getConsumer() {
        return consumer;
    }

    public static class Builder {
        private RedisQueueConfiguration configuration = new RedisQueueConfiguration();

        public RedisQueueConfiguration defaultConfiguration(MsgConsumer consumer) {
            configuration.consumer = consumer;
            configuration.queue = consumer.getClass().getSimpleName();
            return configuration;
        }

        public Builder queue(String queue) {
            configuration.queue = queue;
            return this;
        }

        public Builder consumer(MsgConsumer consumer) {
            configuration.consumer = consumer;
            return this;
        }

        public RedisQueueConfiguration build() {
            if (configuration.queue == null || configuration.queue.length() == 0) {
                if (configuration.consumer != null) {
                    // Fall back to the consumer's class name as the queue name
                    configuration.queue = configuration.consumer.getClass().getSimpleName();
                }
            }
            return configuration;
        }
    }
}

3) RedisQueueListener: queue listener

import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.QueryTimeoutException;
import org.springframework.data.redis.core.RedisTemplate;

public class RedisQueueListener implements Runnable {
	
	public static final Logger log = LoggerFactory.getLogger(RedisQueueListener.class);
	private RedisTemplate<String, Object> redisTemplate;
	private String queue;
	private MsgConsumer consumer;

	public RedisQueueListener(RedisTemplate<String, Object> redisTemplate, String queue, MsgConsumer consumer) {
		this.redisTemplate = redisTemplate;
		this.queue = queue;
		this.consumer = consumer;
	}

	@Override
	public void run() {
		log.info("RedisQueueListener starting... queue: {}", queue);
		while (RedisMqConsumerContainer.run) {
			try {
				// Blocking pop: waits up to 30 seconds for a message before looping again
				Object msg = redisTemplate.opsForList().rightPop(queue, 30, TimeUnit.SECONDS);
				if (msg != null) {
					try {
						consumer.onMessage(msg);
					} catch (Exception e) {
						// Hand the failed message to the consumer's error callback
						consumer.onError(msg, e);
					}
				}
			} catch (QueryTimeoutException ignored) {
				// Expected when the blocking pop times out with no message; simply poll again
			} catch (Exception e) {
				if (RedisMqConsumerContainer.run) {
					log.error("Error on queue: {}", queue, e);
				} else {
					log.info("RedisQueueListener exiting... queue: {}", queue);
				}
			}
		}
	}
}

4) RedisMqConsumerContainer: message dispatch

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.redis.core.RedisTemplate;

public class RedisMqConsumerContainer {
	
    private static final Logger log = LoggerFactory.getLogger(RedisMqConsumerContainer.class);
    private RedisTemplate<String, Object> redisTemplate;
    
    private Map<String, RedisQueueConfiguration> consumerMap = new HashMap<>();
    // Running flag read by the listener threads; volatile so the shutdown signal is visible to them
    static volatile boolean run;
    private ExecutorService exec;

    public RedisMqConsumerContainer(RedisTemplate<String, Object> redisTemplate) {
        this.redisTemplate = redisTemplate;
    }

    public void addConsumer(RedisQueueConfiguration configuration) {
        if (consumerMap.containsKey(configuration.getQueue())) {
            log.warn("Key: {} already exists and will be overwritten", configuration.getQueue());
        }
        if (configuration.getConsumer() == null) {
            log.warn("Key: {} has no consumer and will not be registered", configuration.getQueue());
            return;
        }
        consumerMap.put(configuration.getQueue(), configuration);
    }

    public void destroy() {
        run = false;
        this.exec.shutdown();
        log.info("RedisQueueListener shutting down...");
        try {
            // Listeners may be blocked in rightPop for up to 30 seconds, so wait instead of busy-spinning
            this.exec.awaitTermination(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        log.info("RedisQueueListener stopped");
    }

    public void init() {
        run = true;
        // Shared counter so that each listener thread gets a distinct name
        final AtomicInteger threadNumber = new AtomicInteger(1);
        this.exec = Executors.newCachedThreadPool(r -> new Thread(r, "RedisMQListener-" + threadNumber.getAndIncrement()));
        consumerMap = Collections.unmodifiableMap(consumerMap);
        consumerMap.forEach((k, v) -> exec.submit(new RedisQueueListener(redisTemplate, v.getQueue(), v.getConsumer())));
    }

}

5) RedisConfig: RedisTemplate and listener configuration

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;

import com.aop8.common.config.redis.batch.BatchDataVerifyListener;
import com.aop8.common.config.redis.batch.BatchWriteHbaseListener;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;

@Configuration
public class RedisConfig {

	@Bean
	public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
		RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(redisConnectionFactory);
        // Use Jackson2JsonRedisSerializer for Redis values (the default is JDK serialization)
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        objectMapper.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);
        jackson2JsonRedisSerializer.setObjectMapper(objectMapper);

        // Use StringRedisSerializer for Redis keys
        RedisSerializer<String> redisSerializer = new StringRedisSerializer();
        // key
        redisTemplate.setKeySerializer(redisSerializer);
        redisTemplate.setHashKeySerializer(redisSerializer);
        // value
        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);

        redisTemplate.afterPropertiesSet();
        		
		return redisTemplate;
	}

	/**
	 * Configure the message listeners.
	 *
	 * @param batchDataVerifyListener  listener for business message 1: data verification
	 * @param batchWriteHbaseListener  listener for business message 2: data persistence
	 * @return the consumer container
	 */
	@Bean(initMethod = "init", destroyMethod = "destroy")
	public RedisMqConsumerContainer redisMqConsumerContainer(@Autowired RedisTemplate<String, Object> redisTemplate//
			,BatchDataVerifyListener batchDataVerifyListener//
			,BatchWriteHbaseListener batchWriteHbaseListener//			
			) {
		RedisMqConsumerContainer config = new RedisMqConsumerContainer(redisTemplate);

		config.addConsumer(RedisQueueConfiguration.builder().queue(RedisBatchQueueKey.BATCH_DATA_VERIFY).consumer(batchDataVerifyListener).build());
		config.addConsumer(RedisQueueConfiguration.builder().queue(RedisBatchQueueKey.BATCH_WRITE_HBASE).consumer(batchWriteHbaseListener).build());		
		return config;
	}	

}

2.3 Business logic

1) Business message 1: data verification

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import com.aop8.common.config.redis.MsgConsumer;

/**
 * Business message 1: data verification
 **/
@Component
public class BatchDataVerifyListener implements MsgConsumer {
	
    private static Logger log = LoggerFactory.getLogger(BatchDataVerifyListener.class);
    
    @Override
    public void onMessage(Object message) {
        log.info("Received message: " + message.toString());
        try {
            // Simulate a time-consuming verification step
            Thread.sleep(1000 * 5);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        log.info("Finished processing message: " + message);
        // Uncomment to simulate a processing failure and trigger onError():
        // int b = 3 / 0;
    }

    @Override
    public void onError(Object message, Exception e) {
        log.error("Error while processing message: {}", message, e);
    }
}

2) Business message 2: data persistence

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import com.aop8.common.config.redis.MsgConsumer;

/**
 * Business message 2: writing data to HBase
 */
@Component
public class BatchWriteHbaseListener implements MsgConsumer {
	
    private static Logger log = LoggerFactory.getLogger(BatchWriteHbaseListener.class); 

    @Override
    public void onMessage(Object message) {
        log.info("Received message 222222: " + message.toString());
        try {
            // Simulate a time-consuming write step
            Thread.sleep(1000 * 5);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        log.info("Finished processing message 222222: " + message.toString());
    }

    @Override
    public void onError(Object message, Exception e) {
        log.error("Error while processing message 222222: {}", message, e);
    }
}

3. Producer

1) RedisConfig: RedisTemplate configuration

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;

@Configuration
public class RedisConfig {

	@Bean
	public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
		RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(redisConnectionFactory);
        // Use Jackson2JsonRedisSerializer for Redis values (the default is JDK serialization)
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        objectMapper.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);
        jackson2JsonRedisSerializer.setObjectMapper(objectMapper);

        // Use StringRedisSerializer for Redis keys
        RedisSerializer<String> redisSerializer = new StringRedisSerializer();
        // key
        redisTemplate.setKeySerializer(redisSerializer);
        redisTemplate.setHashKeySerializer(redisSerializer);
        // value
        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);

        redisTemplate.afterPropertiesSet();
        		
		return redisTemplate;
	}	
}

2) RedisQueueSender: sending messages

import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

@Component
public class RedisQueueSender {

    private final RedisTemplate<String, Object> redisTemplate;

    public RedisQueueSender(RedisTemplate<String, Object> redisTemplate) {
        this.redisTemplate = redisTemplate;
    }

    /**
     * Push the message onto the head (left) of the queue; the listener pops from the
     * tail (right), so messages are consumed in FIFO order.
     */
    public void sendMsg(String queue, Object msg) {
        redisTemplate.opsForList().leftPush(queue, msg);
    }
}

4. HelloController: sending test messages in batches

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;

import com.aop8.common.config.redis.RedisQueueSender;
import com.aop8.common.config.redis.batch.RedisBatchQueueKey;

@RestController
@RequestMapping("/bjifia/hello")
public class HelloController {
	@Autowired
	private RedisQueueSender redisQueueSender;
	
	@ResponseBody
	@RequestMapping("/batchDataVerify")
	public String batchDataVerify() {		
		for (int i = 0; i < 10; i++) {
			String batchnum ="batchnum"+i;
			String sourceCode ="sourceCode"+i;
			String tableName ="tableName"+i;
			
			String msg=batchnum+"|"+sourceCode+"|"+tableName;
			redisQueueSender.sendMsg(RedisBatchQueueKey.BATCH_DATA_VERIFY, "\""+msg+"\"");
		}		
		return "提交batchDataVerify成功";
	}

	@ResponseBody
	@RequestMapping("/batchWriteHbase")
	public String batchWriteHbase() {		
		for (int i = 0; i < 10; i++) {
			String batchnum ="batchnum"+i;
			String sourceCode ="sourceCode"+i;
			String tableName ="tableName"+i;
			
			String msg=batchnum+"|"+sourceCode+"|"+tableName;
			redisQueueSender.sendMsg(RedisBatchQueueKey.BATCH_WRITE_HBASE, "\""+msg+"\"");
		}		
		return "提交batchWriteHbase成功";
	}
}

5. Re-consuming messages after a consumption failure

For each Redis queue key, add a backup key. When a message fails during consumption, store it in the backup key,
and then use an HTTP request (or a scheduled task, or a similar mechanism) to re-add the data from the backup key to the original key's queue, as shown in the subsections below.

1. Message queue keys

/**
 * Message queue keys for the business flows
 */
public class RedisBatchQueueKey {

	/** Data verification **/
	public static final String BATCH_DATA_VERIFY = "batch_data_verify";

	/** Backup queue for failed data-verification messages */
	public static final String BATCH_DATA_VERIFY_BAK = "batch_data_verify_bak";

	/** Writing data to HBase */
	public static final String BATCH_WRITE_HBASE = "batch_write_hbase";

	/** Backup queue for failed HBase-write messages */
	public static final String BATCH_WRITE_HBASE_BAK = "batch_write_hbase_bak";
}

2. Business message 1: improved data verification

In the onError() method, when an exception occurs, the failed message is pushed onto the backup key's queue.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.aop8.common.config.redis.MsgConsumer;
import com.aop8.common.config.redis.RedisQueueSender;

/**
 * Business message 1: data verification (improved version)
 **/
@Component
public class BatchDataVerifyListener implements MsgConsumer {
	
    private static Logger log = LoggerFactory.getLogger(BatchDataVerifyListener.class);
    
    @Autowired
	private RedisQueueSender redisQueueSender;
	
    @Override
    public void onMessage(Object message) {
        log.info("Received message: " + message.toString());
        try {
            // Simulate a time-consuming verification step
            Thread.sleep(1000 * 5);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        log.info("Finished processing message: " + message);
        // Uncomment to simulate a processing failure and trigger onError():
        // int b = 3 / 0;
    }

    @Override
    public void onError(Object message, Exception e) {
        log.error("Error while processing message: {}", message, e);
        // Park the failed message in the backup queue so it can be re-queued later
        redisQueueSender.sendMsg(RedisBatchQueueKey.BATCH_DATA_VERIFY_BAK, message);
    }
}

3. Re-adding data from the key_bak queue to the key queue

An HTTP request (a scheduled task or another mechanism would work just as well) moves the data from key_bak back into the key queue.

@Controller
@RequestMapping("/test")
public class TestController {

	@Autowired
	private RedisTemplate<String, Object> redisTemplate;

	// Each request moves only one message; this could be optimized into a loop or a
	// blocking variant, as in the scheduled sketch below.
	@ResponseBody
	@GetMapping("/move")
	public Object move() {
		Object obj = redisTemplate.opsForList()
				.rightPopAndLeftPush(RedisBatchQueueKey.BATCH_DATA_VERIFY_BAK, RedisBatchQueueKey.BATCH_DATA_VERIFY);
		System.out.println("rpoplpush = " + obj);
		return 222;
	}
}
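
If triggering the move by hand is not convenient, the same rightPopAndLeftPush call can be wrapped in a loop and driven by a scheduled task instead of an HTTP request. The following is only a minimal sketch (the class name and the one-minute interval are made up for illustration); it assumes scheduling has been enabled with @EnableScheduling on a configuration class:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

/**
 * Hypothetical scheduled variant of /test/move: drains batch_data_verify_bak
 * back into batch_data_verify once a minute.
 */
@Component
public class BatchDataVerifyRequeueTask {

	@Autowired
	private RedisTemplate<String, Object> redisTemplate;

	@Scheduled(fixedDelay = 60_000)
	public void requeueFailedMessages() {
		Object msg;
		// rightPopAndLeftPush is atomic, so each failed message is moved exactly once,
		// even if several instances of this task run in parallel
		while ((msg = redisTemplate.opsForList().rightPopAndLeftPush(
				RedisBatchQueueKey.BATCH_DATA_VERIFY_BAK, RedisBatchQueueKey.BATCH_DATA_VERIFY)) != null) {
			System.out.println("requeued = " + msg);
		}
	}
}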

4. Summary

To sum up: when consumption fails, the message is put into the key_bak queue, and then a request (or a scheduled task, etc.) moves the data from the key_bak queue back into the key queue.

There is another approach that needs no key_bak queue at all: when an exception occurs, simply push the message back onto the original key queue, behind the messages already waiting. The downside is that failures are easy to overlook, and a message that always fails can loop forever; a minimal sketch of this variant follows.
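
For illustration, that simpler variant amounts to replacing only the onError() method of BatchDataVerifyListener with something like the following sketch (not code from the original project); the failed message is sent back to the same queue, so the listener will eventually pick it up again:

    @Override
    public void onError(Object message, Exception e) {
        log.error("Error while processing message: {}", message, e);
        // Re-queue the failed message on the original queue instead of a backup queue.
        // Caution: a message that always fails will cycle forever unless the number of
        // retries is also tracked and capped somewhere.
        redisQueueSender.sendMsg(RedisBatchQueueKey.BATCH_DATA_VERIFY, message);
    }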

6. Testing notes

This example uses a Redis list as the message queue to implement the producer/consumer pattern. To verify that a given message is consumed only once, copy the consumer application several times (changing only the server port), start all of the copies so that several consumers listen on the same queues at the same time, and check whether the same message ever gets consumed more than once.

Producer and consumer are relative roles. For example, after a consumer finishes processing a message it may itself send a new message to drive the next step of the flow; at that point the consumer acts as a producer.