Netty:
在之前,我们实现了基于传统BIO方式的传输模式。这里,我们将使用效率更高、更方便的Netty进行传输。
实现过程如下:
一、导入依赖
1.导入Netty依赖
RPC-CORE
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
<version>4.1.50.Final</version>
</dependency>
2.导入jackson依赖
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>2.11.0</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>2.11.0</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>2.11.0</version>
</dependency>
二、序列化
1.序列化接口
创建CommonSerializer接口(switch case 方便后续添加其他序列化方法)
后期让不同的序列化方式来实现该接口
package com.t598.core.serializer;
/**
 * Common serializer abstraction. Concrete serialization formats (JSON, Kryo, ...)
 * implement this interface; the numeric code travels inside the protocol frame
 * so the receiving side can pick the matching deserializer.
 */
public interface CommonSerializer {

    /**
     * Default serializer code (1 = JSON, see SerializerCode).
     * Referenced by callers such as the client-side heartbeat sender
     * via {@code CommonSerializer.DEFAULT_SERIALIZER}.
     */
    int DEFAULT_SERIALIZER = 1;

    /**
     * Serializes the given object into a byte array.
     *
     * @param object the object to serialize
     * @return the serialized bytes, or null if serialization failed
     */
    byte[] serializer(Object object);

    /**
     * Deserializes the given bytes into an instance of the given class.
     *
     * @param bytes the raw bytes read from the wire
     * @param clazz the expected target type
     * @return the deserialized instance, or null on failure
     * @throws Exception if deserialization fails irrecoverably
     */
    Object deserialize(byte[] bytes, Class<?> clazz) throws Exception;

    /** Returns the numeric code identifying this serializer in the protocol frame. */
    int getCode();

    /**
     * Looks up a serializer implementation by its numeric code.
     * New formats are added as extra switch cases.
     *
     * @param code the serializer code read from the protocol frame
     * @return the matching serializer, or null for unknown codes (callers must check)
     */
    static CommonSerializer getByte(int code) {
        switch (code) {
            case 1:
                return new JsonSerializer();
            default:
                return null;
        }
    }
}
2.实现JSON的序列化器
使用JSON序列化器无法保证反序列化后仍然为原实例类型,后续会修改
package com.t598.core.serializer;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.t598.common.entity.RpcRequest;
import com.t598.common.enumeration.SerializerCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
public class JsonSerializer implements CommonSerializer{
private static final Logger logger = LoggerFactory.getLogger(JsonSerializer.class);
/*
序列化对象
Jackson ObjectMapper可以从Java对象创建JSON.
*/
private ObjectMapper objectMapper = new ObjectMapper();
/**
* 实现序列化
* @param object
* @return
*/
@Override
public byte[] serializer(Object object) {
try {
// 将对象值object序列化为字节数组
return objectMapper.writeValueAsBytes(object);
} catch (JsonProcessingException e) {
logger.error("序列化时发生错误:{}", e.getMessage());
e.printStackTrace();
return null;
}
}
/**
* 反序列化
* @param bytes
* @param clazz
* @return
* @throws Exception
*/
@Override
public Object deserialize(byte[] bytes, Class<?> clazz) throws Exception {
try {
// 根据clazz将bytes反序列换为对应的实例,还需要判断反序列化后实例类型是否正确
// 因为RepRequest中有一个Object的数据,进行反序列时可能会失败,
Object obj = objectMapper.readValue(bytes, clazz);
if (obj instanceof RpcRequest) {
obj = handleRequest(obj);
}
return obj;
} catch (IOException e) {
logger.error("反序列化时发生错误:{}", e.getMessage());
e.printStackTrace();
return null;
}
}
/*
获取序列化代码,方便后期调用不同的序列化方法
*/
@Override
public int getCode() {
return SerializerCode.valueOf("JSON").getCode();
}
private Object handleRequest(Object obj) throws IOException {
RpcRequest rpcRequest = (RpcRequest) obj;
for (int i = 0; i < rpcRequest.getParamTypes().length; i++) {
Class<?> clazz = rpcRequest.getParamTypes()[i];
// 确定此Class对象表示的类或接口是否与指定的Class参数表示的类或接口相同,
// 或者是其超类或超接口。 如果是,则返回true ; 否则返回false 。
if (!clazz.isAssignableFrom(rpcRequest.getParameters()[i].getClass())) {
// 如果不对应,则将这个参数重新反序列化为对应的参数类型。
byte[] bytes = objectMapper.writeValueAsBytes(rpcRequest.getParameters()[i]);
rpcRequest.getParameters()[i] = objectMapper.readValue(bytes, clazz);
}
}
return rpcRequest;
}
}
三、协议
1.自定义协议
/* CommonEncoder就是将request或者response包装成协议包
* 协议分为五个部分:
* magicNumber表示这是一个协议包
* packageType表示这是一个请求还是响应(或者其他类型)
* SerializerType表示序列化的协议类型
* DataLength表示实际数据的长度,防止粘包
* 最后为数据部分
* +---------------+---------------+-----------------+-------------+
* | Magic Number | Package Type | Serializer Type | Data Length |
* | 4 bytes | 4 bytes | 4 bytes | 4 bytes |
* +---------------+---------------+-----------------+-------------+
* | Data Bytes |
* | Length: ${Data Length} |
* +---------------------------------------------------------------+
*/
2.设置编码器:
package com.t598.core.codec;
import com.t598.common.entity.RpcRequest;
import com.t598.common.enumeration.PackageType;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;
/**
 * Outbound handler that wraps a request/response into the custom protocol frame:
 * magic (4B) | package type (4B) | serializer code (4B) | data length (4B) | data.
 * The explicit data length lets the decoder avoid TCP sticky-packet issues.
 */
// NOTE(review): this snippet's import list is missing
// "import com.t598.core.serializer.CommonSerializer;" — verify in the real file.
public class CommonEncoder extends MessageToByteEncoder<Object> { // typed instead of raw

    private static final int MAGIC_NUMBER = 0xCAFEBABE;

    private final CommonSerializer serializer;

    public CommonEncoder(CommonSerializer serializer) {
        this.serializer = serializer;
    }

    /**
     * Encodes the message according to the custom protocol layout above.
     *
     * @throws IllegalStateException if the payload cannot be serialized
     */
    @Override
    protected void encode(ChannelHandlerContext channelHandlerContext, Object o, ByteBuf outbyteBuf) throws Exception {
        // 1. Magic number marking this as one of our protocol packets.
        outbyteBuf.writeInt(MAGIC_NUMBER);
        // 2. Package type: request vs. response.
        if (o instanceof RpcRequest) {
            outbyteBuf.writeInt(PackageType.REQUEST_PACK.getCode());
        } else {
            outbyteBuf.writeInt(PackageType.RESPONSE_PACK.getCode());
        }
        // 3. Serializer code, so the peer picks the matching deserializer.
        outbyteBuf.writeInt(serializer.getCode());
        byte[] bytes = this.serializer.serializer(o);
        if (bytes == null) {
            // JsonSerializer returns null on failure; fail loudly instead of
            // throwing an opaque NPE on bytes.length below.
            throw new IllegalStateException("序列化失败,无法编码消息");
        }
        // 4. Data length, then 5. the payload itself.
        outbyteBuf.writeInt(bytes.length);
        outbyteBuf.writeBytes(bytes);
    }
}
3.设置解码器:
package com.t598.core.codec;
import com.t598.common.entity.RpcRequest;
import com.t598.common.entity.RpcResponse;
import com.t598.common.enumeration.PackageType;
import com.t598.common.enumeration.RpcError;
import com.t598.common.exception.RpcException;
import com.t598.core.serializer.CommonSerializer;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
 * Inbound handler that parses the custom protocol frame
 * (magic | package type | serializer code | data length | data)
 * and emits the deserialized RpcRequest/RpcResponse.
 * ReplayingDecoder replays the read when insufficient bytes have arrived,
 * so no manual readableBytes() checks are needed.
 */
public class CommonDecoder extends ReplayingDecoder<Void> { // no decoder state needed

    private static final Logger logger = LoggerFactory.getLogger(CommonDecoder.class);

    private static final int MAGIC_NUMBER = 0xCAFEBABE;

    @Override
    protected void decode(ChannelHandlerContext channelHandlerContext, ByteBuf inbyteBuf, List<Object> outlist) throws Exception {
        // 1. Magic number — reject frames that are not ours.
        int magic = inbyteBuf.readInt();
        if (magic != MAGIC_NUMBER) {
            logger.error("无法识别的协议包:{}", magic);
            throw new RpcException(RpcError.UNKNOWN_PROTOCOL);
        }
        // 2. Package type — request or response.
        int packageCode = inbyteBuf.readInt();
        Class<?> packageClass;
        if (packageCode == PackageType.REQUEST_PACK.getCode()) {
            packageClass = RpcRequest.class;
        } else if (packageCode == PackageType.RESPONSE_PACK.getCode()) {
            packageClass = RpcResponse.class;
        } else {
            logger.error("无法识别的数据包:{}", packageCode);
            throw new RpcException(RpcError.UNKNOWN_PACKAGE_TYPE);
        }
        // 3. Serializer code — pick the matching deserializer.
        int serializerCode = inbyteBuf.readInt();
        CommonSerializer serializer = CommonSerializer.getByte(serializerCode);
        if (serializer == null) {
            logger.error("无法识别的反序列化器:{}", serializerCode);
            throw new RpcException(RpcError.UNKNOWN_SERIALIZER);
        }
        // 4. Payload length.
        // NOTE(review): length comes straight off the wire; a corrupted/malicious
        // frame could request a huge allocation — consider an upper bound.
        int length = inbyteBuf.readInt();
        byte[] bytes = new byte[length];
        // 5. Payload bytes (ReplayingDecoder waits until all `length` bytes arrive).
        inbyteBuf.readBytes(bytes);
        // 6. Deserialize and hand the object to the next inbound handler.
        Object obj = serializer.deserialize(bytes, packageClass);
        outlist.add(obj);
    }
}
四、抽象接口
1.将之前的RpcServer和RpcClient抽象为接口
抽象的目的是为了将基于Netty的传输和之前的传输区分开(用netty和之前的方式去分别实现这两个接口),使得框架中可以实现基于两种不同的传输模式。
// Fixed package: NettyClient imports com.t598.core.transport.RpcClient (see its
// import list), so the interface must live in that package, not common.transport.
package com.t598.core.transport;
import com.t598.common.entity.RpcRequest;
/**
 * Transport-agnostic client abstraction. Implemented by both the legacy BIO
 * client and NettyClient so the framework supports either transport.
 */
public interface RpcClient {
    /**
     * Sends an RPC request to the server and returns the call result.
     *
     * @param rpcRequest the request describing the remote call
     * @return the result of the remote invocation, or null on failure
     */
    Object sendRequest(RpcRequest rpcRequest);
}
// Fixed package: NettyServer imports com.t598.core.transport.RpcServer (see its
// import list), so the interface must live in that package, not common.transport.
package com.t598.core.transport;
/**
 * Transport-agnostic server abstraction. Implemented by both the legacy BIO
 * server and NettyServer so the framework supports either transport.
 */
public interface RpcServer {
    /**
     * Starts the server and listens for requests on the given port.
     *
     * @param port the TCP port to bind
     */
    void start(int port);
}
2.NettyServer和NettyClient分别实现RpcServer和RpcClient接口
NettyServer
package com.t598.core.transport.netty.server;
import com.t598.core.codec.CommonDecoder;
import com.t598.core.codec.CommonEncoder;
import com.t598.core.serializer.JsonSerializer;
import com.t598.core.transport.RpcServer;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Netty-based RpcServer implementation. Builds a NIO server pipeline of
 * CommonEncoder / CommonDecoder / NettyServerHandler and blocks until the
 * server channel closes.
 */
public class NettyServer implements RpcServer {

    private static final Logger logger = LoggerFactory.getLogger(NettyServer.class);

    /**
     * Starts the server and listens on the given port (blocking call).
     *
     * @param port the TCP port to bind
     */
    @Override
    public void start(int port) {
        // Two event-loop groups: boss accepts connections, worker handles I/O.
        NioEventLoopGroup bossGroup = new NioEventLoopGroup();
        NioEventLoopGroup workGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap serverBootstrap = new ServerBootstrap();
            serverBootstrap.group(bossGroup, workGroup)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO)) // log accept events
                    .option(ChannelOption.SO_BACKLOG, 256) // pending-connection queue length
                    // FIX: SO_KEEPALIVE applies to accepted child channels, not the
                    // listening socket — it must be a childOption to have any effect.
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .childOption(ChannelOption.TCP_NODELAY, true) // disable Nagle, send immediately
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel socketChannel) throws Exception {
                            ChannelPipeline pipeline = socketChannel.pipeline();
                            pipeline.addLast(new CommonEncoder(new JsonSerializer())); // outbound: frame + JSON
                            pipeline.addLast(new CommonDecoder()); // inbound: parse protocol frame
                            pipeline.addLast(new NettyServerHandler()); // inbound: dispatch RpcRequest
                        }
                    });
            ChannelFuture future = serverBootstrap.bind(port).sync(); // bind and wait
            future.channel().closeFuture().sync(); // block until the server channel closes
        } catch (InterruptedException e) {
            logger.error("启动服务器时有错误发生: ", e);
            Thread.currentThread().interrupt(); // restore the interrupt flag
        } finally {
            // Always release the event loops.
            bossGroup.shutdownGracefully();
            workGroup.shutdownGracefully();
        }
    }
}
NettyClient
package com.t598.core.transport.netty.client;
import com.t598.common.entity.RpcRequest;
import com.t598.common.entity.RpcResponse;
import com.t598.core.codec.CommonDecoder;
import com.t598.core.codec.CommonEncoder;
import com.t598.core.serializer.JsonSerializer;
import com.t598.core.transport.RpcClient;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.AttributeKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Netty-based RpcClient implementation. Opens a connection per request,
 * writes the RpcRequest through the encoder pipeline, and reads the response
 * back out of a channel attribute after the channel closes.
 */
public class NettyClient implements RpcClient {

    private static final Logger logger = LoggerFactory.getLogger(NettyClient.class);

    private final String host;
    private final int port;
    private static final Bootstrap bootstrap;

    public NettyClient(String host, int port) {
        this.host = host;
        this.port = port;
    }

    /**
     * One-time client bootstrap configuration, shared by all instances,
     * hence the static initializer.
     */
    static {
        NioEventLoopGroup group = new NioEventLoopGroup();
        bootstrap = new Bootstrap();
        bootstrap.group(group)
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel socketChannel) throws Exception {
                        ChannelPipeline pipeline = socketChannel.pipeline();
                        pipeline.addLast(new CommonDecoder());
                        pipeline.addLast(new CommonEncoder(new JsonSerializer()));
                        pipeline.addLast(new NettyClientHandler());
                    }
                })
                .option(ChannelOption.SO_KEEPALIVE, true);
    }

    /**
     * Sends an RpcRequest and returns the response payload.
     *
     * @param rpcRequest the request to send
     * @return the remote call's result data, or null on failure
     */
    @Override
    public Object sendRequest(RpcRequest rpcRequest) {
        try {
            ChannelFuture future = bootstrap.connect(host, port).sync(); // block until connected
            logger.info("客户端连接到服务器:{}:{}", host, port);
            Channel channel = future.channel();
            if (channel != null) {
                // The write is asynchronous; the listener only reports send success/failure.
                channel.writeAndFlush(rpcRequest).addListener(future1 -> {
                    if (future1.isSuccess()) {
                        logger.info("客户端发送消息:{}", rpcRequest);
                    } else {
                        // FIX: log the WRITE future's cause (future1), not the
                        // already-completed connect future's (future).
                        logger.error("发送消息时有错误发生:", future1.cause());
                    }
                });
                channel.closeFuture().sync(); // wait for the channel to close
                // Read the response out of the channel attribute set by the handler.
                // NOTE(review): the NettyClientHandler shown in this document completes
                // UnprocessedRequests and never sets this attribute — verify which
                // response path is the current one.
                AttributeKey<RpcResponse> key = AttributeKey.valueOf("rpcResponse");
                RpcResponse rpcResponse = channel.attr(key).get();
                if (rpcResponse == null) {
                    // No response arrived (e.g. server error) — avoid an NPE.
                    logger.error("未收到服务器响应");
                    return null;
                }
                return rpcResponse.getData();
            }
        } catch (InterruptedException e) {
            logger.error("发送消息时有错误发生:", e);
            Thread.currentThread().interrupt(); // restore the interrupt flag
        }
        return null;
    }
}
3.Handler
NettyClientHandler
package com.t598.core.transport.netty.client;
import com.t598.common.entity.RpcRequest;
import com.t598.common.entity.RpcResponse;
import com.t598.common.factory.SingletonFactory;
import com.t598.core.serializer.CommonSerializer;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.ReferenceCountUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
/**
 * Client-side inbound handler: completes pending requests when a response
 * arrives and sends heartbeat packets when the connection is write-idle.
 */
public class NettyClientHandler extends SimpleChannelInboundHandler<RpcResponse> {

    private static final Logger logger = LoggerFactory.getLogger(NettyClientHandler.class);

    private final UnprocessedRequests unprocessedRequests;

    public NettyClientHandler() {
        this.unprocessedRequests = SingletonFactory.getInstance(UnprocessedRequests.class);
    }

    /** Completes the matching pending request future with the received response. */
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, RpcResponse msg) throws Exception {
        try {
            // Parameterized logging instead of String.format (lazy formatting).
            logger.info("客户端接收到消息: {}", msg);
            unprocessedRequests.complete(msg);
        } finally {
            // Release the reference-counted message to avoid a buffer leak.
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // FIX: log the throwable through SLF4J (full stack trace) instead of
        // printStackTrace() to stderr.
        logger.error("过程调用时有错误发生:", cause);
        ctx.close();
    }

    /** Sends a heartbeat RpcRequest when the idle-state handler reports write-idle. */
    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof IdleStateEvent) {
            IdleState state = ((IdleStateEvent) evt).state();
            if (state == IdleState.WRITER_IDLE) {
                logger.info("发送心跳包 [{}]", ctx.channel().remoteAddress());
                Channel channel = ChannelProvider.get((InetSocketAddress) ctx.channel().remoteAddress(),
                        CommonSerializer.getByte(CommonSerializer.DEFAULT_SERIALIZER));
                RpcRequest rpcRequest = new RpcRequest();
                rpcRequest.setHeartBeat(true);
                channel.writeAndFlush(rpcRequest).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            }
        } else {
            super.userEventTriggered(ctx, evt);
        }
    }
}
NettyServerHandler
package com.t598.core.transport.netty.server;
import com.t598.common.entity.RpcRequest;
import com.t598.common.entity.RpcResponse;
import com.t598.common.factory.SingletonFactory;
import com.t598.core.handler.RequestHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.ReferenceCountUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Server-side inbound handler: dispatches RpcRequests to the RequestHandler,
 * answers heartbeats, and closes read-idle connections.
 */
public class NettyServerHandler extends SimpleChannelInboundHandler<RpcRequest> {

    private static final Logger logger = LoggerFactory.getLogger(NettyServerHandler.class);

    private final RequestHandler requestHandler;

    public NettyServerHandler() {
        this.requestHandler = SingletonFactory.getInstance(RequestHandler.class);
    }

    /** Handles one RpcRequest and writes the RpcResponse back, if the channel allows it. */
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, RpcRequest msg) throws Exception {
        try {
            // Heartbeat packets carry no call; just acknowledge in the log.
            if (msg.getHeartBeat()) {
                logger.info("接收到客户端心跳包...");
                return;
            }
            logger.info("服务器接收到请求: {}", msg);
            Object result = requestHandler.handle(msg);
            if (ctx.channel().isActive() && ctx.channel().isWritable()) {
                ctx.writeAndFlush(RpcResponse.success(result, msg.getRequestId()));
            } else {
                logger.error("通道不可写");
            }
        } finally {
            // Release the reference-counted message to avoid a buffer leak.
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // FIX: log the throwable through SLF4J (full stack trace) instead of
        // printStackTrace() to stderr.
        logger.error("处理过程调用时有错误发生:", cause);
        ctx.close();
    }

    /** Closes the connection when no heartbeat arrives within the read-idle window. */
    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof IdleStateEvent) {
            IdleState state = ((IdleStateEvent) evt).state();
            if (state == IdleState.READER_IDLE) {
                logger.info("长时间未收到心跳包,断开连接...");
                ctx.close();
            }
        } else {
            super.userEventTriggered(ctx, evt);
        }
    }
}
4.额外的类
PackageType(RPC-COMMON)
package com.t598.common.enumeration;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * Package-type marker written into the protocol frame: distinguishes a
 * request packet from a response packet.
 */
public enum PackageType {

    REQUEST_PACK(0),
    RESPONSE_PACK(1);

    private final int code;

    // Explicit constructor and accessor in place of the Lombok annotations;
    // the generated API (getCode) is identical.
    PackageType(int code) {
        this.code = code;
    }

    public int getCode() {
        return code;
    }
}
SerializerCode(RPC-COMMON)
package com.t598.common.enumeration;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * Identifies the serializer/deserializer used for a frame's payload;
 * the code is written into the protocol frame's serializer-type field.
 */
public enum SerializerCode {

    KRYO(0),
    JSON(1),
    HESSIAN(2),
    PROTOBUF(3);

    private final int code;

    // Explicit constructor and accessor in place of the Lombok annotations;
    // the generated API (getCode) is identical.
    SerializerCode(int code) {
        this.code = code;
    }

    public int getCode() {
        return code;
    }
}