Preface
Thrift is an RPC framework originally developed at Facebook and later contributed to Apache, where it became an open-source project. It supports many languages, including C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, and Smalltalk. This series of articles traces through the source code to explain how it works; for detailed usage, see https://www.ibm.com/developerworks/cn/java/j-lo-apachethrift/ .
Here is an architecture diagram borrowed from the web:
Interface layer: write a Thrift IDL file, then use the thrift compiler to generate the Java interface code (a minimal end-to-end sketch follows this list)
Business layer: implement the interfaces generated above with your own business logic
Protocol layer: serialize the transmitted data in an agreed-upon format, e.g. binary or JSON
Transport layer: drive the input/output streams that actually read and write the data
Server: listens on a port and receives data, offering different IO and threading models
Client: establishes a connection to the server and exchanges data
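To make the layering concrete, here is the minimal end-to-end sketch referenced above. The service name EchoService, the handler EchoHandler, and the port are all hypothetical, assuming the code was generated from an IDL such as service EchoService { string echo(1: string msg) }:
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

// Hypothetical IDL (echo.thrift): service EchoService { string echo(1: string msg) }
// `thrift --gen java echo.thrift` generates EchoService.Iface/.Client/.Processor.

// Business layer: implement the generated interface.
class EchoHandler implements EchoService.Iface {
    @Override
    public String echo(String msg) {
        return msg; // business logic goes here
    }
}

// Client: transport (socket) + protocol (binary) + generated stub.
public class EchoClient {
    public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9090);
        transport.open();
        EchoService.Client client = new EchoService.Client(new TBinaryProtocol(transport));
        System.out.println(client.echo("hello")); // the RPC call over the socket
        transport.close();
    }
}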
Server
First, a class diagram:
TServer is the top-level abstraction; it defines the start and stop methods. Its subclasses implement different threading/IO models:
1. TSimpleServer: as the name suggests, the simplest server model, the original single-threaded blocking-IO server. Its startup code:
public class TSimpleServer extends TServer {
public void serve() {
...
while (!stopped_) { // finish serving the current client before accepting the next
TTransport client = null;
TProcessor processor = null;
TTransport inputTransport = null;
TTransport outputTransport = null;
TProtocol inputProtocol = null;
TProtocol outputProtocol = null;
ServerContext connectionContext = null;
try {
client = serverTransport_.accept(); // accept a connection (blocking)
if (client != null) {
processor = processorFactory_.getProcessor(client);
// wrap the accepted connection to obtain the input/output transports and protocols
inputTransport = inputTransportFactory_.getTransport(client);
outputTransport = outputTransportFactory_.getTransport(client);
inputProtocol = inputProtocolFactory_.getProtocol(inputTransport);
outputProtocol = outputProtocolFactory_.getProtocol(outputTransport);
if (eventHandler_ != null) {
connectionContext = eventHandler_.createContext(inputProtocol, outputProtocol);
}
while (true) {
...
if (!processor.process(inputProtocol, outputProtocol)) { // dispatch to the business processor
break;
}
}
}
...
}
setServing(false);
}
}
It accepts one connection, hands it to the business processor, waits until the client closes, and only then goes back to accept the next request: single-threaded and blocking throughout. Under any real production load it would fall over immediately.
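For completeness, starting a TSimpleServer takes only a few lines. A minimal sketch, reusing the hypothetical EchoService/EchoHandler from the earlier example:
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TSimpleServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TServerTransport;

public class SimpleServerDemo {
    public static void main(String[] args) throws Exception {
        TServerTransport serverTransport = new TServerSocket(9090); // blocking listen socket
        TServer server = new TSimpleServer(new TServer.Args(serverTransport)
                .processor(new EchoService.Processor<>(new EchoHandler())));
        server.serve(); // enters the single-threaded accept/process loop shown above
    }
}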
2. TThreadPoolServer: a variant of TSimpleServer using multi-threaded blocking IO; after accept, the TTransport is handed to a thread pool for processing:
public class TThreadPoolServer extends TServer {
public void serve() {
...
while (!stopped_) {
try {
TTransport client = serverTransport_.accept();
WorkerProcess wp = new WorkerProcess(client);
int retryCount = 0;
long remainTimeInMillis = requestTimeoutUnit.toMillis(requestTimeout);
while (true) {
try {
// submit to the pool; the default queue is a SynchronousQueue, where an offer only succeeds when a worker is ready to take the task, hence the retry loop.
executorService_.execute(wp);
break;
} catch (Throwable t) {
...
}
}
} catch (TTransportException ttx) {
if (!stopped_) {
++failureCount;
LOGGER.warn("Transport error occurred during acceptance of message.", ttx);
}
}
}
...
setServing(false);
}
// create the default thread pool
private static ExecutorService createDefaultExecutorService(Args args) {
SynchronousQueue<Runnable> executorQueue = new SynchronousQueue<Runnable>(); // zero-capacity hand-off queue
return new ThreadPoolExecutor(args.minWorkerThreads, args.maxWorkerThreads, args.stopTimeoutVal,
TimeUnit.SECONDS, executorQueue);
}
private class WorkerProcess implements Runnable {
/**
* Client that this services.
*/
private TTransport client_;
/**
* Default constructor.
*
* @param client
* Transport to process
*/
private WorkerProcess(TTransport client) {
client_ = client;
}
/**
* Loops on processing a client forever
*/
public void run() { // same per-connection logic as TSimpleServer
TProcessor processor = null;
TTransport inputTransport = null;
TTransport outputTransport = null;
TProtocol inputProtocol = null;
TProtocol outputProtocol = null;
TServerEventHandler eventHandler = null;
ServerContext connectionContext = null;
try {
processor = processorFactory_.getProcessor(client_);
inputTransport = inputTransportFactory_.getTransport(client_);
outputTransport = outputTransportFactory_.getTransport(client_);
inputProtocol = inputProtocolFactory_.getProtocol(inputTransport);
outputProtocol = outputProtocolFactory_.getProtocol(outputTransport);
eventHandler = getEventHandler();
if (eventHandler != null) {
connectionContext = eventHandler.createContext(inputProtocol, outputProtocol);
}
// we check stopped_ first to make sure we're not supposed to be
// shutting
// down. this is necessary for graceful shutdown.
while (true) {
if (eventHandler != null) {
eventHandler.processContext(connectionContext, inputTransport, outputTransport);
}
if (stopped_ || !processor.process(inputProtocol, outputProtocol)) {
break;
}
}
...
}
}
}
The default internal thread pool uses a SynchronousQueue: an offer succeeds only if a consumer thread is already waiting to take the task. This means that serving 1000 concurrent requests requires 1000 worker threads, so this model only fits when you know the expected concurrency up front and it is not too large.
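A minimal configuration sketch (hypothetical names and sizes; minWorkerThreads/maxWorkerThreads bound the pool created by createDefaultExecutorService above):
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;

public class ThreadPoolServerDemo {
    public static void main(String[] args) throws Exception {
        TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(new TServerSocket(9090));
        serverArgs.processor(new EchoService.Processor<>(new EchoHandler()));
        serverArgs.minWorkerThreads(10);   // core size of the default pool
        serverArgs.maxWorkerThreads(1000); // must cover the expected number of concurrent clients
        new TThreadPoolServer(serverArgs).serve();
    }
}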
3. TNonblockingServer: uses non-blocking IO, which avoids blocking on any individual client. Its accept, read, and write interest are all registered on a single Selector, and one SelectAcceptThread services those events. It extends AbstractNonblockingServer, which wraps each connection in a FrameBuffer that buffers the input/output streams and also drives the RPC invocation; the FrameBuffer is described in a later article, and this one covers only the server threading/IO models.
public class TNonblockingServer extends AbstractNonblockingServer {
/**
* Start the selector thread to deal with accepts and client messages.
*
* @return true if everything went ok, false if we couldn't start for some
* reason.
*/
@Override
protected boolean startThreads() {
// start the selector
try { // start a single thread to handle all events
selectAcceptThread_ = new SelectAcceptThread((TNonblockingServerTransport) serverTransport_);
selectAcceptThread_.start();
return true;
} catch (IOException e) {
LOGGER.error("Failed to start selector thread!", e);
return false;
}
}
// RPC invocation; abstract in AbstractNonblockingServer, each subclass supplies its own implementation
@Override
protected boolean requestInvoke(FrameBuffer frameBuffer) {
frameBuffer.invoke(); // the buffer fetches the processor and runs the business logic
return true;
}
/**
* The thread that will be doing all the selecting, managing new connections
* and those that still need to be read.
*/
protected class SelectAcceptThread extends AbstractSelectThread {
// The server transport on which new client transports will be accepted
private final TNonblockingServerTransport serverTransport;
/**
* Set up the thread that will handle the non-blocking accepts, reads,
* and writes.
*/
public SelectAcceptThread(final TNonblockingServerTransport serverTransport) throws IOException {
this.serverTransport = serverTransport;
serverTransport.registerSelector(selector); // register with the java.nio.channels.Selector
}
public boolean isStopped() {
return stopped_;
}
/**
* The work loop. Handles both selecting (all IO operations) and
* managing the selection preferences of all existing connections.
*/
public void run() {
try {
if (eventHandler_ != null) {
eventHandler_.preServe();
}
while (!stopped_) {
select(); // wait for and handle events
processInterestChanges();
}
for (SelectionKey selectionKey : selector.keys()) {
cleanupSelectionKey(selectionKey);
}
} catch (Throwable t) {
LOGGER.error("run() exiting due to uncaught error", t);
} finally {
try {
selector.close();
} catch (IOException e) {
LOGGER.error("Got an IOException while closing selector!", e);
}
stopped_ = true;
}
}
/**
* Select and process IO events appropriately: If there are connections
* to be accepted, accept them. If there are existing connections with
* data waiting to be read, read it, buffering until a whole frame has
* been read. If there are any pending responses, buffer them until
* their target client is available, and then send the data.
*/
private void select() {
try {
// wait for io events.
selector.select(); // the JDK select: block until events arrive
// process the io events we received
Iterator<SelectionKey> selectedKeys = selector.selectedKeys().iterator();
while (!stopped_ && selectedKeys.hasNext()) {
SelectionKey key = selectedKeys.next();
selectedKeys.remove();
// skip if not valid
if (!key.isValid()) {
cleanupSelectionKey(key);
continue;
}
// if the key is marked Accept, then it has to be the server
// transport.
if (key.isAcceptable()) {
handleAccept();
} else if (key.isReadable()) {
// deal with reads
handleRead(key);
} else if (key.isWritable()) {
// deal with writes
handleWrite(key);
} else {
LOGGER.warn("Unexpected state in select! " + key.interestOps());
}
}
} catch (IOException e) {
LOGGER.warn("Got an IOException while selecting!", e);
}
}
protected FrameBuffer createFrameBuffer(final TNonblockingTransport trans, final SelectionKey selectionKey,
final AbstractSelectThread selectThread) {
return processorFactory_.isAsyncProcessor() ? new AsyncFrameBuffer(trans, selectionKey, selectThread)
: new FrameBuffer(trans, selectionKey, selectThread);
}
/**
* Accept a new connection.
*/
private void handleAccept() throws IOException {
SelectionKey clientKey = null;
TNonblockingTransport client = null;
try {
// accept the connection
client = (TNonblockingTransport) serverTransport.accept(); // a new connection has arrived
clientKey = client.registerSelector(selector, SelectionKey.OP_READ); // register its read events with the selector
// add this key to the map
// attach a FrameBuffer to the key; it manages the IO streams and later invokes the processor
FrameBuffer frameBuffer = createFrameBuffer(client, clientKey, SelectAcceptThread.this);
clientKey.attach(frameBuffer);
} catch (TTransportException tte) {
// something went wrong accepting.
LOGGER.warn("Exception trying to accept!", tte);
tte.printStackTrace();
if (clientKey != null)
cleanupSelectionKey(clientKey);
if (client != null)
client.close();
}
}
} // SelectAcceptThread
}
The AbstractSelectThread that SelectAcceptThread extends:
protected abstract class AbstractSelectThread extends Thread {
// the read/write handlers shared by the select threads
/**
* Do the work required to read from a readable client. If the frame is
* fully read, then invoke the method call.
*/
protected void handleRead(SelectionKey key) {
FrameBuffer buffer = (FrameBuffer) key.attachment();
if (!buffer.read()) { // read from the input stream into the buffer
cleanupSelectionKey(key);
return;
}
// if the buffer's frame read is complete, invoke the method.
if (buffer.isFrameFullyRead()) {
if (!requestInvoke(buffer)) { // a complete frame has been read, so invoke the call
cleanupSelectionKey(key);
}
}
}
/**
* Let a writable client get written, if there's data to be written.
*/
protected void handleWrite(SelectionKey key) {
FrameBuffer buffer = (FrameBuffer) key.attachment();
if (!buffer.write()) { // write pending data
cleanupSelectionKey(key);
}
}
}
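A minimal sketch of wiring up a TNonblockingServer (hypothetical names again). One practical note: the nonblocking servers exchange length-prefixed frames, which is exactly what the FrameBuffer reads, so the client must wrap its socket in a TFramedTransport:
import org.apache.thrift.server.TNonblockingServer;
import org.apache.thrift.transport.TNonblockingServerSocket;

public class NonblockingServerDemo {
    public static void main(String[] args) throws Exception {
        TNonblockingServerSocket socket = new TNonblockingServerSocket(9090);
        TNonblockingServer server = new TNonblockingServer(new TNonblockingServer.Args(socket)
                .processor(new EchoService.Processor<>(new EchoHandler())));
        server.serve(); // runs the single SelectAcceptThread shown above
    }
}
// Client side, framed:
// TTransport transport = new TFramedTransport(new TSocket("localhost", 9090));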
4. THsHaServer: extends TNonblockingServer (half-sync/half-async). It differs from its parent only in the requestInvoke implementation: the parent calls frameBuffer.invoke() directly on the selector thread, whereas THsHaServer initializes a thread pool and submits frameBuffer.invoke() to it:
public class THsHaServer extends TNonblockingServer {
/**
* We override the standard invoke method here to queue the invocation for
* invoker service instead of immediately invoking. The thread pool takes
* care of the rest.
*/
@Override
protected boolean requestInvoke(FrameBuffer frameBuffer) {
try {
Runnable invocation = getRunnable(frameBuffer);
invoker.execute(invocation);
return true;
} catch (RejectedExecutionException rx) {
LOGGER.warn("ExecutorService rejected execution!", rx);
return false;
}
}
protected Runnable getRunnable(FrameBuffer frameBuffer) {
return new Invocation(frameBuffer);
}
}
/**
* An Invocation represents a method call that is prepared to execute, given an
* idle worker thread. It contains the input and output protocols the thread's
* processor should use to perform the usual Thrift invocation.
*/
class Invocation implements Runnable {
private final FrameBuffer frameBuffer;
public Invocation(final FrameBuffer frameBuffer) {
this.frameBuffer = frameBuffer;
}
public void run() {
frameBuffer.invoke();
}
}
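A minimal THsHaServer sketch (hypothetical names and pool sizes; the worker-thread setters used here are the ones exposed by THsHaServer.Args in recent libthrift versions):
import org.apache.thrift.server.THsHaServer;
import org.apache.thrift.transport.TNonblockingServerSocket;

public class HsHaServerDemo {
    public static void main(String[] args) throws Exception {
        THsHaServer.Args serverArgs = new THsHaServer.Args(new TNonblockingServerSocket(9090));
        serverArgs.processor(new EchoService.Processor<>(new EchoHandler()));
        serverArgs.minWorkerThreads(5);  // pool that executes frameBuffer.invoke()
        serverArgs.maxWorkerThreads(20);
        new THsHaServer(serverArgs).serve(); // selection still happens on one thread
    }
}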
5. TThreadedSelectorServer: also uses non-blocking IO, but accept is registered on a dedicated AcceptThread, while read and write are registered across multiple SelectorThreads. When an accept event arrives, the new connection is placed on one SelectorThread's queue.
public class TThreadedSelectorServer extends AbstractNonblockingServer {
@Override
protected boolean startThreads() {
try {
for (int i = 0; i < args.selectorThreads; ++i) {
selectorThreads.add(new SelectorThread(args.acceptQueueSizePerThread));
}
acceptThread = new AcceptThread((TNonblockingServerTransport) serverTransport_,
createSelectorThreadLoadBalancer(selectorThreads)); // a single thread handles accept events
for (SelectorThread thread : selectorThreads) { // multiple threads handle read/write events
thread.start();
}
acceptThread.start();
return true;
} catch (IOException e) {
LOGGER.error("Failed to start threads!", e);
return false;
}
}
/**
* We override the standard invoke method here to queue the invocation for
* invoker service instead of immediately invoking. If there is no thread
* pool, handle the invocation inline on this thread
*/
@Override
protected boolean requestInvoke(FrameBuffer frameBuffer) {
Runnable invocation = getRunnable(frameBuffer);
if (invoker != null) {
try {
invoker.execute(invocation); // run the buffer's RPC invocation on the worker pool
return true;
} catch (RejectedExecutionException rx) {
LOGGER.warn("ExecutorService rejected execution!", rx);
return false;
}
} else {
// Invoke on the caller's thread
invocation.run();
return true;
}
}
/**
* The thread that selects on the server transport (listen socket) and
* accepts new connections to hand off to the IO selector threads
*/
protected class AcceptThread extends Thread {
// The listen socket to accept on
private final TNonblockingServerTransport serverTransport;
private final Selector acceptSelector;
private final SelectorThreadLoadBalancer threadChooser; // picks a selector thread for each new connection
/**
* Set up the AcceptThead
*
* @throws IOException
*/
public AcceptThread(TNonblockingServerTransport serverTransport, SelectorThreadLoadBalancer threadChooser)
throws IOException {
this.serverTransport = serverTransport;
this.threadChooser = threadChooser;
this.acceptSelector = SelectorProvider.provider().openSelector();
this.serverTransport.registerSelector(acceptSelector); // register the listen socket with the accept selector
}
/**
* The work loop. Selects on the server transport and accepts. If there
* was a server transport that had blocking accepts, and returned on
* blocking client transports, that should be used instead
*/
public void run() {
try {
if (eventHandler_ != null) {
eventHandler_.preServe();
}
while (!stopped_) {
select(); // loop on select
}
} catch (Throwable t) {
LOGGER.error("run() on AcceptThread exiting due to uncaught error", t);
} finally {
try {
acceptSelector.close();
} catch (IOException e) {
LOGGER.error("Got an IOException while closing accept selector!", e);
}
// This will wake up the selector threads
TThreadedSelectorServer.this.stop();
}
}
/**
* If the selector is blocked, wake it up.
*/
public void wakeupSelector() {
acceptSelector.wakeup();
}
/**
* Select and process IO events appropriately: If there are connections
* to be accepted, accept them.
*/
private void select() {
try {
// wait for connect events.
acceptSelector.select(); // the underlying JDK select
// process the io events we received
Iterator<SelectionKey> selectedKeys = acceptSelector.selectedKeys().iterator();
while (!stopped_ && selectedKeys.hasNext()) {
SelectionKey key = selectedKeys.next();
selectedKeys.remove();
// skip if not valid
if (!key.isValid()) {
continue;
}
if (key.isAcceptable()) { // an accept event: a client has connected
handleAccept();
} else {
LOGGER.warn("Unexpected state in select! " + key.interestOps());
}
}
} catch (IOException e) {
LOGGER.warn("Got an IOException while selecting!", e);
}
}
/**
* Accept a new connection.
*/
private void handleAccept() {
final TNonblockingTransport client = doAccept(); // obtain the connection
if (client != null) {
// Pass this connection to a selector thread
final SelectorThread targetThread = threadChooser.nextThread(); // pick a selector thread
if (args.acceptPolicy == Args.AcceptPolicy.FAST_ACCEPT || invoker == null) {
doAddAccept(targetThread, client); // hand the connection off to the selector thread
} else {
// FAIR_ACCEPT
try {
invoker.submit(new Runnable() {
public void run() {
doAddAccept(targetThread, client);
}
});
} catch (RejectedExecutionException rx) {
LOGGER.warn("ExecutorService rejected accept registration!", rx);
// close immediately
client.close();
}
}
}
}
private TNonblockingTransport doAccept() {
try {
return (TNonblockingTransport) serverTransport.accept();
} catch (TTransportException tte) {
// something went wrong accepting.
LOGGER.warn("Exception trying to accept!", tte);
return null;
}
}
private void doAddAccept(SelectorThread thread, TNonblockingTransport client) {
if (!thread.addAcceptedConnection(client)) { // enqueue on the selector thread's queue
client.close();
}
}
} // AcceptThread
/**
* The SelectorThread(s) will be doing all the selecting on accepted active
* connections.
*/
protected class SelectorThread extends AbstractSelectThread {
// Accepted connections added by the accept thread.
private final BlockingQueue<TNonblockingTransport> acceptedQueue; // accepted connections are queued here
/**
* Hands off an accepted connection to be handled by this thread. This
* method will block if the queue for new connections is at capacity.
*
* @param accepted
* The connection that has been accepted.
* @return true if the connection has been successfully added.
*/
public boolean addAcceptedConnection(TNonblockingTransport accepted) { // called by the AcceptThread
try {
acceptedQueue.put(accepted);
} catch (InterruptedException e) {
LOGGER.warn("Interrupted while adding accepted connection!", e);
return false;
}
selector.wakeup();
return true;
}
/**
* The work loop. Handles selecting (read/write IO), dispatching, and
* managing the selection preferences of all existing connections.
*/
public void run() {
try {
while (!stopped_) {
select(); // loop on select
processAcceptedConnections();
processInterestChanges();
}
for (SelectionKey selectionKey : selector.keys()) {
cleanupSelectionKey(selectionKey);
}
} catch (Throwable t) {
LOGGER.error("run() on SelectorThread exiting due to uncaught error", t);
} finally {
try {
selector.close();
} catch (IOException e) {
LOGGER.error("Got an IOException while closing selector!", e);
}
// This will wake up the accept thread and the other selector
// threads
TThreadedSelectorServer.this.stop();
}
}
/**
* Select and process IO events appropriately: If there are existing
* connections with data waiting to be read, read it, buffering until a
* whole frame has been read. If there are any pending responses, buffer
* them until their target client is available, and then send the data.
*/
private void select() {
try {
// wait for io events.
selector.select(); // the JDK select
// process the io events we received
Iterator<SelectionKey> selectedKeys = selector.selectedKeys().iterator();
while (!stopped_ && selectedKeys.hasNext()) {
SelectionKey key = selectedKeys.next();
selectedKeys.remove();
// skip if not valid
if (!key.isValid()) {
cleanupSelectionKey(key);
continue;
}
if (key.isReadable()) {
// deal with reads
handleRead(key);
} else if (key.isWritable()) {
// deal with writes
handleWrite(key);
} else {
LOGGER.warn("Unexpected state in select! " + key.interestOps());
}
}
} catch (IOException e) {
LOGGER.warn("Got an IOException while selecting!", e);
}
}
// register read events for the connections received from the accept thread
private void processAcceptedConnections() {
// Register accepted connections
while (!stopped_) {
TNonblockingTransport accepted = acceptedQueue.poll();
if (accepted == null) {
break;
}
registerAccepted(accepted);
}
}
protected FrameBuffer createFrameBuffer(final TNonblockingTransport trans, final SelectionKey selectionKey,
final AbstractSelectThread selectThread) {
return processorFactory_.isAsyncProcessor() ? new AsyncFrameBuffer(trans, selectionKey, selectThread)
: new FrameBuffer(trans, selectionKey, selectThread);
}
private void registerAccepted(TNonblockingTransport accepted) {
SelectionKey clientKey = null;
try {
clientKey = accepted.registerSelector(selector, SelectionKey.OP_READ);
// after registering, attach a FrameBuffer to the key
FrameBuffer frameBuffer = createFrameBuffer(accepted, clientKey, SelectorThread.this);
clientKey.attach(frameBuffer);
} catch (IOException e) {
LOGGER.warn("Failed to register accepted connection to selector!", e);
if (clientKey != null) {
cleanupSelectionKey(clientKey);
}
accepted.close();
}
}
} // SelectorThread
// read/write event handling is inherited from AbstractSelectThread
}
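A minimal sketch wiring the three thread groups together: one accept thread, selectorThreads IO threads, and a worker pool for invocations (hypothetical names and sizes):
import org.apache.thrift.server.TThreadedSelectorServer;
import org.apache.thrift.transport.TNonblockingServerSocket;

public class ThreadedSelectorServerDemo {
    public static void main(String[] args) throws Exception {
        TThreadedSelectorServer.Args serverArgs =
                new TThreadedSelectorServer.Args(new TNonblockingServerSocket(9090));
        serverArgs.processor(new EchoService.Processor<>(new EchoHandler()));
        serverArgs.selectorThreads(4); // threads running the read/write select loops
        serverArgs.workerThreads(20);  // pool that executes frameBuffer.invoke()
        new TThreadedSelectorServer(serverArgs).serve();
    }
}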
6. TServlet/TExtensibleServlet: Thrift also supports HTTP. TServlet extends javax.servlet.http.HttpServlet and overrides its lifecycle methods init(), doPost(), and doGet():
initialization wires up the protocol factories and the processor (in TServlet's case via the constructor, as shown below),
and doPost()/doGet() handle the actual requests.
/**
* Servlet implementation class ThriftServer
*/
public class TServlet extends HttpServlet {
private final TProcessor processor;
private final TProtocolFactory inProtocolFactory;
private final TProtocolFactory outProtocolFactory;
private final Collection<Map.Entry<String, String>> customHeaders;
/**
* @see HttpServlet#HttpServlet()
*/
public TServlet(TProcessor processor, TProtocolFactory inProtocolFactory, TProtocolFactory outProtocolFactory) {
super();
this.processor = processor;
this.inProtocolFactory = inProtocolFactory;
this.outProtocolFactory = outProtocolFactory;
this.customHeaders = new ArrayList<Map.Entry<String, String>>();
}
/**
* @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse
* response)
*/
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
TTransport inTransport = null;
TTransport outTransport = null;
try {
response.setContentType("application/x-thrift");
if (null != this.customHeaders) {
for (Map.Entry<String, String> header : this.customHeaders) {
response.addHeader(header.getKey(), header.getValue());
}
}
InputStream in = request.getInputStream();
OutputStream out = response.getOutputStream();
TTransport transport = new TIOStreamTransport(in, out);
inTransport = transport;
outTransport = transport;
// build the input/output protocols
TProtocol inProtocol = inProtocolFactory.getProtocol(inTransport);
TProtocol outProtocol = outProtocolFactory.getProtocol(outTransport);
processor.process(inProtocol, outProtocol); // dispatch to the business processor
out.flush();
} catch (TException te) {
throw new ServletException(te);
}
}
/**
* @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse
* response)
*/
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
doPost(request, response);
}
}
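A minimal sketch of exposing a processor over HTTP by subclassing TServlet (the servlet path and service names are hypothetical; the single-factory constructor shown uses the same protocol for both directions):
import javax.servlet.annotation.WebServlet;
import org.apache.thrift.protocol.TJSONProtocol;
import org.apache.thrift.server.TServlet;

@WebServlet("/thrift/echo")
public class EchoServlet extends TServlet {
    public EchoServlet() {
        super(new EchoService.Processor<>(new EchoHandler()),
                new TJSONProtocol.Factory()); // one factory used for both input and output
    }
}
// A matching client could use THttpClient:
// THttpClient transport = new THttpClient("http://localhost:8080/thrift/echo");
// EchoService.Client client = new EchoService.Client(new TJSONProtocol(transport));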