I. The relationship between RPC and IPC
For studying the Android system, the Binder mechanism is what root-filesystem mounting is to studying the Linux kernel: a mountain that has to be climbed, both in complexity and in importance.
1. Basic RPC flow
After invoking a remote procedure, the caller enters a waiting state. The message sent to the remote procedure carries the call's arguments, and the returned message carries the execution result; once the message containing the result arrives, the local process extracts the result and the calling process resumes execution. On the server side, a program sits waiting for calls; when one arrives, the server process takes the arguments, computes the result, and sends the result back.
Calls can be synchronous or asynchronous; the server may create a thread to receive client requests, or receive them itself.
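As a minimal sketch of this request/reply pattern (plain C++, not Binder code; the names Request, remote_add and server_loop are invented for illustration), the caller packs its arguments into a request, blocks until the reply arrives, and a server thread loops taking requests, computing results, and posting them back:
#include <condition_variable>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

struct Request { int a, b; std::promise<int> reply; }; // "message": arguments plus a slot for the result

std::queue<Request> channel;                           // stand-in for the IPC channel
std::mutex m;
std::condition_variable cv;
bool stop = false;

int remote_add(int a, int b) {                         // client-side stub
    Request req{a, b, {}};
    std::future<int> result = req.reply.get_future();
    { std::lock_guard<std::mutex> lk(m); channel.push(std::move(req)); }
    cv.notify_one();                                   // "wake up" the server
    return result.get();                               // caller blocks until the server replies
}

void server_loop() {                                   // server waits for incoming calls
    for (;;) {
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [] { return !channel.empty() || stop; });
        if (stop && channel.empty()) return;
        Request req = std::move(channel.front());
        channel.pop();
        lk.unlock();
        req.reply.set_value(req.a + req.b);            // compute and send the result back
    }
}

int main() {
    std::thread server(server_loop);
    std::cout << remote_add(2, 3) << std::endl;        // prints 5
    { std::lock_guard<std::mutex> lk(m); stop = true; }
    cv.notify_one();
    server.join();
}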
2. How the two relate
Similarity: both are used between processes;
Difference: RPC emphasizes the call, i.e. one process directly invoking a method in another process, whereas IPC only provides communication between processes and has no notion of a function call.
In short: RPC is simply IPC with inter-process function calls added on top.
II. The relationship between Android RPC and Binder
In Android, Binder is one implementation of IPC and provides the low-level support for Android's RPC mechanism; other well-known RPC technologies include COM components and the CORBA architecture. The difference is that Android's RPC does not need to support remote calls across different hosts or operating systems, so it is a lightweight RPC.
In short: Android RPC = Binder inter-process communication + an inter-process function-call mechanism built on top of Binder.
III. The Android RPC implementation
1. Basic principles
a) The server side talks to the Binder driver: it starts threads that repeatedly call the driver's read interface (which blocks), the handler the server implements by inheriting Bnxxx and thus BBinder, and the driver's write interface.
b) The client talks to the Binder driver through Bpxxx, which inherits BpBinder: it first calls the driver's write interface to wake up the server, then calls the driver's read interface and blocks; when the server finishes processing, it calls the write interface and wakes the blocked client. That completes one call (a sketch of this Bnxxx/Bpxxx pattern follows below).
Java-layer AIDL: AIDL wraps this use of Binder so that developers can make remote method calls conveniently.
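A minimal sketch of the Bnxxx/Bpxxx pattern using libbinder's classic C++ helpers. The interface IHello, the descriptor string "demo.IHello" and HelloService are all invented for this sketch (they do not exist in AOSP), and the exact macro signatures vary slightly between Android versions:
#include <binder/IBinder.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>

using namespace android;

// Hypothetical interface: one remote method, add().
class IHello : public IInterface {
public:
    DECLARE_META_INTERFACE(Hello);
    enum { ADD = IBinder::FIRST_CALL_TRANSACTION };
    virtual int32_t add(int32_t a, int32_t b) = 0;
};

// Bpxxx: client-side proxy. Packs the arguments and calls transact() on the underlying BpBinder.
class BpHello : public BpInterface<IHello> {
public:
    BpHello(const sp<IBinder>& impl) : BpInterface<IHello>(impl) {}
    virtual int32_t add(int32_t a, int32_t b) {
        Parcel data, reply;
        data.writeInterfaceToken(IHello::getInterfaceDescriptor());
        data.writeInt32(a);
        data.writeInt32(b);
        remote()->transact(ADD, data, &reply);   // blocks until the server replies
        return reply.readInt32();
    }
};

IMPLEMENT_META_INTERFACE(Hello, "demo.IHello");

// Bnxxx: server-side stub. The driver delivers BR_TRANSACTION, BBinder::transact()
// calls onTransact(), which unpacks the Parcel and invokes the real implementation.
class BnHello : public BnInterface<IHello> {
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0) {
        switch (code) {
        case ADD: {
            data.enforceInterface(IHello::getInterfaceDescriptor());
            int32_t a = data.readInt32();
            int32_t b = data.readInt32();
            reply->writeInt32(add(a, b));        // dispatch to the concrete service
            return NO_ERROR;
        }
        }
        return BBinder::onTransact(code, data, reply, flags);
    }
};

// Concrete service behind the Bn class.
class HelloService : public BnHello {
public:
    virtual int32_t add(int32_t a, int32_t b) { return a + b; }
};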
2. Concrete implementation
Binder is responsible for the IPC, which essentially means the Binder driver;
the RPC mechanism itself is implemented by the following components: Client, Server, and Service Manager.
These components are analyzed in detail below.
3. The Server component
The server creates threads to receive and process client requests and to send back results (a fuller server sketch follows the snippet below):
sp<ProcessState> proc(ProcessState::self()); // open the Binder device
ProcessState::self()->startThreadPool(); // start the listening threads
IPCThreadState::self()->joinThreadPool(); // join this thread to the pool and loop waiting for client commands
talkWithDriver(); // receive/send data
executeCommand(); // handle commands
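Putting these calls together, a server process typically looks like the sketch below (it reuses the hypothetical HelloService class and the made-up service name "demo.hello" from the earlier sketch):
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/String16.h>

using namespace android;

int main() {
    sp<ProcessState> proc(ProcessState::self());        // open /dev/binder and mmap the transaction buffer
    defaultServiceManager()->addService(                 // register with ServiceManager (handle 0)
            String16("demo.hello"), new HelloService());
    ProcessState::self()->startThreadPool();             // spawn additional binder threads
    IPCThreadState::self()->joinThreadPool();            // main thread loops in talkWithDriver()/executeCommand()
    return 0;
}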
Key code flow:
frameworks/native/libs/binder/IPCThreadState.cpp
void IPCThreadState::joinThreadPool(bool isMain){
do {
result = talkWithDriver(); // receive/send data
result = executeCommand(cmd); // handle commands
}while (result != -ECONNREFUSED && result != -EBADF);
}
// receive/send data
status_t IPCThreadState::talkWithDriver(bool doReceive){
binder_write_read bwr;
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
bwr.write_consumed = 0;
bwr.read_consumed = 0;
do {
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) // important; analyzed further below
err = NO_ERROR;
}while (err == -EINTR);
}
// handle commands
status_t IPCThreadState::executeCommand(int32_t cmd){
switch (cmd) {
case BR_TRANSACTION:{
sp<BBinder> b((BBinder*)tr.cookie);
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
/*
frameworks/native/libs/binder/Binder.cpp
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags){
err = onTransact(code, data, reply, flags);
}
onTransact is the method a Service implements by inheriting Bnxxx and thus BBinder; this is the server-side code that actually gets called
*/
}
}
}
4. The Client component
The client creates a thread to send requests and receive the results:
ProcessState::self()->getContextObject(NULL); // obtain the ServiceManager binder (handle 0)
getService(); // obtain the proxy interface Bpxxx
remote()->transact(); // send the request and wait for the result
This part is not analyzed in depth here; Section IV below shows the Client/Server components calling the Service Manager. A brief client-side sketch follows for orientation.
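A client-side sketch, again assuming the hypothetical IHello interface and the made-up service name "demo.hello" from the earlier sketch; getService() here is IServiceManager::getService():
#include <binder/IInterface.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>

using namespace android;

int32_t call_remote_add() {
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("demo.hello")); // itself an RPC to ServiceManager (handle 0)
    sp<IHello> hello = interface_cast<IHello>(binder);                   // wrap the returned handle in a BpHello proxy
    return hello->add(3, 4);                                             // BpHello::add() -> remote()->transact(ADD, ...)
}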
5. The main places this is used are where services are created:
The SystemServer service: frameworks/base/cmds/system_server/library/system_init.cpp
The MediaService services: frameworks/av/media/mediaserver/main_mediaserver.cpp
For Java callers: frameworks/base/core/jni/android_util_Binder.cpp
For Native callers: frameworks/native/include/binder/BinderService.h
IV. How Binder implements a synchronous remote call: ServiceManager
In its read path the Binder driver blocks and sleeps in: wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
In its write path it wakes the other process with: wake_up_interruptible(target_wait);
This pairing is the key to synchronous calls between different processes.
1. The Client and Server components call ServiceManager's remote interface BpServiceManager
Server side: BpServiceManager::addService
Client side: BpServiceManager::getService
BpServiceManager::addService looks like this:
frameworks/native/libs/binder/IServiceManager.cpp
class BpServiceManager : public BpInterface<IServiceManager>{
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated){
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
/*
The template class:
frameworks/native/include/binder/IInterface.h
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase{
public:
BpInterface(const sp<IBinder>& remote);
}
*/
}
}
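For reference, the full addService() in IServiceManager.cpp marshals its arguments into the data Parcel before calling transact(); roughly as follows (condensed, details differ slightly between AOSP versions):
virtual status_t addService(const String16& name, const sp<IBinder>& service,
        bool allowIsolated) {
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);            // the name being registered
    data.writeStrongBinder(service);     // the service's binder object; the driver turns this into a flat_binder_object
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}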
The actual call path is as follows:
frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags){
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags); // mHandle identifies the target; here it is 0, i.e. ServiceManager
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags){
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
/*
binder_transaction_data tr;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
*/
err = waitForResponse(reply);
}
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult){
if ((err=talkWithDriver()) < NO_ERROR) break; // blocks
}
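waitForResponse() is where the synchronous semantics live: it keeps calling talkWithDriver() until the driver hands back a BR_REPLY for this thread. A condensed sketch of that loop (details vary between AOSP versions):
while (1) {
    if ((err = talkWithDriver()) < NO_ERROR) break;   // blocks in the driver's read path
    if (mIn.dataAvail() == 0) continue;
    cmd = mIn.readInt32();
    switch (cmd) {
    case BR_TRANSACTION_COMPLETE:                     // our BC_TRANSACTION was accepted by the driver
        if (!reply && !acquireResult) goto finish;    // one-way call: nothing more to wait for
        break;
    case BR_REPLY: {                                  // the server's result has arrived
        binder_transaction_data tr;
        err = mIn.read(&tr, sizeof(tr));
        if (reply) reply->ipcSetDataReference(...);   // hand the reply buffer over to the caller
        goto finish;
    }
    default:
        err = executeCommand(cmd);                    // e.g. an incoming BR_TRANSACTION for this thread
        break;
    }
}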
status_t IPCThreadState::talkWithDriver(bool doReceive){
bwr.write_consumed = 0;
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg){
switch (cmd) {
case BINDER_WRITE_READ: {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); // blocks
}
}
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed){
switch (cmd) {
case BC_TRANSACTION: // the usual case
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
}
}
static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
struct binder_transaction_data *tr, int reply){
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
if (tr->target.handle) {
} else {
target_node = binder_context_mgr_node; // handle 0: the target is ServiceManager's binder node
}
target_proc = target_node->proc; // the process that owns the target node
if (target_thread) {
} else {
target_list = &target_proc->todo; // the target process's work queue
target_wait = &target_proc->wait; // the target process's wait queue
}
// queue the work and wake up the ServiceManager thread
list_add_tail(&t->work.entry, target_list);
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait); // wake up ServiceManager
}
This wakes up ServiceManager and hands it the interface to be called along with the call's arguments.
2. How the ServiceManager component responds to calls from the Client and Server components
First, a quick look at how the Service Manager process starts:
frameworks/base/cmds/servicemanager/service_manager.c
int main(int argc, char **argv){
binder_loop(bs, svcmgr_handler);
}
frameworks/base/cmds/servicemanager/binder.c
void binder_loop(struct binder_state *bs, binder_handler func){
for(;;){
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
}
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg){
if (bwr.write_size > 0)
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (bwr.read_size > 0)
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
}
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed, int non_block){
ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); // wait here until woken up
}
=========================================
// after being woken up
frameworks/base/cmds/servicemanager/binder.c
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uint32_t *ptr, uint32_t size, binder_handler func){
switch(cmd) {
case BR_TRANSACTION: {
binder_send_reply(bs, &reply, txn->data, res);
}
}
}
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
void *buffer_to_free,
int status){
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, unsigned len){
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (unsigned) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
}
This wakes up the Client or Server component and returns the execution result to it.
V. The Binder driver
drivers/staging/android/binder.c
static struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl, // control interface
.mmap = binder_mmap, // mmap system call
.open = binder_open, // open interface
.flush = binder_flush,
.release = binder_release, // close interface
};
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "binder",
.fops = &binder_fops
};
static int __init binder_init(void){
int ret;
binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
if (binder_proc_dir_entry_root)
binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root);
ret = misc_register(&binder_miscdev); // register the device and create the device node
if (binder_proc_dir_entry_root) {
create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_state, NULL);
create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_stats, NULL);
create_proc_read_entry("transactions", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transactions, NULL);
create_proc_read_entry("transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log);
create_proc_read_entry("failed_transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log_failed);
}
return ret;
}
device_initcall(binder_init);
VI. Some demos
1. Native-layer Binder usage: Android Binder机制的Native应用
2. Native-layer bidirectional Binder calls: Android Binder机制的Native应用—双向通信
3. A Java-layer Binder example: Android Service学习之AIDL, Parcelable和远程服务