This article is based on the Android 8.0 source code. Using mediaserver as an example, it walks through the flow of binder addService.
Let's start with the sequence diagram of addService:
[Sequence diagram: main_mediaserver → ProcessState::self() / new ProcessState() / open_driver / open → MediaPlayerService::instantiate() / new MediaPlayerService() → BpServiceManager::addService → BpBinder::transact → IPCThreadState::transact → writeTransactionData / waitForResponse → talkWithDriver → ioctl into the binder driver]
```cpp
int main(int argc __unused, char **argv __unused)
{
    signal(SIGPIPE, SIG_IGN);

    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm(defaultServiceManager());
    ALOGI("ServiceManager: %p", sm.get());
    InitializeIcuOrDie();
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}
```
1. ProcessState opens the binder device

ProcessState is a process-level object; each process holds exactly one ProcessState instance.
```cpp
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState("/dev/binder");
    return gProcess;
}

ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))
    ......
{
    if (mDriverFD >= 0) {
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }
    ......
}
```
In the ProcessState constructor, mmap is called to map a region of the binder device's kernel memory into user space. What mmap actually does depends on the device's implementation. For an introduction to mmap, see the article "Linux 内存映射函数 mmap()函数详解" (a Chinese-language mmap tutorial).
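To make the role of this mapping concrete, here is a minimal, self-contained sketch of the open + mmap pattern that ProcessState follows. The function name and size are illustrative only; in ProcessState the size is BINDER_VM_SIZE (roughly 1 MB minus two pages), and the mapping is read-only for user space because the driver fills it with incoming transaction data.

```cpp
// Illustrative sketch only, not the ProcessState implementation.
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int open_and_map_device(const char* dev /* e.g. "/dev/binder" */, size_t size)
{
    int fd = open(dev, O_RDWR | O_CLOEXEC);
    if (fd < 0) return -1;

    // PROT_READ + MAP_PRIVATE: user space only reads from this region;
    // the driver copies transaction payloads into it on the kernel side.
    void* base = mmap(nullptr, size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (base == MAP_FAILED) {
        close(fd);
        return -1;
    }

    // ... use the mapping ...
    munmap(base, size);
    close(fd);
    return 0;
}
```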
Now let's look at how open_driver opens the binder device.
```cpp
static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
                  vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
            close(fd);
            fd = -1;
        }
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
    }
    return fd;
}
```
As you can see, opening the binder device is quite straightforward: open the device node, verify the protocol version, and set the maximum number of binder threads.
2. Obtaining IServiceManager

```cpp
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}
```
ProcessState::self()->getContextObject(NULL) ultimately returns new BpBinder(0), where 0 is the handle that identifies ServiceManager in the binder system.
interface_cast is defined as follows; it is a template function:
```cpp
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}

inline sp<IServiceManager> interface_cast(const sp<IBinder>& obj)
{
    return IServiceManager::asInterface(obj);
}
```
So the final call is IServiceManager::asInterface(new BpBinder(0)), which means we need to look at IServiceManager::asInterface next.
The definition of IServiceManager is quite simple:
```cpp
class IServiceManager : public IInterface
{
public:
    DECLARE_META_INTERFACE(ServiceManager)

    virtual sp<IBinder>         getService(const String16& name) const = 0;
    virtual sp<IBinder>         checkService(const String16& name) const = 0;
    virtual status_t            addService(const String16& name, const sp<IBinder>& service,
                                           bool allowIsolated = false) = 0;
    virtual Vector<String16>    listServices() = 0;

    enum {
        GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
        CHECK_SERVICE_TRANSACTION,
        ADD_SERVICE_TRANSACTION,
        LIST_SERVICES_TRANSACTION,
    };
};
```
DECLARE_META_INTERFACE is a macro. Together with IMPLEMENT_META_INTERFACE, these are the two template macros Google provides for hooking a native service into the binder framework:
```cpp
#define DECLARE_META_INTERFACE(INTERFACE)                               \
    static const ::android::String16 descriptor;                       \
    static ::android::sp<I##INTERFACE> asInterface(                    \
            const ::android::sp<::android::IBinder>& obj);             \
    virtual const ::android::String16& getInterfaceDescriptor() const; \
    I##INTERFACE();                                                     \
    virtual ~I##INTERFACE();
```
Substituting ServiceManager for the macro parameter yields the header declaration of the IServiceManager class:
```cpp
class IServiceManager : public IInterface
{
public:
    static const ::android::String16 descriptor;
    static ::android::sp<IServiceManager> asInterface(
            const ::android::sp<::android::IBinder>& obj);
    virtual const ::android::String16& getInterfaceDescriptor() const;
    IServiceManager();
    virtual ~IServiceManager();

    virtual sp<IBinder>         getService(const String16& name) const = 0;
    virtual sp<IBinder>         checkService(const String16& name) const = 0;
    virtual status_t            addService(const String16& name, const sp<IBinder>& service,
                                           bool allowIsolated = false) = 0;
    virtual Vector<String16>    listServices() = 0;

    enum {
        GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
        CHECK_SERVICE_TRANSACTION,
        ADD_SERVICE_TRANSACTION,
        LIST_SERVICES_TRANSACTION,
    };
};
```
```cpp
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const ::android::String16 I##INTERFACE::descriptor(NAME);           \
    const ::android::String16&                                          \
            I##INTERFACE::getInterfaceDescriptor() const {              \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    ::android::sp<I##INTERFACE> I##INTERFACE::asInterface(              \
            const ::android::sp<::android::IBinder>& obj)               \
    {                                                                   \
        ::android::sp<I##INTERFACE> intr;                               \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }
```
Now look at how IServiceManager.cpp uses the IMPLEMENT_META_INTERFACE macro:
```cpp
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
```
After expanding the macro, the IServiceManager.cpp source looks like this:
```cpp
const ::android::String16 IServiceManager::descriptor("android.os.IServiceManager");

const ::android::String16& IServiceManager::getInterfaceDescriptor() const {
    return IServiceManager::descriptor;
}

::android::sp<IServiceManager> IServiceManager::asInterface(
        const ::android::sp<::android::IBinder>& obj)
{
    ::android::sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}

IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
```
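To see how these two macros are meant to be used together, here is a hedged sketch of a hypothetical interface. The names IMyService, BpMyService, hello() and the descriptor string are all made up for illustration; only the macro usage pattern mirrors IServiceManager.

```cpp
// IMyService.h (hypothetical example, sketch only)
class IMyService : public IInterface {
public:
    DECLARE_META_INTERFACE(MyService)   // declares descriptor, asInterface(), etc.

    virtual int32_t hello() = 0;

    enum { HELLO_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION };
};

// Proxy class; asInterface() constructs this when the binder is a remote handle.
class BpMyService : public BpInterface<IMyService> {
public:
    explicit BpMyService(const sp<IBinder>& impl) : BpInterface<IMyService>(impl) {}

    virtual int32_t hello() {
        Parcel data, reply;
        data.writeInterfaceToken(IMyService::getInterfaceDescriptor());
        remote()->transact(HELLO_TRANSACTION, data, &reply);
        return reply.readInt32();
    }
};

// IMyService.cpp (hypothetical)
IMPLEMENT_META_INTERFACE(MyService, "com.example.IMyService");
```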
Summary: obtaining IServiceManager

The code that obtains IServiceManager can be expressed equivalently as:
```cpp
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = new BpServiceManager(new BpBinder(0));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}
```
From the code above we can see that the IServiceManager we obtain is a BpServiceManager object, and that the BpServiceManager constructor takes a BpBinder object. The roles of these two classes are covered in detail later.
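Putting this together, a typical client looks up a service roughly like this (a hedged sketch of the standard libbinder usage pattern; error handling is omitted):

```cpp
// Look up the media player service from a client process.
sp<IServiceManager> sm = defaultServiceManager();              // effectively BpServiceManager(new BpBinder(0))
sp<IBinder> binder = sm->getService(String16("media.player")); // BpBinder wrapping the service's handle
// interface_cast turns the raw binder into a typed proxy for the interface.
sp<IMediaPlayerService> player = interface_cast<IMediaPlayerService>(binder);
```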
3. The addService flow

A quick recap of the previous two sections:

- The mediaserver process first creates a ProcessState; the ProcessState constructor opens the binder device.
- It then obtains IServiceManager (a BpServiceManager object), after which it can communicate with ServiceManager over binder.
From these two points we can infer that BpServiceManager must use ProcessState to perform binder communication. That is indeed the case: BpServiceManager reaches ProcessState through its BpBinder object.
First, let's see how mediaserver calls addService to register the media service with ServiceManager:
```cpp
int main(int argc __unused, char **argv __unused)
{
    signal(SIGPIPE, SIG_IGN);

    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm(defaultServiceManager());
    ALOGI("ServiceManager: %p", sm.get());
    InitializeIcuOrDie();
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}

void MediaPlayerService::instantiate() {
    defaultServiceManager()->addService(
            String16("media.player"), new MediaPlayerService());
}
```
The BpServiceManager class

BpServiceManager is the client side of binder's C/S architecture. It belongs to the business layer and is defined in frameworks/native/libs/binder/IServiceManager.cpp. First, a class diagram of the binder family around BpServiceManager:
[Class diagram: BpServiceManager → BpInterface<IServiceManager> → IServiceManager → IInterface (asBinder) → RefBase, and BpRefBase (holds IBinder* mRemote) → RefBase; mRemote points to a BpBinder → IBinder (transact); BpBinder uses IPCThreadState, which uses ProcessState]
BpServiceManager's base class BpRefBase holds a BpBinder object; BpBinder uses IPCThreadState, and IPCThreadState in turn uses ProcessState. With this diagram, the path from BpServiceManager to ProcessState is clear.
BpInterface is a template class that inherits from its template parameter, in this case IServiceManager. As the class diagram shows, BpServiceManager does not inherit IBinder; instead, through its base class BpInterface it holds a BpBinder (which does inherit IBinder) in the mRemote member. All interaction between BpServiceManager and the ServiceManager server goes through this BpBinder.
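For reference, the relevant declarations look roughly like this (a trimmed sketch from memory of libbinder's IInterface.h/Binder.h, not a verbatim copy; ref-counting details are omitted):

```cpp
// Sketch only, not the verbatim AOSP headers.
class BpRefBase : public virtual RefBase {
protected:
    explicit BpRefBase(const sp<IBinder>& o);
    inline IBinder* remote() { return mRemote; }   // what BpServiceManager calls via remote()
private:
    IBinder* const mRemote;                        // the BpBinder passed in at construction
};

template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase {
public:
    explicit BpInterface(const sp<IBinder>& remote);
protected:
    virtual IBinder* onAsBinder();
};
```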
Now that we know the structure of BpServiceManager, let's first look at the structure of the MediaPlayerService class that gets added via addService, and then continue with the addService flow itself:
[Class diagram: MediaPlayerService → BnMediaPlayerService → BnInterface<IMediaPlayerService> → IMediaPlayerService → IInterface (asBinder) → RefBase, and BBinder (onTransact) → IBinder → RefBase]
As you can see, MediaPlayerService inherits from BBinder; it is the server side of binder's C/S architecture and the provider of the media service ("media.player"). It is registered into the binder system through ServiceManager.
MediaPlayerService is not the protagonist here; it is introduced only to illustrate the relationship between a service and BBinder. The server-side protagonist of addService is service_manager (defined in service_manager.c, covered in detail later). Note that service_manager does not plug into the BBinder framework; it opens the binder device and communicates with it directly. As the manager of all binder services, service_manager is implemented differently from an ordinary binder service.
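For comparison with the proxy side shown below, here is a hedged sketch of what the Bn (server) side of the hypothetical IMyService from the earlier example would look like; a real BnMediaPlayerService::onTransact follows the same shape, just with many more cases:

```cpp
// Hypothetical server-side classes for IMyService (sketch only).
class BnMyService : public BnInterface<IMyService> {
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0) {
        switch (code) {
        case HELLO_TRANSACTION: {
            CHECK_INTERFACE(IMyService, data, reply);   // verify the interface token
            reply->writeInt32(hello());                 // call into the concrete service
            return NO_ERROR;
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};

// The concrete service then only implements the business logic:
class MyService : public BnMyService {
public:
    virtual int32_t hello() { return 42; }
};
```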
BpServiceManager::addService

Let's see how BpServiceManager implements addService:
```cpp
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
    explicit BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }
    ......
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
                                bool allowIsolated)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }
    ......
};
```
The BpServiceManager constructor does nothing of its own; it simply hands its argument (the BpBinder) to the base class. addService first packs the service information into a Parcel and then calls BpBinder::transact.
```cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
```
BpBinder::transact does hardly anything itself; it passes the request straight to IPCThreadState::transact. First, let's look at the implementation of IPCThreadState::self():
```cpp
static pthread_mutex_t gTLSMutex = PTHREAD_MUTEX_INITIALIZER;
static bool gHaveTLS = false;
static pthread_key_t gTLS = 0;

IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;
    }
    ......
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                  strerror(key_create_value));
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
```
IPCThreadState::self() creates an IPCThreadState tied to the calling thread (thread-local data), so each thread maintains its own IPCThreadState object. For an introduction to native thread-local storage, see https://linux.die.net/man/3/pthread_key_create. Now the IPCThreadState constructor:
```cpp
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
}
```
The relationship between IPCThreadState and ProcessState is now clear: IPCThreadState holds a reference to the ProcessState object in its mProcess member.
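The thread-local pattern behind IPCThreadState::self() boils down to the following sketch. The names ThreadState and threadStateSelf are hypothetical; the real code additionally guards key creation with gTLSMutex and a gHaveTLS flag, as shown above.

```cpp
#include <pthread.h>

struct ThreadState {
    int transactionCount = 0;   // placeholder per-thread state
};

static pthread_key_t gKey;
static pthread_once_t gKeyOnce = PTHREAD_ONCE_INIT;

static void destroyThreadState(void* p) { delete static_cast<ThreadState*>(p); }
static void makeKey() { pthread_key_create(&gKey, destroyThreadState); }

// Each calling thread gets (and keeps) its own ThreadState instance,
// just as IPCThreadState::self() returns one IPCThreadState per thread.
ThreadState* threadStateSelf()
{
    pthread_once(&gKeyOnce, makeKey);
    ThreadState* st = static_cast<ThreadState*>(pthread_getspecific(gKey));
    if (st == nullptr) {
        st = new ThreadState();
        pthread_setspecific(gKey, st);   // IPCThreadState does this in its constructor
    }
    return st;
}
```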
IPCThreadState::transact()

Here is the implementation of IPCThreadState::transact:
```cpp
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;
    ......
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ......
    if ((flags & TF_ONE_WAY) == 0) {
        ......
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        ......
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}
```
IPCThreadState::transact has two key calls: writeTransactionData and waitForResponse. Let's look at each in turn.
```cpp
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        ......
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
```
writeTransactionData writes the data to be sent over binder into mOut. Note the data-structure transition here: the original Parcel (data) is first converted into a binder_transaction_data struct, which is then written into mOut, itself another Parcel.
Parcel and related data structures

It is worth examining how the transmitted data is transformed along the way. As we know, binder communication always uses Parcel as the data carrier. When a Parcel transfers primitive types (such as int or string), it writes them directly into the memory pointed to by its mData member. When a Parcel is first constructed, mData does not point to any memory; memory is allocated on demand (and filled via memcpy) when data is actually written. mDataSize is the amount of data already written, and mDataPos marks where the next write will go.
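As a quick illustration of that write/read model, here is a hedged sketch using the public Parcel API; the values are arbitrary and error handling is omitted:

```cpp
#include <binder/Parcel.h>
#include <utils/String16.h>

using namespace android;

void parcelBasics()
{
    Parcel p;
    p.writeInt32(1);                              // copied into mData at mDataPos, which then advances
    p.writeString16(String16("media.player"));    // length-prefixed UTF-16 payload

    p.setDataPosition(0);                         // rewind before reading back
    int32_t flag = p.readInt32();
    String16 name = p.readString16();
    (void)flag; (void)name;
}
```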
Now recall the code in BpServiceManager::addService that assembles the initial Parcel:
```cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service,
                            bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
```
After this code runs, the Parcel's mData and mObjects members can be pictured as follows:
[Diagram: the Parcel's mData buffer contains, in order, (length, "android.os.IServiceManager"), (length, "media.player"), a flat_binder_object, and the int32 0 for allowIsolated; mObjects is an array of offsets whose single entry points at the flat_binder_object inside mData]
Notice the flat_binder_object struct: it is produced when Parcel::writeStrongBinder writes the sp<IBinder> (in this example, the MediaPlayerService). writeStrongBinder works differently from writing primitive types; it exists specifically to carry IBinder objects across the transaction. Here is the implementation:
```cpp
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
        /* minimum priority for all nodes is nice 0 */
        obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
    } else {
        /* minimum priority for all nodes is MAX_NICE(19) */
        obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    }

    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}
```
flatten_binder wraps the IBinder into a flat_binder_object struct and then calls finish_flatten_binder. First, the definition of flat_binder_object:
```c
struct flat_binder_object {
    __u32 type;
    __u32 flags;
    union {
        binder_uintptr_t binder;
        __u32 handle;
    };
    binder_uintptr_t cookie;
};
```
Now into finish_flatten_binder:
```cpp
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
```
At this point the assembly of the Parcel data is complete. But as the data carrier for binder transmission, the Parcel's job is not over yet. Let's continue along the transmission chain; first a quick recap of that chain:

- After BpServiceManager::addService has assembled the Parcel, it calls remote()->transact, and remote() returns the BpBinder;
- BpBinder::transact passes the Parcel on, untouched, to IPCThreadState::self()->transact;
- IPCThreadState::transact calls IPCThreadState::writeTransactionData, which writes the original Parcel data into a Parcel member named mOut.

Here is IPCThreadState::writeTransactionData again:
```cpp
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        ......
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
```
The data to be transmitted has now been written into the Parcel named mOut. This is what mOut contains at this point:
[Diagram: mOut's mData now contains the cmd (BC_TRANSACTION) followed by a binder_transaction_data struct]
Here is the definition of the binder_transaction_data struct:
```c
struct binder_transaction_data {
    union {
        __u32 handle;
        binder_uintptr_t ptr;
    } target;
    binder_uintptr_t cookie;
    __u32 code;

    __u32 flags;
    pid_t sender_pid;
    uid_t sender_euid;
    binder_size_t data_size;
    binder_size_t offsets_size;

    union {
        struct {
            binder_uintptr_t buffer;
            binder_uintptr_t offsets;
        } ptr;
        __u8 buf[8];
    } data;
};
```
IPCThreadState::waitForResponse

The data to send is now sitting in IPCThreadState::mOut. How does it actually get sent out?
```cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();
        ......
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        ......
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}
```
IPCThreadState::talkWithDriver

The all-important talkWithDriver:
```cpp
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ......
    binder_write_read bwr;

    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    ......
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
    } while (err == -EINTR);
    ......
    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        ......
        return NO_ERROR;
    }

    return err;
}
```
That concludes the client-side flow of addService. For the processing inside the binder driver, read the kernel source starting from the binder_ioctl function in binder.c; binder_ioctl is the kernel handler for the ioctl system call. More on this will be added later.
4. ServiceManager

Entry point: main

```c
int main(int argc, char** argv)
{
    struct binder_state *bs;
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";
    }

    bs = binder_open(driver, 128*1024);
    ......
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    ......
    binder_loop(bs, svcmgr_handler);

    return 0;
}
```
service_manager's main function performs three important steps:

- binder_open opens the binder device;
- it becomes the context_manager, i.e. the ServiceManager;
- it enters binder_loop.

Let's go through these one by one.
ServiceManager opens the binder device

```c
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
```
Becoming the context_manager

The context_manager is just the ServiceManager. It is established through the BINDER_SET_CONTEXT_MGR command of the ioctl system call.
```c
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
```
The handling inside the binder driver:
```c
static struct binder_node *binder_context_mgr_node;

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ...
    switch (cmd) {
    ...
    case BINDER_SET_CONTEXT_MGR:
        ...
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    ...
    }
}

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    if (uid_valid(binder_context_mgr_uid)) {
        if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns, binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        binder_context_mgr_uid = curr_euid;
    }
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}
```
Becoming the context_manager means the ServiceManager process's information is recorded in the driver's binder_context_mgr_node. Going deeper requires kernel programming knowledge, so we won't analyze it further here.
The binder_loop loop

The process then enters binder_loop. The second parameter of this function is very important: every incoming message is handled by that callback. Here is the implementation of binder_loop:
```c
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
```
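binder_loop first registers the thread with the driver by sending BC_ENTER_LOOPER through binder_write, whose body is not shown above. As a hedged sketch (reconstructed from the BINDER_WRITE_READ pattern seen in talkWithDriver and binder_loop, not quoted from the source), a write-only call into the driver looks roughly like this:

```cpp
// Sketch only: a write-only BINDER_WRITE_READ call in the style of
// servicemanager's binder_write(); the hypothetical function name and
// minimal error handling are ours.
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_write_sketch(int fd, void *data, size_t len)
{
    struct binder_write_read bwr;

    bwr.write_size = len;                // bytes the driver should consume
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data; // e.g. a single BC_ENTER_LOOPER command
    bwr.read_size = 0;                   // write-only: nothing to read back
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;

    int res = ioctl(fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write_sketch: ioctl failed (%s)\n", strerror(errno));
    }
    return res;
}
```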