SurfaceFlinger startup
The surfaceflinger executable is compiled solely from main_surfaceflinger.cpp, whose main job is to set up the process's startup environment.
\frameworks\native\services\surfaceflinger\main_surfaceflinger.cpp
int main(int, char**) {
    signal(SIGPIPE, SIG_IGN); // ignore SIGPIPE so the process does not exit when the peer's (client/server) socket is closed
    // a HIDL service has to configure the binder thread pool when it starts
    hardware::configureRpcThreadpool(1 /* maxThreads */,    // maximum number of threads this process uses for hwbinder communication
                                     false /* callerWillJoin */);
configureRpcThreadpool: setting the hwbinder thread count
configureRpcThreadpool sets the maximum number of threads the current process may use for hwbinder communication:
\system\libhidl\transport\HidlTransportSupport.cpp
void configureRpcThreadpool(size_t maxThreads, bool callerWillJoin) {
    // TODO(b/32756130) this should be transport-dependent
    configureBinderRpcThreadpool(maxThreads, callerWillJoin);
}
\system\libhidl\transport\HidlBinderSupport.cpp
void configureBinderRpcThreadpool(size_t maxThreads, bool callerWillJoin) {
    ProcessState::self()->setThreadPoolConfiguration(maxThreads, callerWillJoin /*callerJoinsPool*/);
}
For HIDL, Google implemented a dedicated set of ProcessState, IPCThreadState and BpHwBinder classes. HIDL itself is the unified interface definition language used to specify the interface between a HAL and its users.
Inside configureBinderRpcThreadpool, this single line of code performs two operations:
ProcessState::self(): as in the framework binder flow, it initializes hwbinder in the current process
setThreadPoolConfiguration: sets the number of threads the current process uses for hwbinder communication
A typical HIDL service drives the same machinery, as sketched below.
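As additional context, here is a minimal sketch (not taken from this article) of how a typical HIDL service process uses the same libhidl helpers; IDemo and DemoImpl are hypothetical placeholders standing in for a generated HIDL interface and its implementation:

#include <hidl/HidlTransportSupport.h>

using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;

int main() {
    // Allow up to 4 hwbinder threads. Because callerWillJoin is true, the kernel is
    // asked to spawn at most 3 additional threads; this main thread becomes the 4th.
    configureRpcThreadpool(4 /* maxThreads */, true /* callerWillJoin */);

    // android::sp<IDemo> service = new DemoImpl();  // hypothetical HIDL implementation
    // service->registerAsService();                 // publish via hwservicemanager

    joinRpcThreadpool(); // the current thread becomes a hwbinder thread; does not return
    return 0;
}

SurfaceFlinger, in contrast, passes callerWillJoin = false, since its main thread goes on to run the compositor itself instead of joining the hwbinder pool.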
Initializing the hwbinder framework in the current process
The code for HIDL's ProcessState, IPCThreadState, etc. lives at:
system/libhwbinder/ProcessState.cpp
system/libhwbinder/IPCThreadState.cpp
The ProcessState object is created in the self() method.
\system\libhwbinder\ProcessState.cpp
sp<ProcessState> ProcessState::self() {
    Mutex::Autolock _l(gProcessMutex); // lock: this is a singleton
    if (gProcess != NULL) {            // classic C++ singleton pattern
        return gProcess;
    }
    gProcess = new ProcessState;       // create the ProcessState object
    return gProcess;
}
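For comparison, the "flow in the binder framework" mentioned above looks roughly like this on the libbinder side (a sketch of typical framework-binder usage, not part of SurfaceFlinger's hwbinder setup):

#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>

int main() {
    // The framework-binder ProcessState is also a per-process singleton, but it opens
    // /dev/binder instead of /dev/hwbinder.
    android::ProcessState::self()->startThreadPool();   // spawn binder worker threads
    android::IPCThreadState::self()->joinThreadPool();  // current thread joins the pool
    return 0;
}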
In the ProcessState constructor, open_driver() is called to open the /dev/hwbinder node, which has the kernel initialize the hwbinder data structures for this process. The file descriptor of the opened hwbinder node is saved in the mDriverFD member, and all later communication with the driver in the kernel goes through this fd.
ProcessState::ProcessState(size_t mmap_size)
    : mDriverFD(open_driver())
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mSpawnThreadOnStart(true)
    , mThreadPoolSeq(1)
    , mMmapSize(mmap_size)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(0, mMmapSize, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/hwbinder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
    } else {
        ALOGE("Binder driver could not be opened. Terminating.");
    }
}
open_driver() simply opens the /dev/hwbinder node. This tells the hwbinder driver to initialize the hwbinder-related data structures for this process inside the kernel.
static int open_driver()
{
    int fd = open("/dev/hwbinder", O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers); // query the kernel's binder protocol version so it can be compared with this process's version
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)!", vers, BINDER_CURRENT_PROTOCOL_VERSION);
            close(fd);
            fd = -1;
        }
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads); // set the maximum number of threads this process may use for hwbinder communication
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/hwbinder' failed: %s\n", strerror(errno));
    }
    return fd;
}
BINDER_VERSION
Reads the kernel's binder protocol version via ioctl so it can be compared against the protocol version this process was compiled with (BINDER_CURRENT_PROTOCOL_VERSION).
BINDER_SET_MAX_THREADS
Sets the number of threads this process will use for hwbinder communication, here DEFAULT_MAX_BINDER_THREADS.
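To make the two ioctls concrete, here is a standalone sketch (assumptions: it is compiled against the Android kernel UAPI header linux/android/binder.h and run on a device that exposes /dev/hwbinder) that issues the same calls open_driver() does:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <linux/android/binder.h>  // BINDER_VERSION, BINDER_SET_MAX_THREADS

int main() {
    int fd = open("/dev/hwbinder", O_RDWR | O_CLOEXEC);
    if (fd < 0) {
        perror("open /dev/hwbinder");
        return 1;
    }

    binder_version vers = {};
    if (ioctl(fd, BINDER_VERSION, &vers) == 0) {
        // the kernel fills in its protocol version; user space compares it with its own
        printf("kernel hwbinder protocol version: %d\n", vers.protocol_version);
    }

    uint32_t maxThreads = 4;  // upper bound on extra threads the kernel may ask this process to spawn
    ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);

    close(fd);
    return 0;
}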
setThreadPoolConfiguration
After ProcessState has set this up, setThreadPoolConfiguration is called to adjust the number of communication threads.
status_t ProcessState::setThreadPoolConfiguration(size_t maxThreads, bool callerJoinsPool) {
    LOG_ALWAYS_FATAL_IF(maxThreads < 1, "Binder threadpool must have a minimum of one thread.");
    status_t result = NO_ERROR;
    // the BINDER_SET_MAX_THREADS ioctl really tells the kernel how many threads
    // it's allowed to spawn, *in addition* to any threads we may have already
    // spawned locally. If 'callerJoinsPool' is true, it means that the caller
    // will join the threadpool, and so the kernel needs to create one less thread.
    // If 'callerJoinsPool' is false, we will still spawn a thread locally, and we should
    // also tell the kernel to create one less thread than what was requested here.
    size_t kernelMaxThreads = maxThreads - 1;
    if (ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &kernelMaxThreads) != -1) {
        AutoMutex _l(mLock);
        mMaxThreads = maxThreads;
        mSpawnThreadOnStart = !callerJoinsPool;
    } else {
        result = -errno;
        ALOGE("Binder ioctl to set max threads failed: %s", strerror(-result));
    }
    return result;
}
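As a concrete illustration of the maxThreads - 1 arithmetic, the values for SurfaceFlinger's configureRpcThreadpool(1, false) call from main() work out as follows (they follow directly from the code above):

// SurfaceFlinger: configureRpcThreadpool(1 /* maxThreads */, false /* callerWillJoin */)
//   maxThreads          = 1
//   callerJoinsPool     = false
//   kernelMaxThreads    = maxThreads - 1 = 0     // the kernel is told to spawn no extra threads
//   mMaxThreads         = 1
//   mSpawnThreadOnStart = !callerJoinsPool = true // ProcessState spawns the single hwbinder
//                                                 // thread locally when the pool is started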
Source: CSDN
Author: alexweng2009
Link: https://blog.csdn.net/alexweng2009/article/details/103682812