// Code location: /frameworks/base/core/libutils/RefBase.cpp
void RefBase::incStrong(const void* id) const {
    weakref_impl* const refs = mRefs;
    // Bump the reference counts: a weak reference is taken alongside the strong one.
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    // First strong reference: remove the INITIAL_STRONG_VALUE bias and notify the object.
    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}
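To see when incStrong() actually runs, here is a minimal sketch of wrapping a RefBase subclass in sp<>; the class name MyObject and its comments are illustrative, not part of AOSP:

#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

using android::RefBase;
using android::sp;

class MyObject : public RefBase {       // hypothetical example class
protected:
    void onFirstRef() override {
        // Invoked exactly once, when the first incStrong() strips the
        // INITIAL_STRONG_VALUE bias from mStrong (the branch shown above).
    }
};

void example() {
    sp<MyObject> p = new MyObject();    // attaching to sp<> calls incStrong()
    sp<MyObject> q = p;                 // the copy calls incStrong() again; onFirstRef() does not repeat
}                                       // each sp<> destructor calls decStrong()

The INITIAL_STRONG_VALUE bias exists precisely so that onFirstRef() fires only on the very first strong reference, no matter how many sp<> copies are made afterwards.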
    // Adjust the timeout based on when the next message is due.
    if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
        if (messageTimeoutMillis >= 0
                && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
            timeoutMillis = messageTimeoutMillis;
        }
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - next message in %" PRId64 "ns, adjusted timeout: timeoutMillis=%d",
                this, mNextMessageUptime - now, timeoutMillis);
#endif
    }
    // Poll.
    int result = POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;
    // We are about to idle.
    mPolling = true;

    // At most EPOLL_MAX_EVENTS (16) fds are reported per wait.
    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    // Block until an event arrives or the timeout expires. nativeWake() writes to the
    // wake event fd, which makes this call return immediately; otherwise it returns
    // once timeoutMillis has elapsed. (A standalone sketch of this eventfd wake-up
    // follows the function.)
    int eventCount = epoll_wait(mEpollFd.get(), eventItems, EPOLL_MAX_EVENTS, timeoutMillis);
    // No longer idling.
    mPolling = false;
    // Acquire lock.
    mLock.lock();
    // Rebuild epoll set if needed.
    if (mEpollRebuildRequired) {
        mEpollRebuildRequired = false;
        // Rebuild the epoll set, then jump straight to Done.
        rebuildEpollLocked();
        goto Done;
    }
    // Check for poll error.
    if (eventCount < 0) {
        // A negative event count means epoll_wait() failed; jump to Done.
        if (errno == EINTR) {
            goto Done;
        }
        ALOGW("Poll failed with an unexpected error: %s", strerror(errno));
        result = POLL_ERROR;
        goto Done;
    }
    // Check for poll timeout.
    // An event count of zero means the wait timed out; jump to Done.
    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - timeout", this);
#endif
        result = POLL_TIMEOUT;
        goto Done;
    }
    // Handle all events.
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif
    // Iterate over every event that epoll reported.
    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;
        if (fd == mWakeEventFd.get()) {
            if (epollEvents & EPOLLIN) {
                // The looper was woken up: read and drain the wake event fd.
                awoken();
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on wake event fd.", epollEvents);
            }
        } else {
            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
                // Turn the Request into a Response and push it onto the response vector.
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
Done: ;
    // Invoke pending message callbacks.
    // Next, process the native Messages and invoke their handlers (a posting
    // sketch follows the function).
    mNextMessageUptime = LLONG_MAX;
    while (mMessageEnvelopes.size() != 0) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
        if (messageEnvelope.uptime <= now) {
            // Remove the envelope from the list.
            // We keep a strong reference to the handler until the call to handleMessage
            // finishes.  Then we drop it so that the handler can be deleted *before*
            // we reacquire our lock.
            { // obtain handler
                sp<MessageHandler> handler = messageEnvelope.handler;
                Message message = messageEnvelope.message;
                mMessageEnvelopes.removeAt(0);
                mSendingMessage = true;
                // Drop the lock before invoking the handler.
                mLock.unlock();
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                        this, handler.get(), message.what);
#endif
                // Dispatch the message to its handler.
                handler->handleMessage(message);
            } // release handler

            // Reacquire the lock.
            mLock.lock();
            mSendingMessage = false;
            // A callback ran, so report POLL_CALLBACK.
            result = POLL_CALLBACK;
        } else {
            // The last message left at the head of the queue determines the next wakeup time.
            mNextMessageUptime = messageEnvelope.uptime;
            break;
        }
    }
    // Release lock.
    mLock.unlock();
    // Invoke all response callbacks.
    // Handle the Responses that carry a callback by invoking handleEvent() on each
    // (see the addFd() sketch after this function).
    for (size_t i = 0; i < mResponses.size(); i++) {
        Response& response = mResponses.editItemAt(i);
        if (response.request.ident == POLL_CALLBACK) {
            int fd = response.request.fd;
            int events = response.events;
            void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                    this, response.request.callback.get(), fd, events, data);
#endif
            // Invoke the callback.  Note that the file descriptor may be closed by
            // the callback (and potentially even reused) before the function returns so
            // we need to be a little careful when removing the file descriptor afterwards.
            int callbackResult = response.request.callback->handleEvent(fd, events, data);
            if (callbackResult == 0) {
                // A result of 0 means the callback asked to be unregistered.
                removeFd(fd, response.request.seq);
            }
            // Clear the callback reference in the response structure promptly because we
            // will not clear the response vector itself until the next poll.
            response.request.callback.clear();
            // A callback ran, so report POLL_CALLBACK.
            result = POLL_CALLBACK;
        }
    }
    return result;
}
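The wake-up that epoll_wait() reacts to above is driven by mWakeEventFd, an eventfd registered in the same epoll set: wake() writes an 8-byte counter to it, epoll_wait() returns with EPOLLIN on that fd, and awoken() reads the counter back to drain it. Below is a self-contained sketch of that mechanism using plain Linux calls; it illustrates the technique and is not the Looper code itself (error handling omitted):

#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

// Minimal illustration of the eventfd-based wake-up used by the Looper.
void eventfd_wake_demo() {
    int wakeFd  = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    int epollFd = epoll_create1(EPOLL_CLOEXEC);

    struct epoll_event item = {};
    item.events  = EPOLLIN;
    item.data.fd = wakeFd;
    epoll_ctl(epollFd, EPOLL_CTL_ADD, wakeFd, &item);

    uint64_t inc = 1;
    write(wakeFd, &inc, sizeof(inc));              // what wake() does: bump the counter

    struct epoll_event events[16];
    int n = epoll_wait(epollFd, events, 16, 5000); // returns immediately with one ready fd

    uint64_t counter;
    read(wakeFd, &counter, sizeof(counter));       // what awoken() does: drain the counter

    close(epollFd);
    close(wakeFd);
    (void)n;
}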
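The native message branch mirrors the Java Handler pattern: client code subclasses MessageHandler, posts a Message with Looper::sendMessage() or sendMessageDelayed(), and pollInner() calls handleMessage() once the message's uptime has passed, outside of mLock. A minimal sketch, where TickHandler and the what value 1 are made up for illustration:

#include <utils/Looper.h>
#include <utils/Timers.h>

using android::Looper;
using android::Message;
using android::MessageHandler;
using android::sp;

class TickHandler : public MessageHandler {         // hypothetical handler
public:
    void handleMessage(const Message& message) override {
        if (message.what == 1 /* illustrative what-code */) {
            // Runs on the looper thread, with mLock released (see pollInner above).
        }
    }
};

void postExample(const sp<Looper>& looper) {
    sp<TickHandler> handler = new TickHandler();
    looper->sendMessage(handler, Message(1));                    // due immediately
    looper->sendMessageDelayed(ms2ns(500), handler, Message(1)); // due in 500 ms
}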
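The Responses handled at the end of pollInner() come from file descriptors registered with Looper::addFd(). When a callback object is supplied, the stored ident becomes POLL_CALLBACK, and the return value of handleEvent() decides the fd's fate: 1 keeps it registered, 0 triggers the removeFd() call seen above. A sketch of registering such a callback; PipeReader and the readFd parameter are illustrative:

#include <utils/Looper.h>
#include <unistd.h>

using android::Looper;
using android::LooperCallback;
using android::sp;

class PipeReader : public LooperCallback {          // hypothetical callback
public:
    int handleEvent(int fd, int events, void* /*data*/) override {
        if (events & Looper::EVENT_INPUT) {
            char buf[128];
            read(fd, buf, sizeof(buf));
            return 1;   // keep the fd registered; pollInner will not call removeFd()
        }
        return 0;       // unregister on error/hang-up; pollInner calls removeFd()
    }
};

void registerExample(const sp<Looper>& looper, int readFd) {
    // The ident argument is ignored when a callback is given; POLL_CALLBACK is stored instead.
    looper->addFd(readFd, 0, Looper::EVENT_INPUT, new PipeReader(), nullptr);
}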
// Disposes of the underlying message queue.
// Must only be called on the looper thread or the finalizer.
private void dispose() {
    if (mPtr != 0) {
        nativeDestroy(mPtr);
        mPtr = 0;
    }
}
// Code location: /frameworks/base/core/libutils/RefBase.cpp
void RefBase::decStrong(const void* id) const {
    weakref_impl* const refs = mRefs;
    // Remove this strong reference.
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times", refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`. This is
    // not the case. The analyzer may become more okay with this pattern when
    // https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved.
    // Drop the weak reference that incStrong() took. NOLINTNEXTLINE
    refs->decWeak(id);
}
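The OBJECT_LIFETIME_STRONG branch above is what makes the last decStrong() delete the object. A subclass can switch to weak-controlled lifetime by calling extendObjectLifetime(OBJECT_LIFETIME_WEAK), in which case decStrong() skips the delete and the object lives until the final decWeak(). A sketch of the difference; the WeakLived class is illustrative:

#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

using android::RefBase;
using android::sp;
using android::wp;

class WeakLived : public RefBase {                  // hypothetical example class
public:
    WeakLived() {
        // Lifetime is now controlled by the weak count, not the strong count.
        extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    }
};

void lifetimeExample() {
    WeakLived* raw = new WeakLived();
    wp<WeakLived> weak = raw;       // holds only a weak reference
    {
        sp<WeakLived> strong = raw; // first strong reference
    }                               // last decStrong(): the object is *not* deleted here
    // The object is finally deleted when `weak` goes out of scope and the weak
    // count reaches zero inside decWeak().
}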