int Looper::pollAll(int timeoutMillis, int* outFd, int* outEvents, void** outData) { if (timeoutMillis <= 0) { int result; do { result = pollOnce(timeoutMillis, outFd, outEvents, outData); } while (result == ALOOPER_POLL_CALLBACK); return result; } else { nsecs_t endTime = systemTime(SYSTEM_TIME_MONOTONIC) + milliseconds_to_nanoseconds(timeoutMillis); for (;;) { int result = pollOnce(timeoutMillis, outFd, outEvents, outData); if (result != ALOOPER_POLL_CALLBACK) { return result; } nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); timeoutMillis = toMillisecondTimeoutDelay(now, endTime); if (timeoutMillis == 0) { return ALOOPER_POLL_TIMEOUT; } } } }
static void _logUsageLocked() { nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); if (now > s_nextLog) { s_nextLog = now + milliseconds_to_nanoseconds(10); ALOGV("Total memory usage: %zu kb", s_totalAllocations / 1024); } }
// Copies the fields of a Java android.view.KeyEvent object into a native
// KeyEvent. Times are stored in milliseconds on the Java side and converted
// to nanoseconds for the native event. Always returns OK.
status_t android_view_KeyEvent_toNative(JNIEnv* env, jobject eventObj,
        KeyEvent* event) {
    const jint nativeDeviceId = env->GetIntField(eventObj, gKeyEventClassInfo.mDeviceId);
    const jint nativeSource = env->GetIntField(eventObj, gKeyEventClassInfo.mSource);
    const jint nativeMetaState = env->GetIntField(eventObj, gKeyEventClassInfo.mMetaState);
    const jint nativeAction = env->GetIntField(eventObj, gKeyEventClassInfo.mAction);
    const jint nativeKeyCode = env->GetIntField(eventObj, gKeyEventClassInfo.mKeyCode);
    const jint nativeScanCode = env->GetIntField(eventObj, gKeyEventClassInfo.mScanCode);
    const jint nativeRepeatCount = env->GetIntField(eventObj, gKeyEventClassInfo.mRepeatCount);
    const jint nativeFlags = env->GetIntField(eventObj, gKeyEventClassInfo.mFlags);
    const jlong downTimeMillis = env->GetLongField(eventObj, gKeyEventClassInfo.mDownTime);
    const jlong eventTimeMillis = env->GetLongField(eventObj, gKeyEventClassInfo.mEventTime);

    event->initialize(nativeDeviceId, nativeSource, nativeAction, nativeFlags,
            nativeKeyCode, nativeScanCode, nativeMetaState, nativeRepeatCount,
            milliseconds_to_nanoseconds(downTimeMillis),
            milliseconds_to_nanoseconds(eventTimeMillis));
    return OK;
}
// Performs a single poll cycle: waits (up to timeoutMillis, -1 = forever,
// 0 = non-blocking) for fd events and due messages, then dispatches message
// handlers and fd callbacks. Returns one of ALOOPER_POLL_WAKE / _TIMEOUT /
// _ERROR / _CALLBACK. Two compile-time backends exist: epoll
// (LOOPER_USES_EPOLL) and classic poll(2); `goto Done` jumps over the
// event-handling section that is specific to each backend.
int Looper::pollInner(int timeoutMillis) {
#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
#endif

    // Adjust the timeout based on when the next message is due.
    // (Skip when already non-blocking, or when no message is scheduled.)
    if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
        // Shrink the wait so we wake in time for the next message; a negative
        // timeoutMillis means "infinite", so any message deadline wins then.
        if (messageTimeoutMillis >= 0
                && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
            timeoutMillis = messageTimeoutMillis;
        }
#if DEBUG_POLL_AND_WAKE
        LOGD("%p ~ pollOnce - next message in %lldns, adjusted timeout: timeoutMillis=%d",
                this, mNextMessageUptime - now, timeoutMillis);
#endif
    }

    // Poll. Default result: we were explicitly woken (wake pipe).
    int result = ALOOPER_POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;

#ifdef LOOPER_STATISTICS
    nsecs_t pollStartTime = systemTime(SYSTEM_TIME_MONOTONIC);
#endif

#ifdef LOOPER_USES_EPOLL
    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    int eventCount = epoll_wait(mEpollFd, eventItems, EPOLL_MAX_EVENTS, timeoutMillis);
#else
    // Wait for wakeAndLock() waiters to run then set mPolling to true.
    mLock.lock();
    while (mWaiters != 0) {
        mResume.wait(mLock);
    }
    mPolling = true;
    mLock.unlock();

    size_t requestedCount = mRequestedFds.size();
    // Note: we poll WITHOUT holding mLock, so the fd set may be mutated by
    // other threads only via the wakeAndLock() protocol above.
    int eventCount = poll(mRequestedFds.editArray(), requestedCount, timeoutMillis);
#endif

    // Acquire lock. Held through event collection and message dispatch setup.
    mLock.lock();

    // Check for poll error. EINTR is benign: report it as a plain wake.
    if (eventCount < 0) {
        if (errno == EINTR) {
            goto Done;
        }
        LOGW("Poll failed with an unexpected error, errno=%d", errno);
        result = ALOOPER_POLL_ERROR;
        goto Done;
    }

    // Check for poll timeout.
    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        LOGD("%p ~ pollOnce - timeout", this);
#endif
        result = ALOOPER_POLL_TIMEOUT;
        goto Done;
    }

    // Handle all events.
#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif

#ifdef LOOPER_USES_EPOLL
    // epoll backend: translate epoll event bits into ALOOPER_EVENT_* and queue
    // a Response for each registered fd; the wake pipe is drained in place.
    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;
        if (fd == mWakeReadPipeFd) {
            if (epollEvents & EPOLLIN) {
                awoken();
            } else {
                LOGW("Ignoring unexpected epoll events 0x%x on wake read pipe.", epollEvents);
            }
        } else {
            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= ALOOPER_EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= ALOOPER_EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= ALOOPER_EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= ALOOPER_EVENT_HANGUP;
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
                // fd was removed after epoll reported it; drop the event.
                LOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
Done: ;
#else
    // poll(2) backend: scan the request array for fds with revents set.
    // eventCount is used as a countdown so we can stop early once every
    // reported fd has been seen.
    for (size_t i = 0; i < requestedCount; i++) {
        const struct pollfd& requestedFd = mRequestedFds.itemAt(i);
        short pollEvents = requestedFd.revents;
        if (pollEvents) {
            if (requestedFd.fd == mWakeReadPipeFd) {
                if (pollEvents & POLLIN) {
                    awoken();
                } else {
                    LOGW("Ignoring unexpected poll events 0x%x on wake read pipe.", pollEvents);
                }
            } else {
                int events = 0;
                if (pollEvents & POLLIN) events |= ALOOPER_EVENT_INPUT;
                if (pollEvents & POLLOUT) events |= ALOOPER_EVENT_OUTPUT;
                if (pollEvents & POLLERR) events |= ALOOPER_EVENT_ERROR;
                if (pollEvents & POLLHUP) events |= ALOOPER_EVENT_HANGUP;
                if (pollEvents & POLLNVAL) events |= ALOOPER_EVENT_INVALID;
                pushResponse(events, mRequests.itemAt(i));
            }
            if (--eventCount == 0) {
                break;
            }
        }
    }

Done:
    // Set mPolling to false and wake up the wakeAndLock() waiters.
    mPolling = false;
    if (mWaiters != 0) {
        mAwake.broadcast();
    }
#endif

#ifdef LOOPER_STATISTICS
    // Sample poll latency: for zero-timeout polls the whole duration is
    // overhead; for timed-out polls the overhead is duration minus timeout.
    nsecs_t pollEndTime = systemTime(SYSTEM_TIME_MONOTONIC);
    mSampledPolls += 1;
    if (timeoutMillis == 0) {
        mSampledZeroPollCount += 1;
        mSampledZeroPollLatencySum += pollEndTime - pollStartTime;
    } else if (timeoutMillis > 0 && result == ALOOPER_POLL_TIMEOUT) {
        mSampledTimeoutPollCount += 1;
        mSampledTimeoutPollLatencySum += pollEndTime - pollStartTime
                - milliseconds_to_nanoseconds(timeoutMillis);
    }
    if (mSampledPolls == SAMPLED_POLLS_TO_AGGREGATE) {
        LOGD("%p ~ poll latency statistics: %0.3fms zero timeout, %0.3fms non-zero timeout", this,
                0.000001f * float(mSampledZeroPollLatencySum) / mSampledZeroPollCount,
                0.000001f * float(mSampledTimeoutPollLatencySum) / mSampledTimeoutPollCount);
        mSampledPolls = 0;
        mSampledZeroPollCount = 0;
        mSampledZeroPollLatencySum = 0;
        mSampledTimeoutPollCount = 0;
        mSampledTimeoutPollLatencySum = 0;
    }
#endif

    // Invoke pending message callbacks, oldest first, until the head of the
    // queue is not yet due. mLock is dropped around each handleMessage() call.
    mNextMessageUptime = LLONG_MAX;
    while (mMessageEnvelopes.size() != 0) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
        if (messageEnvelope.uptime <= now) {
            // Remove the envelope from the list.
            // We keep a strong reference to the handler until the call to handleMessage
            // finishes.  Then we drop it so that the handler can be deleted *before*
            // we reacquire our lock.
            { // obtain handler
                sp<MessageHandler> handler = messageEnvelope.handler;
                Message message = messageEnvelope.message;
                mMessageEnvelopes.removeAt(0);
                mSendingMessage = true;
                mLock.unlock();
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                LOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                        this, handler.get(), message.what);
#endif
                handler->handleMessage(message);
            } // release handler
            mLock.lock();
            mSendingMessage = false;
            result = ALOOPER_POLL_CALLBACK;
        } else {
            // The last message left at the head of the queue determines the next wakeup time.
            mNextMessageUptime = messageEnvelope.uptime;
            break;
        }
    }

    // Release lock.
    mLock.unlock();

    // Invoke all response callbacks collected by pushResponse() above.
    // A callback returning 0 asks to be unregistered.
    for (size_t i = 0; i < mResponses.size(); i++) {
        const Response& response = mResponses.itemAt(i);
        ALooper_callbackFunc callback = response.request.callback;
        if (callback) {
            int fd = response.request.fd;
            int events = response.events;
            void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            LOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                    this, callback, fd, events, data);
#endif
            int callbackResult = callback(fd, events, data);
            if (callbackResult == 0) {
                removeFd(fd);
            }
            result = ALOOPER_POLL_CALLBACK;
        }
    }
    return result;
}
namespace renderthread {

// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

// Intrusive singly-linked list of RenderTasks ordered by mRunAt.
// The tasks themselves carry the link pointer (mNext), so a task may be in
// at most one queue at a time.
TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

// Pops and returns the head task, or nullptr if the queue is empty.
// The returned task's mNext is cleared so it can be re-queued later.
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

// Returns the head task without removing it (nullptr if empty).
RenderTask* TaskQueue::peek() {
    return mHead;
}

// Inserts |task| keeping the list sorted by mRunAt (stable: ties go after
// existing tasks with the same mRunAt).
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // New earliest task: becomes the head.
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    mTail = task;
                }
            }
        }
    } else {
        mTail = mHead = task;
    }
}

// Pushes |task| at the head, ignoring mRunAt ordering (used for tasks that
// must run before anything else already queued).
void TaskQueue::queueAtFront(RenderTask* task) {
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

// Removes |task| from the queue; aborts if it is not actually queued.
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

// Task that forwards to RenderThread::dispatchFrameCallbacks(); scheduled
// with a small delay after each vsync (see drainDisplayEventQueue()).
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

static bool gHasRenderThreadInstance = false;

// True once getInstance() has been called at least once.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

// Constructor starts the thread immediately; heavier per-thread setup is
// deferred to initThreadLocals(), which runs on the new thread itself.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

RenderThread::~RenderThread() {
    // The render thread is process-lifetime; destruction is a bug.
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

// Creates the DisplayEventReceiver and registers its fd with the looper so
// vsync events wake the thread loop.
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

// Runs on the render thread (from threadLoop): queries display timing,
// then builds the GL/EGL-related state owned by this thread.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(frameIntervalNanos);
}

// Looper fd callback for the display event pipe. Return value is the looper
// contract: 0 unregisters the fd, 1 keeps it registered.
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred.  "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event.  "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

// Drains every pending event from the receiver and returns the timestamp of
// the most recent vsync seen (0 if none).
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
            case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                latest = ev.header.timestamp;
                break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

// Consumes pending vsync events; if a fresh vsync advanced the TimeLord,
// schedules mFrameCallbackTask slightly after the vsync so the UI thread has
// a chance to enqueue a frame first.
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

// Runs the registered IFrameCallbacks. The set is swapped out first so that
// callbacks re-registering themselves (via postFrameCallback) don't run again
// in the same pass.
void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin();
                it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

// Asks for exactly one more vsync; mVsyncRequested dedupes requests until the
// next vsync is actually consumed in drainDisplayEventQueue().
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

// Main loop: poll the looper, run all due tasks, compute the next wakeup,
// and keep the vsync machinery primed for pending frame callbacks.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        // Process our queue, if we have anything
        while (RenderTask* task = nextTask(&nextWakeup)) {
            task->run();
            // task may have deleted itself, do not reference it again
        }
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;  // nothing queued: sleep until woken
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            // Promote newly registered callbacks and make sure a vsync is on
            // the way to drive them.
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(),
                    mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

// Queues |task| (sorted by mRunAt) and wakes the looper if the task is due
// before the currently scheduled wakeup. NOTE(review): `if (mNextWakeup &&`
// treats a wakeup time of exactly 0 as "already woken" — presumably 0 is used
// as the "wake pending" sentinel; confirm before changing.
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

// Queues |task| and blocks the calling thread until the render thread has
// executed it (via SignalingRenderTask).
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    condition.wait(mutex);
}

// Queues |task| ahead of everything else and wakes the looper unconditionally.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

// Queues |task| to run at the absolute monotonic time |runAtNs|.
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

// Registers a frame callback; it is promoted into mFrameCallbacks by
// threadLoop(). NOTE(review): not locked — presumably only called on the
// render thread; confirm against callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

// Removes |callback| from both the active and pending sets; returns whether
// it was present in either.
bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

// Defers an active callback back to the pending set so it runs on a later
// frame instead of the current one.
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

// Returns the next due task (or nullptr) and reports the queue's next wakeup
// time through |nextWakeup| (LLONG_MAX if the queue is empty).
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            next = nullptr;  // head task not yet due
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

} /* namespace renderthread */
int AmlogicPlayerStreamSource::Source_read(unsigned char *buf, int size) { sp<AMessage> extra; ssize_t n =AVERROR(EAGAIN); int bufelselen = size; int retry=0; unsigned char *pbuf=buf; int waitretry=1000; /*100s*/ int oncereadmax=188*10; int readlen=0; while(mNeedMoreDataSize<=0){ /* if low level buffer is have enought data, block on read, the netlfix.apk don't like we have buffer on player. */ ///usleep(1000*100);/*10ms *100 =1S,same as tcp read*/ LOGI("Read wait::feedMoreData =%d,pos=%lld,waitretry=%d\n",mNeedMoreDataSize,pos,waitretry); mMoreDataLock.lock(); mWaitCondition.waitRelative(mMoreDataLock,milliseconds_to_nanoseconds(10)); mMoreDataLock.unlock(); if(url_interrupt_cb()){ return AVERROR_EXIT; } if(waitretry--<=0) return AVERROR(EAGAIN); } while(oncereadmax>0&&bufelselen > 0 && !url_interrupt_cb() ){ char *buffer=localbuf; int rlen; int newread=0; if(localdatasize>0){ n=localdatasize; }else{ n = mStreamListener->read(buffer, 188, &extra); newread=1; if(n>0) localdatasize=n; } if(n>0){ if (newread && buffer[0] == 0x00) { //FIXME if(buffer[1]==0x00) ;///DISCONTINUITY_SEEK else ;///DISCONTINUITY_FORMATCHANGE LOGV("DISCONTINUITY_SEEK=%d *****", n); continue;//to next packets } rlen=MIN(n,bufelselen); memcpy(pbuf,buffer,rlen); pbuf+=rlen; bufelselen-=rlen; oncereadmax-=rlen; if(n>bufelselen){ /*read buf is small than 188*/ localdatasize=n-rlen; memmove(buffer,buffer+rlen,localdatasize); }else{ localdatasize=0; } }else if(n==-11 && retry++<200){ usleep(1000*10);/*10ms *100 =1S,same as tcp read*/ n= AVERROR(EAGAIN); if((size-bufelselen)!=0) break;/*have read data before,return first*/ }else{ if(n==INFO_DISCONTINUITY){ LOGI("STREAM INFO DISCONTINUITY message=%d\n", n); continue;/*ignore this INFO,FIXME*/ }else if(n==INFO_FORMAT_CHANGED){ LOGI("STREAM INFO INFO_FORMAT_CHANGED message=%d\n", n); continue;/*ignore this INFO,FIXME*/ }else if(n==-11 ) n= AVERROR(EAGAIN); LOGV("Source_read error=%d"); break;//errors } LOGV(" Source_read=%d,retry=%d", n,retry); } #ifdef 
DUMP_DATA if(dumpfd>=0 && (size-bufelselen)>0){ write(dumpfd,buf,(size-bufelselen)); } #endif readlen=(size-bufelselen); if(readlen>0){/*readed data,lock and del readed size*/ Mutex::Autolock autoLock(mMoreDataLock); mNeedMoreDataSize-=readlen; pos+=readlen; } return readlen>0?readlen: n; }