// One iteration of the observer thread: block for the next uevent and,
// when a non-empty event string arrives, dispatch it to handleUEvent().
// Always returns true so the framework keeps the thread loop running.
bool ExtDisplayObserver::threadLoop() {
    static char uEventString[1024];
    memset(uEventString, 0, sizeof(uEventString));

    const int eventLen = uevent_next_event(uEventString, sizeof(uEventString));
    if (eventLen != 0) {
        // Debug-only trace of the raw uevent payload
        ALOGD_IF(EXT_OBSERVER_DEBUG, "%s: UeventString: %s len = %d",
                 __FUNCTION__, uEventString, eventLen);
        handleUEvent(uEventString);
    }
    return true;
}
void PerfSource::run() { int pipefd[2]; pthread_t procThread; ProcThreadArgs procThreadArgs; { DynBuf printb; DynBuf b1; DynBuf b2; const uint64_t currTime = getTime(); // Start events before reading proc to avoid race conditions if (!mCountersGroup.start() || !mIdleGroup.start()) { logg->logError(__FILE__, __LINE__, "PerfGroup::start failed", __FUNCTION__, __FILE__, __LINE__); handleException(); } if (!readProcComms(currTime, &mBuffer, &printb, &b1, &b2)) { logg->logError(__FILE__, __LINE__, "readProcComms failed"); handleException(); } mBuffer.commit(currTime); // Postpone reading kallsyms as on android adb gets too backed up and data is lost procThreadArgs.mBuffer = &mBuffer; procThreadArgs.mCurrTime = currTime; procThreadArgs.mIsDone = false; if (pthread_create(&procThread, NULL, procFunc, &procThreadArgs)) { logg->logError(__FILE__, __LINE__, "pthread_create failed", __FUNCTION__, __FILE__, __LINE__); handleException(); } } if (pipe_cloexec(pipefd) != 0) { logg->logError(__FILE__, __LINE__, "pipe failed"); handleException(); } mInterruptFd = pipefd[1]; if (!mMonitor.add(pipefd[0])) { logg->logError(__FILE__, __LINE__, "Monitor::add failed"); handleException(); } int timeout = -1; if (gSessionData->mLiveRate > 0) { timeout = gSessionData->mLiveRate/NS_PER_MS; } sem_post(mStartProfile); while (gSessionData->mSessionIsActive) { // +1 for uevents, +1 for pipe struct epoll_event events[NR_CPUS + 2]; int ready = mMonitor.wait(events, ARRAY_LENGTH(events), timeout); if (ready < 0) { logg->logError(__FILE__, __LINE__, "Monitor::wait failed"); handleException(); } const uint64_t currTime = getTime(); for (int i = 0; i < ready; ++i) { if (events[i].data.fd == mUEvent.getFd()) { if (!handleUEvent(currTime)) { logg->logError(__FILE__, __LINE__, "PerfSource::handleUEvent failed"); handleException(); } break; } } // send a notification that data is ready sem_post(mSenderSem); // In one shot mode, stop collection once all the buffers are filled // Assume timeout == 0 in this 
case if (gSessionData->mOneShot && gSessionData->mSessionIsActive) { logg->logMessage("%s(%s:%i): One shot", __FUNCTION__, __FILE__, __LINE__); child->endSession(); } } procThreadArgs.mIsDone = true; pthread_join(procThread, NULL); mIdleGroup.stop(); mCountersGroup.stop(); mBuffer.setDone(); mIsDone = true; // send a notification that data is ready sem_post(mSenderSem); mInterruptFd = -1; close(pipefd[0]); close(pipefd[1]); }
void PerfSource::run() { int pipefd[2]; if (pipe(pipefd) != 0) { logg->logError(__FILE__, __LINE__, "pipe failed"); handleException(); } mInterruptFd = pipefd[1]; if (!mMonitor.add(pipefd[0])) { logg->logError(__FILE__, __LINE__, "Monitor::add failed"); handleException(); } int timeout = -1; if (gSessionData->mLiveRate > 0) { timeout = gSessionData->mLiveRate/MS_PER_US; } sem_post(mStartProfile); while (gSessionData->mSessionIsActive) { // +1 for uevents, +1 for pipe struct epoll_event events[NR_CPUS + 2]; int ready = mMonitor.wait(events, ARRAY_LENGTH(events), timeout); if (ready < 0) { logg->logError(__FILE__, __LINE__, "Monitor::wait failed"); handleException(); } for (int i = 0; i < ready; ++i) { if (events[i].data.fd == mUEvent.getFd()) { if (!handleUEvent()) { logg->logError(__FILE__, __LINE__, "PerfSource::handleUEvent failed"); handleException(); } break; } } // send a notification that data is ready sem_post(mSenderSem); // In one shot mode, stop collection once all the buffers are filled // Assume timeout == 0 in this case if (gSessionData->mOneShot && gSessionData->mSessionIsActive) { logg->logMessage("%s(%s:%i): One shot", __FUNCTION__, __FILE__, __LINE__); child->endSession(); } } mCountersGroup.stop(); mBuffer.setDone(); mIsDone = true; // send a notification that data is ready sem_post(mSenderSem); mInterruptFd = -1; close(pipefd[0]); close(pipefd[1]); }
// Main capture loop for the perf source (newest variant): sets up the
// interrupt pipe, emits an initial per-core counter snapshot, reads /proc
// dependencies, spawns the proc-reader thread and (when uevents are
// unavailable) a CPU online-state poller, then services uevents until the
// session ends.  In live mode the wait timeout tracks mLiveRate deadlines.
void PerfSource::run()
{
    int pipefd[2];
    pthread_t procThread;
    ProcThreadArgs procThreadArgs;

    // Self-pipe: writing to mInterruptFd wakes the Monitor::wait below
    if (lib::pipe_cloexec(pipefd) != 0) {
        logg.logError("pipe failed");
        handleException();
    }
    mInterruptFd = pipefd[1];

    if (!mMonitor.add(pipefd[0])) {
        logg.logError("Monitor::add failed");
        handleException();
    }

    {
        DynBuf printb;
        DynBuf b1;
        // Timestamps here are relative to the monotonic session start
        const uint64_t currTime = getTime() - gSessionData.mMonotonicStarted;
        logg.logMessage("run at current time: %" PRIu64, currTime);
        // Start events before reading proc to avoid race conditions
        if (!enableOnCommandExec) {
            mCountersGroup.start();
        }
        // Initial counter snapshot for every core, framed by header/footer records
        mAttrsBuffer->perfCounterHeader(currTime);
        for (size_t cpu = 0; cpu < mCpuInfo.getNumberOfCores(); ++cpu) {
            mDriver.read(*mAttrsBuffer, cpu);
        }
        mAttrsBuffer->perfCounterFooter(currTime);
        // Failure to read /proc dependencies is fatal only for system-wide captures
        if (!readProcSysDependencies(currTime, *mAttrsBuffer, &printb, &b1, mFtraceDriver)) {
            if (mDriver.getConfig().is_system_wide) {
                logg.logError("readProcSysDependencies failed");
                handleException();
            }
            else {
                logg.logMessage("readProcSysDependencies failed");
            }
        }
        mAttrsBuffer->commit(currTime);

        // Postpone reading kallsyms as on android adb gets too backed up and data is lost
        procThreadArgs.mAttrsBuffer = mAttrsBuffer;
        procThreadArgs.mCurrTime = currTime;
        procThreadArgs.mIsDone = false;
        if (pthread_create(&procThread, NULL, procFunc, &procThreadArgs)) {
            logg.logError("pthread_create failed");
            handleException();
        }
    }

    // monitor online cores if no uevents
    std::unique_ptr<PerfCpuOnlineMonitor> onlineMonitorThread;
    if (!mUEvent.enabled()) {
        // NOTE(review): the callback captures by reference; presumably the
        // monitor thread is terminated before this frame unwinds (see
        // terminate() below) — confirm against PerfCpuOnlineMonitor.
        onlineMonitorThread.reset(new PerfCpuOnlineMonitor([&](unsigned cpu, bool online) -> void {
            logg.logMessage("CPU online state changed: %u -> %s", cpu, (online ? "online" : "offline"));
            const uint64_t currTime = getTime() - gSessionData.mMonotonicStarted;
            if (online) {
                handleCpuOnline(currTime, cpu);
            }
            else {
                handleCpuOffline(currTime, cpu);
            }
        }));
    }

    // start sync threads
    mSyncThreads = PerfSyncThreadBuffer::create(gSessionData.mMonotonicStarted,
                                                this->mDriver.getConfig().has_attr_clockid_support,
                                                this->mCountersGroup.hasSPE(), mSenderSem);

    // start profiling
    sem_post(&mStartProfile);

    // In live mode wake at mLiveRate deadlines; otherwise block until an
    // event or interrupt arrives (timeout == -1)
    const uint64_t NO_RATE = ~0ULL;
    const uint64_t rate = gSessionData.mLiveRate > 0 && gSessionData.mSampleRate > 0 ? gSessionData.mLiveRate : NO_RATE;
    uint64_t nextTime = 0;
    int timeout = rate != NO_RATE ? 0 : -1;
    while (gSessionData.mSessionIsActive) {
        // +1 for uevents, +1 for pipe
        std::vector<struct epoll_event> events {mCpuInfo.getNumberOfCores() + 2};
        int ready = mMonitor.wait(events.data(), events.size(), timeout);
        if (ready < 0) {
            logg.logError("Monitor::wait failed");
            handleException();
        }
        const uint64_t currTime = getTime() - gSessionData.mMonotonicStarted;
        // Dispatch any pending uevent notification
        for (int i = 0; i < ready; ++i) {
            if (events[i].data.fd == mUEvent.getFd()) {
                if (!handleUEvent(currTime)) {
                    logg.logError("PerfSource::handleUEvent failed");
                    handleException();
                }
                break;
            }
        }

        // send a notification that data is ready
        sem_post(&mSenderSem);

        // In one shot mode, stop collection once all the buffers are filled
        if (gSessionData.mOneShot && gSessionData.mSessionIsActive
                && ((mSummary.bytesAvailable() <= 0) || (mAttrsBuffer->bytesAvailable() <= 0) || mCountersBuf.isFull())) {
            logg.logMessage("One shot (perf)");
            mChild.endSession();
        }

        if (rate != NO_RATE) {
            // Advance nextTime to the first live-rate deadline after now
            while (currTime > nextTime) {
                nextTime += rate;
            }
            // + NS_PER_MS - 1 to ensure always rounding up
            timeout = std::max<int>(0, (nextTime + NS_PER_MS - 1 - getTime() + gSessionData.mMonotonicStarted) / NS_PER_MS);
        }
    }

    if (onlineMonitorThread) {
        onlineMonitorThread->terminate();
    }

    // Teardown: stop the proc thread, then counters, then mark buffers done
    procThreadArgs.mIsDone = true;
    pthread_join(procThread, NULL);
    mCountersGroup.stop();
    mAttrsBuffer->setDone();
    mIsDone = true;

    // terminate all remaining sync threads
    for (auto & ptr : mSyncThreads) {
        ptr->terminate();
    }

    // send a notification that data is ready
    sem_post(&mSenderSem);

    mInterruptFd = -1;
    close(pipefd[0]);
    close(pipefd[1]);
}