virtual bool threadLoop() {
    buffer_handle_t handle;
    { // Scope for mutex
        Mutex::Autolock lock(sMutex);
        while (mQueue.isEmpty()) {
            mQueuedCondition.wait(sMutex);
        }
        handle = mQueue[0];
    }

    status_t err;
    GraphicBufferAllocator& gba(GraphicBufferAllocator::get());
    { // Scope for tracing
        ATRACE_NAME("gralloc::free");
        err = gba.mAllocDev->free(gba.mAllocDev, handle);
    }
    ALOGW_IF(err, "free(...) failed %d (%s)", err, strerror(-err));

    if (err == NO_ERROR) {
        Mutex::Autolock _l(GraphicBufferAllocator::sLock);
        KeyedVector<buffer_handle_t, GraphicBufferAllocator::alloc_rec_t>&
                list(GraphicBufferAllocator::sAllocList);
        list.removeItem(handle);
    }

    { // Scope for mutex
        Mutex::Autolock lock(sMutex);
        mQueue.removeAt(0);
        mFreedCondition.broadcast();
    }

    return true;
}
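// The enqueue side of this worker is not part of the snippet above; a minimal
// sketch of what it presumably looks like (deferredFree is a hypothetical
// name): the producer appends under the same mutex, wakes the loop, and may
// wait on mFreedCondition if it needs free-completion semantics.
void deferredFree(buffer_handle_t handle) {
    Mutex::Autolock lock(sMutex);
    mQueue.add(handle);
    mQueuedCondition.broadcast(); // wake threadLoop()
    // Optional back-pressure: block until the worker has drained the queue.
    while (!mQueue.isEmpty()) {
        mFreedCondition.wait(sMutex);
    }
}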
void PathCache::PathProcessor::onProcess(const sp<Task<SkBitmap*> >& task) {
    PathTask* t = static_cast<PathTask*>(task.get());
    ATRACE_NAME("pathPrecache");

    float left, top, offset;
    uint32_t width, height;
    PathCache::computePathBounds(&t->path, &t->paint, left, top, offset, width, height);

    PathTexture* texture = t->texture;
    texture->left = left;
    texture->top = top;
    texture->offset = offset;
    texture->width = width;
    texture->height = height;

    if (width <= mMaxTextureSize && height <= mMaxTextureSize) {
        SkBitmap* bitmap = new SkBitmap();
        drawPath(&t->path, &t->paint, *bitmap, left, top, offset, width, height);
        t->setResult(bitmap);
    } else {
        texture->width = 0;
        texture->height = 0;
        t->setResult(NULL);
    }
}
void DrawFrameTask::run() {
    ATRACE_NAME("DrawFrame");

    mContext->profiler().setDensity(mDensity);
    mContext->profiler().startFrame(mRecordDurationNanos);

    bool canUnblockUiThread;
    bool canDrawThisFrame;
    {
        TreeInfo info(TreeInfo::MODE_FULL, mRenderThread->renderState());
        canUnblockUiThread = syncFrameState(info);
        canDrawThisFrame = info.out.canDrawThisFrame;
    }

    // Grab a copy of everything we need
    CanvasContext* context = mContext;

    // From this point on anything in "this" is *UNSAFE TO ACCESS*
    if (canUnblockUiThread) {
        unblockUiThread();
    }

    if (CC_LIKELY(canDrawThisFrame)) {
        context->draw();
    }

    if (!canUnblockUiThread) {
        unblockUiThread();
    }
}
bool EglManager::swapBuffers(const Frame& frame, const SkRect& screenDirty) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        fence();
    }

    EGLint rects[4];
    frame.map(screenDirty, rects);
    eglSwapBuffersWithDamageKHR(mEglDisplay, frame.mSurface, rects,
            screenDirty.isEmpty() ? 0 : 1);

    EGLint err = eglGetError();
    if (CC_LIKELY(err == EGL_SUCCESS)) {
        return true;
    }
    if (err == EGL_BAD_SURFACE || err == EGL_BAD_NATIVE_WINDOW) {
        // For some reason our surface was destroyed out from under us
        // This really shouldn't happen, but if it does we can recover easily
        // by just not trying to use the surface anymore
        ALOGW("swapBuffers encountered EGL error %d on %p, halting rendering...",
                err, frame.mSurface);
        return false;
    }
    LOG_ALWAYS_FATAL("Encountered EGL error %d %s during rendering",
            err, egl_error_str(err));
    // Impossible to hit this, but the compiler doesn't know that
    return false;
}
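// fence() is not shown above. A minimal sketch of what a full GPU fence looks
// like with the EGL_KHR_fence_sync extension (the actual implementation may
// differ, e.g. it may check for EGL_NO_SYNC_KHR and log failures):
void EglManager::fence() {
    EGLSyncKHR sync = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL);
    // Flush the pipeline and block until every previously issued GL command
    // has completed on the GPU.
    eglClientWaitSyncKHR(mEglDisplay, sync,
            EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR);
    eglDestroySyncKHR(mEglDisplay, sync);
}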
void DisplayList::defer(DeferStateStruct& deferStruct, const int level) {
    ATRACE_NAME(mName.string());
    DeferOperationHandler handler(deferStruct, level);
    iterate<DeferOperationHandler>(deferStruct.mRenderer, handler, level);

    DISPLAY_LIST_LOGD("%*sDone (%p, %s)", level * 2, "", this, mName.string());
}
virtual void onProcess(const sp<Task<TessellationCache::vertexBuffer_pair_t*> >& task) {
    ShadowTask* t = static_cast<ShadowTask*>(task.get());
    ATRACE_NAME("shadow tessellation");

    VertexBuffer* ambientBuffer = new VertexBuffer;
    VertexBuffer* spotBuffer = new VertexBuffer;
    tessellateShadows(&t->drawTransform, &t->localClip, t->opaque,
            &t->casterPerimeter, &t->transformXY, &t->transformZ,
            t->lightCenter, t->lightRadius, *ambientBuffer, *spotBuffer);

    t->setResult(new TessellationCache::vertexBuffer_pair_t(ambientBuffer, spotBuffer));
}
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
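// Illustrative arithmetic for the dispatch above (the value of
// DISPATCH_FRAME_CALLBACKS_DELAY is not shown in this snippet; assume 4 ms):
//   vsyncEvent = 1'000'000'000 ns  ->  runAt = 1'004'000'000 ns
// i.e. frame callbacks are queued to run shortly after the vsync they belong
// to, once mTimeLord has accepted the new frame time.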
virtual void onPositionLost(RenderNode& node, const TreeInfo* info) override {
    if (CC_UNLIKELY(!mWeakRef || (info && !info->updateWindowPositions))) return;

    ATRACE_NAME("SurfaceView position lost");
    JNIEnv* env = jnienv();
    jobject localref = env->NewLocalRef(mWeakRef);
    if (CC_UNLIKELY(!localref)) {
        jnienv()->DeleteWeakGlobalRef(mWeakRef);
        mWeakRef = nullptr;
        return;
    }

    env->CallVoidMethod(localref, gSurfaceViewPositionLostMethod,
            info ? info->canvasContext.getFrameNumber() : 0);
    env->DeleteLocalRef(localref);
}
bool Layer::resize(const uint32_t width, const uint32_t height) {
    uint32_t desiredWidth = computeIdealWidth(width);
    // Fixed: the original called computeIdealWidth(height), a copy-paste bug
    uint32_t desiredHeight = computeIdealHeight(height);

    if (desiredWidth <= getWidth() && desiredHeight <= getHeight()) {
        return true;
    }

    ATRACE_NAME("resizeLayer");

    const uint32_t maxTextureSize = caches.maxTextureSize;
    if (desiredWidth > maxTextureSize || desiredHeight > maxTextureSize) {
        ALOGW("Layer exceeds max. dimensions supported by the GPU (%dx%d, max=%dx%d)",
                desiredWidth, desiredHeight, maxTextureSize, maxTextureSize);
        return false;
    }

    uint32_t oldWidth = getWidth();
    uint32_t oldHeight = getHeight();

    setSize(desiredWidth, desiredHeight);

    if (fbo) {
        caches.textureState().activateTexture(0);
        bindTexture();
        allocateTexture();

        if (glGetError() != GL_NO_ERROR) {
            setSize(oldWidth, oldHeight);
            return false;
        }
    }

    if (stencil) {
        stencil->bind();
        stencil->resize(desiredWidth, desiredHeight);

        if (glGetError() != GL_NO_ERROR) {
            setSize(oldWidth, oldHeight);
            return false;
        }
    }

    return true;
}
void doUpdatePositionAsync(jlong frameNumber, jint left, jint top, jint right, jint bottom) {
    ATRACE_NAME("Update SurfaceView position");

    JNIEnv* env = jnienv();
    jobject localref = env->NewLocalRef(mWeakRef);
    if (CC_UNLIKELY(!localref)) {
        env->DeleteWeakGlobalRef(mWeakRef);
        mWeakRef = nullptr;
    } else {
        env->CallVoidMethod(localref, gSurfaceViewPositionUpdateMethod,
                frameNumber, left, top, right, bottom);
        env->DeleteLocalRef(localref);
    }

    // We need to release ourselves here
    decStrong(0);
}
void Font::precache(const SkPaint* paint, const char* text, int numGlyphs) {
    ATRACE_NAME("Precache Glyphs");

    if (numGlyphs == 0 || text == NULL) {
        return;
    }

    int glyphsCount = 0;
    while (glyphsCount < numGlyphs) {
        glyph_t glyph = GET_GLYPH(text);

        // Reached the end of the string
        if (IS_END_OF_STRING(glyph)) {
            break;
        }

        // The returned pointer is intentionally unused; the call warms the
        // glyph cache as a side effect
        CachedGlyphInfo* cachedGlyph = getCachedGlyph(paint, glyph, true);
        glyphsCount++;
    }
}
GLuint Program::buildShader(const char* source, GLenum type) {
    ATRACE_NAME("Build GL Shader");

    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, &source, nullptr);
    glCompileShader(shader);

    GLint status;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
    if (status != GL_TRUE) {
        ALOGE("Error while compiling this shader:\n===\n%s\n===", source);
        // Some drivers return wrong values for GL_INFO_LOG_LENGTH
        // use a fixed size instead
        GLchar log[512];
        glGetShaderInfoLog(shader, sizeof(log), nullptr, &log[0]);
        LOG_ALWAYS_FATAL("Shader info log: %s", log);
        return 0;
    }

    return shader;
}
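// buildShader only compiles one stage. A minimal sketch of the matching link
// step with the same fail-fast error handling (linkProgram is a hypothetical
// helper, not part of the Program implementation shown above; the GL calls
// themselves are standard):
GLuint linkProgram(GLuint vertexShader, GLuint fragmentShader) {
    GLuint program = glCreateProgram();
    glAttachShader(program, vertexShader);
    glAttachShader(program, fragmentShader);
    glLinkProgram(program);

    GLint status;
    glGetProgramiv(program, GL_LINK_STATUS, &status);
    if (status != GL_TRUE) {
        GLchar log[512];
        glGetProgramInfoLog(program, sizeof(log), nullptr, log);
        LOG_ALWAYS_FATAL("Program link log: %s", log);
        return 0;
    }
    return program;
}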
void AssetAtlas::init(sp<GraphicBuffer> buffer, int64_t* map, int count) {
    if (mImage) {
        return;
    }

    ATRACE_NAME("AssetAtlas::init");

    mImage = new Image(buffer);
    if (mImage->getTexture()) {
        if (!mTexture) {
            Caches& caches = Caches::getInstance();
            mTexture = new Texture(caches);
            mTexture->wrap(mImage->getTexture(),
                    buffer->getWidth(), buffer->getHeight(), GL_RGBA);
            createEntries(caches, map, count);
        }
    } else {
        ALOGW("Could not create atlas image");
        terminate();
    }
}
status_t BufferQueueConsumer::acquireBuffer(BufferItem* outBuffer,
        nsecs_t expectedPresent) {
    ATRACE_CALL();
    Mutex::Autolock lock(mCore->mMutex);

    // Check that the consumer doesn't currently have the maximum number of
    // buffers acquired. We allow the max buffer count to be exceeded by one
    // buffer so that the consumer can successfully set up the newly acquired
    // buffer before releasing the old one.
    int numAcquiredBuffers = 0;
    for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
        if (mSlots[s].mBufferState == BufferSlot::ACQUIRED) {
            ++numAcquiredBuffers;
        }
    }
    if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1) {
        BQ_LOGE("acquireBuffer: max acquired buffer count reached: %d (max %d)",
                numAcquiredBuffers, mCore->mMaxAcquiredBufferCount);
        return INVALID_OPERATION;
    }

    // Check if the queue is empty.
    // In asynchronous mode the list is guaranteed to be one buffer deep,
    // while in synchronous mode we use the oldest buffer.
    if (mCore->mQueue.empty()) {
        return NO_BUFFER_AVAILABLE;
    }

    BufferQueueCore::Fifo::iterator front(mCore->mQueue.begin());

    // If expectedPresent is specified, we may not want to return a buffer yet.
    // If it's specified and there's more than one buffer queued, we may want
    // to drop a buffer.
    if (expectedPresent != 0) {
        // Was 'const int', which cannot faithfully hold the unsigned
        // long long literal; nsecs_t matches the timestamps it is compared to.
        const nsecs_t MAX_REASONABLE_NSEC = 1000000000ULL; // 1 second

        // The 'expectedPresent' argument indicates when the buffer is expected
        // to be presented on-screen. If the buffer's desired present time is
        // earlier (less) than expectedPresent -- meaning it will be displayed
        // on time or possibly late if we show it as soon as possible -- we
        // acquire and return it. If we don't want to display it until after the
        // expectedPresent time, we return PRESENT_LATER without acquiring it.
        //
        // To be safe, we don't defer acquisition if expectedPresent is more
        // than one second in the future beyond the desired present time
        // (i.e., we'd be holding the buffer for a long time).
        //
        // NOTE: Code assumes monotonic time values from the system clock
        // are positive.

        // Start by checking to see if we can drop frames. We skip this check if
        // the timestamps are being auto-generated by Surface. If the app isn't
        // generating timestamps explicitly, it probably doesn't want frames to
        // be discarded based on them.
        while (mCore->mQueue.size() > 1 && !mCore->mQueue[0].mIsAutoTimestamp) {
            // If entry[1] is timely, drop entry[0] (and repeat). We apply an
            // additional criterion here: we only drop the earlier buffer if our
            // desiredPresent falls within +/- 1 second of the expected present.
            // Otherwise, bogus desiredPresent times (e.g., 0 or a small
            // relative timestamp), which normally mean "ignore the timestamp
            // and acquire immediately", would cause us to drop frames.
            //
            // We may want to add an additional criterion: don't drop the
            // earlier buffer if entry[1]'s fence hasn't signaled yet.
            const BufferItem& bufferItem(mCore->mQueue[1]);
            nsecs_t desiredPresent = bufferItem.mTimestamp;
            if (desiredPresent < expectedPresent - MAX_REASONABLE_NSEC ||
                    desiredPresent > expectedPresent) {
                // This buffer is set to display in the near future, or
                // desiredPresent is garbage. Either way we don't want to drop
                // the previous buffer just to get this on the screen sooner.
                BQ_LOGV("acquireBuffer: nodrop desire=%" PRId64 " expect=%" PRId64
                        " (%" PRId64 ") now=%" PRId64,
                        desiredPresent, expectedPresent,
                        desiredPresent - expectedPresent,
                        systemTime(CLOCK_MONOTONIC));
                break;
            }

            BQ_LOGV("acquireBuffer: drop desire=%" PRId64 " expect=%" PRId64
                    " size=%zu",
                    desiredPresent, expectedPresent, mCore->mQueue.size());
            if (mCore->stillTracking(front)) {
#ifdef MTK_AOSP_ENHANCEMENT
                BQ_LOGI("acquireBuffer: slot %d is dropped, handle=%p",
                        front->mSlot, mSlots[front->mSlot].mGraphicBuffer->handle);
                char ___traceBuf[128];
                snprintf(___traceBuf, 128, "dropped:%d (h:%p)", front->mSlot,
                        mSlots[front->mSlot].mGraphicBuffer->handle);
                android::ScopedTrace ___bufTracer(ATRACE_TAG, ___traceBuf);
#endif
                // Front buffer is still in mSlots, so mark the slot as free
                mSlots[front->mSlot].mBufferState = BufferSlot::FREE;
            }
            mCore->mQueue.erase(front);
            front = mCore->mQueue.begin();
        }

        // See if the front buffer is due
        nsecs_t desiredPresent = front->mTimestamp;
        if (desiredPresent > expectedPresent &&
                desiredPresent < expectedPresent + MAX_REASONABLE_NSEC) {
            BQ_LOGV("acquireBuffer: defer desire=%" PRId64 " expect=%" PRId64
                    " (%" PRId64 ") now=%" PRId64,
                    desiredPresent, expectedPresent,
                    desiredPresent - expectedPresent,
                    systemTime(CLOCK_MONOTONIC));
#ifdef MTK_AOSP_ENHANCEMENT
            if (mCore->debugger.mConnectedApi == NATIVE_WINDOW_API_MEDIA) {
                char ___traceBuf[128];
                snprintf(___traceBuf, 128, " defer %s(us)",
                        mCore->mConsumerName.string());
                ATRACE_INT_PERF(___traceBuf, (desiredPresent - expectedPresent) / 1000);
                snprintf(___traceBuf, 128, "desire=%" PRId64 " expect=%" PRId64,
                        desiredPresent, expectedPresent);
                ATRACE_NAME(___traceBuf);
            }
#endif
            return PRESENT_LATER;
        }

#ifdef MTK_AOSP_ENHANCEMENT
        if (mCore->debugger.mConnectedApi == NATIVE_WINDOW_API_MEDIA) {
            char ___traceBuf[128];
            snprintf(___traceBuf, 128, " defer %s(us)",
                    mCore->mConsumerName.string());
            ATRACE_INT_PERF(___traceBuf, 0);
        }
#endif
        BQ_LOGV("acquireBuffer: accept desire=%" PRId64 " expect=%" PRId64
                " (%" PRId64 ") now=%" PRId64,
                desiredPresent, expectedPresent,
                desiredPresent - expectedPresent,
                systemTime(CLOCK_MONOTONIC));
    }

    int slot = front->mSlot;
    *outBuffer = *front;
    ATRACE_BUFFER_INDEX(slot);

    BQ_LOGV("acquireBuffer: acquiring { slot=%d/%" PRIu64 " buffer=%p }",
            slot, front->mFrameNumber, front->mGraphicBuffer->handle);
    // If the front buffer is still being tracked, update its slot state
    if (mCore->stillTracking(front)) {
        mSlots[slot].mAcquireCalled = true;
        mSlots[slot].mNeedsCleanupOnRelease = false;
        mSlots[slot].mBufferState = BufferSlot::ACQUIRED;
        mSlots[slot].mFence = Fence::NO_FENCE;
    }

    // If the buffer has previously been acquired by the consumer, set
    // mGraphicBuffer to NULL to avoid unnecessarily remapping this buffer
    // on the consumer side
    if (outBuffer->mAcquireCalled) {
        outBuffer->mGraphicBuffer = NULL;
    }

#ifdef MTK_AOSP_ENHANCEMENT
    // 1. for dump, buffers held by BufferQueueDump should be updated
    // 2. to draw white debug line
    mCore->debugger.onAcquire(slot, front->mGraphicBuffer, front->mFence,
            front->mTimestamp, outBuffer);
#endif

    mCore->mQueue.erase(front);

    // We might have freed a slot while dropping old buffers, or the producer
    // may be blocked waiting for the number of buffers in the queue to
    // decrease.
    mCore->mDequeueCondition.broadcast();

#ifdef MTK_AOSP_ENHANCEMENT
    ATRACE_INT_PERF(mCore->mConsumerName.string(), mCore->mQueue.size());
#else
    ATRACE_INT(mCore->mConsumerName.string(), mCore->mQueue.size());
#endif

    return NO_ERROR;
}
status_t Camera3OutputStream::returnBufferCheckedLocked(
            const camera3_stream_buffer &buffer,
            nsecs_t timestamp,
            bool output,
            /*out*/
            sp<Fence> *releaseFenceOut) {

    (void)output;
    ALOG_ASSERT(output, "Expected output to be true");

    status_t res;
    sp<Fence> releaseFence;

    /**
     * Fence management - calculate Release Fence
     */
    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
        if (buffer.release_fence != -1) {
            ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
                  "there is an error", __FUNCTION__, mId, buffer.release_fence);
            close(buffer.release_fence);
        }

        /**
         * Reassign release fence as the acquire fence in case of error
         */
        releaseFence = new Fence(buffer.acquire_fence);
    } else {
        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                  __FUNCTION__, mId, strerror(-res), res);
            return res;
        }

        releaseFence = new Fence(buffer.release_fence);
    }

    int anwReleaseFence = releaseFence->dup();

    /**
     * Release the lock briefly to avoid deadlock with
     * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
     * thread will go into StreamingProcessor::onFrameAvailable) during
     * queueBuffer
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    mLock.unlock();

    /**
     * Return buffer back to ANativeWindow
     */
    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
        // Cancel buffer
        res = currentConsumer->cancelBuffer(currentConsumer.get(),
                container_of(buffer.buffer, ANativeWindowBuffer, handle),
                anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }
    } else {
        if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
            {
                char traceLog[48];
                snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
                ATRACE_NAME(traceLog);
            }
            mTraceFirstBuffer = false;
        }

        res = currentConsumer->queueBuffer(currentConsumer.get(),
                container_of(buffer.buffer, ANativeWindowBuffer, handle),
                anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error queueing buffer to native window: "
                  "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }
    }
    mLock.lock();
    if (res != OK) {
        close(anwReleaseFence);
    }

    *releaseFenceOut = releaseFence;

    return res;
}
bool FastThread::threadLoop() {
    for (;;) {
        // either nanosleep, sched_yield, or busy wait
        if (sleepNs >= 0) {
            if (sleepNs > 0) {
                ALOG_ASSERT(sleepNs < 1000000000);
                const struct timespec req = {0, sleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        sleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &initial not available, should save address earlier
            //ALOG_ASSERT(current == &initial && previous == &initial);
            next = current;
        }

        command = next->mCommand;
        if (next != current) {
            // As soon as possible of learning of a new dump area, start using it
            dumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
            setLog(logWriter);

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(current->mCommand & FastThreadState::IDLE)) {
                if (command & FastThreadState::IDLE) {
                    onIdle();
                    oldTsValid = false;
#ifdef FAST_MIXER_STATISTICS
                    oldLoadValid = false;
#endif
                    ignoreNextOverrun = true;
                }
                previous = current;
            }
            current = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        dumpState->mCommand = command;

        // << current, previous, command, dumpState >>

        switch (command) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            sleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (current->mColdGen != coldGen) {
                int32_t *coldFutexAddr = current->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                int policy = sched_getscheduler(0);
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                isWarm = false;
                measuredWarmupTs.tv_sec = 0;
                measuredWarmupTs.tv_nsec = 0;
                warmupCycles = 0;
                sleepNs = -1;
                coldGen = current->mColdGen;
#ifdef FAST_MIXER_STATISTICS
                bounds = 0;
                full = false;
#endif
                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
                timestampStatus = INVALID_OPERATION;
            } else {
                sleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(command));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (current != previous) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            previous = current;
#endif
        }

        // do work using current state here
        attemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            //logWriter->logTimestamp(newTs);
            if (oldTsValid) {
                time_t sec = newTs.tv_sec - oldTs.tv_sec;
                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }

                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!isWarm && attemptedWrite) {
                    measuredWarmupTs.tv_sec += sec;
                    measuredWarmupTs.tv_nsec += nsec;
                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
                        measuredWarmupTs.tv_sec++;
                        measuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++warmupCycles;
                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
                        isWarm = true;
                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
                        dumpState->mWarmupCycles = warmupCycles;
                    }
                }

                sleepNs = -1;
                if (isWarm) {
                    if (sec > 0 || nsec > underrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        dumpState->mUnderruns++;
                        ignoreNextOverrun = true;
                    } else if (nsec < overrunNs) {
                        if (ignoreNextOverrun) {
                            ignoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            dumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate < than overrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        sleepNs = forceNs - nsec;
                    } else {
                        ignoreNextOverrun = false;
                    }
                }

#ifdef FAST_MIXER_STATISTICS
                if (isWarm) {
                    // advance the FIFO queue bounds
                    size_t i = bounds & (dumpState->mSamplingN - 1);
                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
                    if (full) {
                        bounds += 0x10000;
                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
                        full = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (oldLoadValid) {
                            sec = newLoad.tv_sec - oldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            oldLoadValid = true;
                        }
                        oldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = tcu.getCpukHz(cpuNum);
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    dumpState->mMonotonicNs[i] = monotonicNs;
                    dumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    dumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    dumpState->mBounds = bounds;
                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                    ATRACE_INT("load_us", loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                oldTsValid = true;
                sleepNs = periodNs;
                ignoreNextOverrun = true;
            }
            oldTs = newTs;
        } else {
            // monotonic clock is broken
            oldTsValid = false;
            sleepNs = periodNs;
        }
    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}
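// Worked example of the cycle-time checks above (illustrative values only;
// the real underrunNs/overrunNs/forceNs coefficients are set elsewhere).
// Assume a 20 ms period, underrunNs = 35'000'000 and overrunNs = 5'000'000:
//
//   nsec = 40'000'000 (> underrunNs) -> "underrun": the cycle took far too
//       long; count it and ignore the next overrun.
//   nsec =  2'000'000 (< overrunNs)  -> "overrun": the cycle came back too
//       fast; sleep forceNs - nsec to force a minimum cycle time.
//   nsec = 20'500'000 (in between)   -> on time; sleepNs stays -1 and the
//       sink's blocking write paces the loop.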
virtual void onProcess(const sp<Task<VertexBuffer*> >& task) {
    TessellationTask* t = static_cast<TessellationTask*>(task.get());
    ATRACE_NAME("shape tessellation");

    VertexBuffer* buffer = t->tessellator(t->description);
    t->setResult(buffer);
}
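// All of the snippets above lean on the scoped-trace idiom from
// <utils/Trace.h>: ATRACE_NAME(name) instantiates an android::ScopedTrace
// whose constructor begins the systrace slice and whose destructor ends it.
// That is why several functions above introduce bare braces ("Scope for
// tracing") around the traced work. A minimal sketch of the idiom
// (doExpensiveStep and doCheapStep are hypothetical):
#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <utils/Trace.h>

void tracedExample() {
    { // Scope for tracing: the slice ends when this block exits
        ATRACE_NAME("expensive-step");
        doExpensiveStep();
    }
    doCheapStep(); // intentionally outside the traced scope
}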