Example #1
void GpuMemoryTracker::onFrameCompleted() {
    if (ATRACE_ENABLED()) {
        char buf[128];
        for (int type = 0; type < NUM_TYPES; type++) {
            snprintf(buf, 128, "hwui_%s", TYPE_NAMES[type]);
            const TypeStats& stats = gObjectStats[type];
            ATRACE_INT(buf, stats.totalSize);
            snprintf(buf, 128, "hwui_%s_count", TYPE_NAMES[type]);
            ATRACE_INT(buf, stats.count);
        }
    }

    std::vector<const Texture*> freeList;
    for (const auto& obj : gObjectSet) {
        if (obj->objectType() == GpuObjectType::Texture) {
            const Texture* texture = static_cast<Texture*>(obj);
            if (texture->cleanup) {
                ALOGE("Leaked texture marked for cleanup! id=%u, size %ux%u",
                        texture->id(), texture->width(), texture->height());
                freeList.push_back(texture);
            }
        }
    }
    for (auto& texture : freeList) {
        const_cast<Texture*>(texture)->deleteTexture();
        delete texture;
    }
}
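A minimal sketch of the counter idiom above, assuming the standard <utils/Trace.h> macros: the ATRACE_ENABLED() guard skips the snprintf formatting entirely when no trace is being captured. The helper name is hypothetical.
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <stdio.h>
#include <utils/Trace.h>

// Hypothetical helper: format and publish one named counter, paying the
// snprintf cost only while a trace capture is actually running.
static void traceTypeCounter(const char* typeName, int value) {
    if (ATRACE_ENABLED()) {
        char buf[128];
        snprintf(buf, sizeof(buf), "hwui_%s_count", typeName);
        ATRACE_INT(buf, value);
    }
}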
Example #2
void notify_function(union sigval sv)
{
    int ok;
    struct timespec ts;
    //write(1, ".", 1);
    if (count == 0) {
        printf("notify_function: getpid()=%d, gettid()=%d\n", getpid(), gettid());
        ok = sched_getscheduler(gettid());
        printf("scheduler = %d\n", ok);
    }
    if (count < MAX_COUNT) {
        ok = clock_gettime(CLOCK_MONOTONIC, &ts);
        if (0 == ok) {
            unsigned delta_sec = ts.tv_sec - previous.tv_sec;
            int delta_ns = ts.tv_nsec - previous.tv_nsec;
            if (delta_ns < 0) {
                delta_ns += 1000000000;
                --delta_sec;
            }
            struct timespec delta_x;
            delta_x.tv_sec = delta_sec;
            delta_x.tv_nsec = delta_ns;
            delta_ts[count++] = delta_x;
            previous = ts;
            ATRACE_INT("cycle_us", delta_ns / 1000);
        }
    }
}
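The delta computation above is the standard timespec borrow normalization; the same step as a standalone helper (a sketch, not part of the original test):
#include <time.h>

// Subtract two timespecs, borrowing one second into the nanosecond field
// when it goes negative, so the result is always normalized.
static struct timespec timespec_sub(struct timespec a, struct timespec b)
{
    struct timespec d;
    d.tv_sec = a.tv_sec - b.tv_sec;
    d.tv_nsec = a.tv_nsec - b.tv_nsec;
    if (d.tv_nsec < 0) {
        d.tv_nsec += 1000000000L;
        --d.tv_sec;
    }
    return d;
}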
Example #3
status_t BufferQueue::acquireBuffer(BufferItem *buffer) {
    ATRACE_CALL();
    Mutex::Autolock _l(mMutex);

    // Check that the consumer doesn't currently have the maximum number of
    // buffers acquired.  We allow the max buffer count to be exceeded by one
    // buffer, so that the consumer can successfully set up the newly acquired
    // buffer before releasing the old one.
    int numAcquiredBuffers = 0;
    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        if (mSlots[i].mBufferState == BufferSlot::ACQUIRED) {
            numAcquiredBuffers++;
        }
    }
    if (numAcquiredBuffers >= mMaxAcquiredBufferCount+1) {
        ST_LOGE("acquireBuffer: max acquired buffer count reached: %d (max=%d)",
                numAcquiredBuffers, mMaxAcquiredBufferCount);
        return INVALID_OPERATION;
    }

    // check if queue is empty
    // In asynchronous mode the list is guaranteed to be one buffer
    // deep, while in synchronous mode we use the oldest buffer.
    if (!mQueue.empty()) {
        Fifo::iterator front(mQueue.begin());
        int buf = *front;

        ATRACE_BUFFER_INDEX(buf);

        if (mSlots[buf].mAcquireCalled) {
            buffer->mGraphicBuffer = NULL;
        } else {
            buffer->mGraphicBuffer = mSlots[buf].mGraphicBuffer;
        }
        buffer->mCrop = mSlots[buf].mCrop;
        buffer->mTransform = mSlots[buf].mTransform;
        buffer->mScalingMode = mSlots[buf].mScalingMode;
        buffer->mFrameNumber = mSlots[buf].mFrameNumber;
        buffer->mTimestamp = mSlots[buf].mTimestamp;
        buffer->mBuf = buf;
        buffer->mFence = mSlots[buf].mFence;

        mSlots[buf].mAcquireCalled = true;
        mSlots[buf].mNeedsCleanupOnRelease = false;
        mSlots[buf].mBufferState = BufferSlot::ACQUIRED;
        mSlots[buf].mFence = Fence::NO_FENCE;

        mQueue.erase(front);
        mDequeueCondition.broadcast();

        ATRACE_INT(mConsumerName.string(), mQueue.size());
    } else {
        return NO_BUFFER_AVAILABLE;
    }

    return NO_ERROR;
}
Example #4
status_t BufferQueue::acquireBuffer(BufferItem *buffer) {
    ATRACE_CALL();
    Mutex::Autolock _l(mMutex);
    // check if queue is empty
    // In asynchronous mode the list is guaranteed to be one buffer
    // deep, while in synchronous mode we use the oldest buffer.
    if (!mQueue.empty()) {
        Fifo::iterator front(mQueue.begin());
        int buf = *front;

        ATRACE_BUFFER_INDEX(buf);

        if (mSlots[buf].mAcquireCalled) {
            buffer->mGraphicBuffer = NULL;
        } else {
            buffer->mGraphicBuffer = mSlots[buf].mGraphicBuffer;
        }
        buffer->mCrop = mSlots[buf].mCrop;
        buffer->mTransform = mSlots[buf].mTransform;
        buffer->mScalingMode = mSlots[buf].mScalingMode;
        buffer->mFrameNumber = mSlots[buf].mFrameNumber;
        buffer->mTimestamp = mSlots[buf].mTimestamp;
        buffer->mBuf = buf;
        mSlots[buf].mAcquireCalled = true;

        mSlots[buf].mBufferState = BufferSlot::ACQUIRED;
        mQueue.erase(front);
        mDequeueCondition.broadcast();

        ATRACE_INT(mConsumerName.string(), mQueue.size());
    } else {
        return NO_BUFFER_AVAILABLE;
    }

    return OK;
}
Example #5
void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
    status_t res;
    ATRACE_CALL();
    CameraMetadata frame;

    ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());

    while ( (res = device->getNextFrame(&frame)) == OK) {

        camera_metadata_entry_t entry;

        entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Error reading frame number",
                    __FUNCTION__, device->getId());
            break;
        }
        ATRACE_INT("cam2_frame", entry.data.i32[0]);

        if (!processSingleFrame(frame, device)) {
            break;
        }

        if (!frame.isEmpty()) {
            Mutex::Autolock al(mLastFrameMutex);
            mLastFrame.acquire(frame);
        }
    }
    if (res != NOT_ENOUGH_DATA) {
        ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
                __FUNCTION__, device->getId(), strerror(-res), res);
        return;
    }

    return;
}
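processNewFrames() drains metadata until getNextFrame() returns NOT_ENOUGH_DATA, the expected "queue empty" status; any other status is logged as an error. A minimal sketch of that contract, with stand-in status values:
// Drain a source until it reports "empty" (the expected terminal status);
// any other non-OK status is a real error. Status codes are stand-ins.
enum DrainStatus { DRAIN_OK = 0, DRAIN_EMPTY = -11, DRAIN_ERROR = -5 };

static enum DrainStatus drainAll(enum DrainStatus (*getNext)(void))
{
    enum DrainStatus res;
    while ((res = getNext()) == DRAIN_OK) {
        /* consume one item here */
    }
    return res == DRAIN_EMPTY ? DRAIN_OK : res;
}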
Example #6
status_t BufferQueue::queueBuffer(int buf,
        const QueueBufferInput& input, QueueBufferOutput* output) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(buf);

    Rect crop;
    uint32_t transform;
    int scalingMode;
    int64_t timestamp;

    input.deflate(&timestamp, &crop, &scalingMode, &transform);

    ST_LOGV("queueBuffer: slot=%d time=%#llx crop=[%d,%d,%d,%d] tr=%#x "
            "scale=%s",
            buf, timestamp, crop.left, crop.top, crop.right, crop.bottom,
            transform, scalingModeName(scalingMode));

    sp<ConsumerListener> listener;

    { // scope for the lock
        Mutex::Autolock lock(mMutex);
        if (mAbandoned) {
            ST_LOGE("queueBuffer: SurfaceTexture has been abandoned!");
            return NO_INIT;
        }
        if (buf < 0 || buf >= mBufferCount) {
            ST_LOGE("queueBuffer: slot index out of range [0, %d]: %d",
                    mBufferCount, buf);
            return -EINVAL;
        } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
            ST_LOGE("queueBuffer: slot %d is not owned by the client "
                    "(state=%d)", buf, mSlots[buf].mBufferState);
            return -EINVAL;
        } else if (!mSlots[buf].mRequestBufferCalled) {
            ST_LOGE("queueBuffer: slot %d was enqueued without requesting a "
                    "buffer", buf);
            return -EINVAL;
        }

        const sp<GraphicBuffer>& graphicBuffer(mSlots[buf].mGraphicBuffer);
        Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
        Rect croppedCrop;
        crop.intersect(bufferRect, &croppedCrop);
        if (croppedCrop != crop) {
            ST_LOGE("queueBuffer: crop rect is not contained within the "
                    "buffer in slot %d", buf);
            return -EINVAL;
        }

        if (mSynchronousMode) {
            // In synchronous mode we queue all buffers in a FIFO.
            mQueue.push_back(buf);

            // Synchronous mode always signals that an additional frame should
            // be consumed.
            listener = mConsumerListener;
        } else {
            // In asynchronous mode we only keep the most recent buffer.
            if (mQueue.empty()) {
                mQueue.push_back(buf);

                // Asynchronous mode only signals that a frame should be
                // consumed if no previous frame was pending. If a frame were
                // pending then the consumer would have already been notified.
                listener = mConsumerListener;
            } else {
                Fifo::iterator front(mQueue.begin());
                // buffer currently queued is freed
                mSlots[*front].mBufferState = BufferSlot::FREE;
                // and we record the new buffer index in the queued list
                *front = buf;
            }
        }

        mSlots[buf].mTimestamp = timestamp;
        mSlots[buf].mCrop = crop;
        mSlots[buf].mTransform = transform;

        switch (scalingMode) {
            case NATIVE_WINDOW_SCALING_MODE_FREEZE:
            case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
            case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
                break;
            default:
                ST_LOGE("unknown scaling mode: %d (ignoring)", scalingMode);
                scalingMode = mSlots[buf].mScalingMode;
                break;
        }

        mSlots[buf].mBufferState = BufferSlot::QUEUED;
        mSlots[buf].mScalingMode = scalingMode;
        mFrameCounter++;
        mSlots[buf].mFrameNumber = mFrameCounter;

        mBufferHasBeenQueued = true;
        mDequeueCondition.broadcast();

        output->inflate(mDefaultWidth, mDefaultHeight, mTransformHint,
                mQueue.size());

        ATRACE_INT(mConsumerName.string(), mQueue.size());
    } // scope for the lock

    // call back without lock held
    if (listener != 0) {
        listener->onFrameAvailable();
    }
    return OK;
}
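In asynchronous mode, queueBuffer() above keeps the queue at most one entry deep: a new buffer replaces the one already queued, and the consumer is only notified when the queue was previously empty. A sketch of that policy, with std::deque<int> standing in for the Fifo type:
#include <deque>

// Async-mode enqueue: keep only the newest slot index. Returns true when
// the consumer should be notified (no frame was pending before this one).
static bool queueAsync(std::deque<int>& queue, int slot)
{
    if (queue.empty()) {
        queue.push_back(slot);
        return true;       // first pending frame: notify the consumer
    }
    queue.front() = slot;  // replace the stale frame; the caller marks the
                           // displaced slot FREE, as in the code above
    return false;          // consumer was already notified earlier
}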
Example #7
bool FastThread::threadLoop()
{
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (sleepNs >= 0) {
            if (sleepNs > 0) {
                ALOG_ASSERT(sleepNs < 1000000000);
                const struct timespec req = {0, sleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        sleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &initial not available, should save address earlier
            //ALOG_ASSERT(current == &initial && previous == &initial);
            next = current;
        }

        command = next->mCommand;
        if (next != current) {

            // As soon as we learn of a new dump area, start using it
            dumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
            setLog(logWriter);

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(current->mCommand & FastThreadState::IDLE)) {
                if (command & FastThreadState::IDLE) {
                    onIdle();
                    oldTsValid = false;
#ifdef FAST_MIXER_STATISTICS
                    oldLoadValid = false;
#endif
                    ignoreNextOverrun = true;
                }
                previous = current;
            }
            current = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        dumpState->mCommand = command;

        // << current, previous, command, dumpState >>

        switch (command) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            sleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (current->mColdGen != coldGen) {
                int32_t *coldFutexAddr = current->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                int policy = sched_getscheduler(0);
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                isWarm = false;
                measuredWarmupTs.tv_sec = 0;
                measuredWarmupTs.tv_nsec = 0;
                warmupCycles = 0;
                sleepNs = -1;
                coldGen = current->mColdGen;
#ifdef FAST_MIXER_STATISTICS
                bounds = 0;
                full = false;
#endif
                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
                timestampStatus = INVALID_OPERATION;
            } else {
                sleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(command));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (current != previous) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            previous = current;
#endif
        }

        // do work using current state here
        attemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            //logWriter->logTimestamp(newTs);
            if (oldTsValid) {
                time_t sec = newTs.tv_sec - oldTs.tv_sec;
                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!isWarm && attemptedWrite) {
                    measuredWarmupTs.tv_sec += sec;
                    measuredWarmupTs.tv_nsec += nsec;
                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
                        measuredWarmupTs.tv_sec++;
                        measuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++warmupCycles;
                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
                        isWarm = true;
                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
                        dumpState->mWarmupCycles = warmupCycles;
                    }
                }
                sleepNs = -1;
                if (isWarm) {
                    if (sec > 0 || nsec > underrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        dumpState->mUnderruns++;
                        ignoreNextOverrun = true;
                    } else if (nsec < overrunNs) {
                        if (ignoreNextOverrun) {
                            ignoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            dumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate < than overrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        sleepNs = forceNs - nsec;
                    } else {
                        ignoreNextOverrun = false;
                    }
                }
#ifdef FAST_MIXER_STATISTICS
                if (isWarm) {
                    // advance the FIFO queue bounds
                    size_t i = bounds & (dumpState->mSamplingN - 1);
                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
                    if (full) {
                        bounds += 0x10000;
                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
                        full = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (oldLoadValid) {
                            sec = newLoad.tv_sec - oldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            oldLoadValid = true;
                        }
                        oldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = tcu.getCpukHz(cpuNum);
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    dumpState->mMonotonicNs[i] = monotonicNs;
                    dumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    dumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    dumpState->mBounds = bounds;
                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                    ATRACE_INT("load_us", loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                oldTsValid = true;
                sleepNs = periodNs;
                ignoreNextOverrun = true;
            }
            oldTs = newTs;
        } else {
            // monotonic clock is broken
            oldTsValid = false;
            sleepNs = periodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}
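The "bounds" word in the statistics block above packs two 16-bit indices into one atomic store: the low half is the newest (open) entry of the sampling ring buffer and the high half advances once the buffer has wrapped, so a dumpsys reader can snapshot a consistent window. A sketch of the advance step (the helper name is an assumption; samplingN must be a power of two):
#include <stdint.h>

// Advance the packed ring-buffer bounds: low 16 bits index the newest
// entry, high 16 bits index the oldest once the buffer is full.
static inline uint32_t advanceBounds(uint32_t bounds, uint32_t samplingN,
                                     bool* full)
{
    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
    if (*full) {
        bounds += 0x10000;                 // evict the oldest entry
    } else if (!(bounds & (samplingN - 1))) {
        *full = true;                      // wrapped for the first time
    }
    return bounds;
}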
Example #8
void HWComposer::vsync(int dpy, int64_t timestamp) {
    ATRACE_INT("VSYNC", ++mVSyncCount&1);
    mEventHandler.onVSyncReceived(dpy, timestamp);
}
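The ++mVSyncCount & 1 expression toggles the counter between 0 and 1 on each callback, so the VSYNC track renders as a square wave with one edge per vsync. The same idiom in isolation:
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <stdint.h>
#include <utils/Trace.h>

// Toggle a 0/1 counter on every event: the trace viewer draws a square
// wave whose edges mark each occurrence.
void onPeriodicEvent() {
    static uint32_t sCount = 0;
    ATRACE_INT("EVENT", ++sCount & 1);
}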
Example #9
status_t BufferQueueConsumer::acquireBuffer(BufferItem* outBuffer,
        nsecs_t expectedPresent, uint64_t maxFrameNumber) {
    ATRACE_CALL();

    int numDroppedBuffers = 0;
    sp<IProducerListener> listener;
    {
        Mutex::Autolock lock(mCore->mMutex);

        // Check that the consumer doesn't currently have the maximum number of
        // buffers acquired. We allow the max buffer count to be exceeded by one
        // buffer so that the consumer can successfully set up the newly acquired
        // buffer before releasing the old one.
        int numAcquiredBuffers = 0;
        for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
            if (mSlots[s].mBufferState == BufferSlot::ACQUIRED) {
                ++numAcquiredBuffers;
            }
        }
        if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1) {
            BQ_LOGE("acquireBuffer: max acquired buffer count reached: %d (max %d)",
                    numAcquiredBuffers, mCore->mMaxAcquiredBufferCount);
            return INVALID_OPERATION;
        }

        // Check if the queue is empty.
        // In asynchronous mode the list is guaranteed to be one buffer deep,
        // while in synchronous mode we use the oldest buffer.
        if (mCore->mQueue.empty()) {
            return NO_BUFFER_AVAILABLE;
        }

        BufferQueueCore::Fifo::iterator front(mCore->mQueue.begin());

        // If expectedPresent is specified, we may not want to return a buffer yet.
        // If it's specified and there's more than one buffer queued, we may want
        // to drop a buffer.
        if (expectedPresent != 0) {
            const nsecs_t MAX_REASONABLE_NSEC = 1000000000LL; // 1 second

            // The 'expectedPresent' argument indicates when the buffer is expected
            // to be presented on-screen. If the buffer's desired present time is
            // earlier (less) than expectedPresent -- meaning it will be displayed
            // on time or possibly late if we show it as soon as possible -- we
            // acquire and return it. If we don't want to display it until after the
            // expectedPresent time, we return PRESENT_LATER without acquiring it.
            //
            // To be safe, we don't defer acquisition if expectedPresent is more
            // than one second in the future beyond the desired present time
            // (i.e., we'd be holding the buffer for a long time).
            //
            // NOTE: Code assumes monotonic time values from the system clock
            // are positive.

            // Start by checking to see if we can drop frames. We skip this check if
            // the timestamps are being auto-generated by Surface. If the app isn't
            // generating timestamps explicitly, it probably doesn't want frames to
            // be discarded based on them.
            while (mCore->mQueue.size() > 1 && !mCore->mQueue[0].mIsAutoTimestamp) {
                const BufferItem& bufferItem(mCore->mQueue[1]);

                // If dropping entry[0] would leave us with a buffer that the
                // consumer is not yet ready for, don't drop it.
                if (maxFrameNumber && bufferItem.mFrameNumber > maxFrameNumber) {
                    break;
                }

                // If entry[1] is timely, drop entry[0] (and repeat). We apply an
                // additional criterion here: we only drop the earlier buffer if our
                // desiredPresent falls within +/- 1 second of the expected present.
                // Otherwise, bogus desiredPresent times (e.g., 0 or a small
                // relative timestamp), which normally mean "ignore the timestamp
                // and acquire immediately", would cause us to drop frames.
                //
                // We may want to add an additional criterion: don't drop the
                // earlier buffer if entry[1]'s fence hasn't signaled yet.
                nsecs_t desiredPresent = bufferItem.mTimestamp;
                if (desiredPresent < expectedPresent - MAX_REASONABLE_NSEC ||
                        desiredPresent > expectedPresent) {
                    // This buffer is set to display in the near future, or
                    // desiredPresent is garbage. Either way we don't want to drop
                    // the previous buffer just to get this on the screen sooner.
                    BQ_LOGV("acquireBuffer: nodrop desire=%" PRId64 " expect=%"
                            PRId64 " (%" PRId64 ") now=%" PRId64,
                            desiredPresent, expectedPresent,
                            desiredPresent - expectedPresent,
                            systemTime(CLOCK_MONOTONIC));
                    break;
                }

                BQ_LOGV("acquireBuffer: drop desire=%" PRId64 " expect=%" PRId64
                        " size=%zu",
                        desiredPresent, expectedPresent, mCore->mQueue.size());
                if (mCore->stillTracking(front)) {
                    // Front buffer is still in mSlots, so mark the slot as free
                    mSlots[front->mSlot].mBufferState = BufferSlot::FREE;
                    mCore->mFreeBuffers.push_back(front->mSlot);
                    listener = mCore->mConnectedProducerListener;
                    ++numDroppedBuffers;
                }
                mCore->mQueue.erase(front);
                front = mCore->mQueue.begin();
            }

            // See if the front buffer is ready to be acquired
            nsecs_t desiredPresent = front->mTimestamp;
            bool bufferIsDue = desiredPresent <= expectedPresent ||
                    desiredPresent > expectedPresent + MAX_REASONABLE_NSEC;
            bool consumerIsReady = maxFrameNumber > 0 ?
                    front->mFrameNumber <= maxFrameNumber : true;
            if (!bufferIsDue || !consumerIsReady) {
                BQ_LOGV("acquireBuffer: defer desire=%" PRId64 " expect=%" PRId64
                        " (%" PRId64 ") now=%" PRId64 " frame=%" PRIu64
                        " consumer=%" PRIu64,
                        desiredPresent, expectedPresent,
                        desiredPresent - expectedPresent,
                        systemTime(CLOCK_MONOTONIC),
                        front->mFrameNumber, maxFrameNumber);
                return PRESENT_LATER;
            }

            BQ_LOGV("acquireBuffer: accept desire=%" PRId64 " expect=%" PRId64 " "
                    "(%" PRId64 ") now=%" PRId64, desiredPresent, expectedPresent,
                    desiredPresent - expectedPresent,
                    systemTime(CLOCK_MONOTONIC));
        }

        int slot = front->mSlot;
        *outBuffer = *front;
        ATRACE_BUFFER_INDEX(slot);

        BQ_LOGV("acquireBuffer: acquiring { slot=%d/%" PRIu64 " buffer=%p }",
                slot, front->mFrameNumber, front->mGraphicBuffer->handle);
        // If the front buffer is still being tracked, update its slot state
        if (mCore->stillTracking(front)) {
            mSlots[slot].mAcquireCalled = true;
            mSlots[slot].mNeedsCleanupOnRelease = false;
            mSlots[slot].mBufferState = BufferSlot::ACQUIRED;
            mSlots[slot].mFence = Fence::NO_FENCE;
        }

        // If the buffer has previously been acquired by the consumer, set
        // mGraphicBuffer to NULL to avoid unnecessarily remapping this buffer
        // on the consumer side
        if (outBuffer->mAcquireCalled) {
            outBuffer->mGraphicBuffer = NULL;
        }

        mCore->mQueue.erase(front);

        // We might have freed a slot while dropping old buffers, or the producer
        // may be blocked waiting for the number of buffers in the queue to
        // decrease.
        mCore->mDequeueCondition.broadcast();

        ATRACE_INT(mCore->mConsumerName.string(), mCore->mQueue.size());

        mCore->validateConsistencyLocked();
    }

    if (listener != NULL) {
        for (int i = 0; i < numDroppedBuffers; ++i) {
            listener->onBufferReleased();
        }
    }

    return NO_ERROR;
}
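The acceptance test above reduces to a single predicate: a frame is due if its desired present time is at or before expectedPresent, or more than one second past it (in which case the timestamp is treated as garbage and ignored). As a standalone sketch, with nsecs_t re-declared for self-containment:
#include <stdint.h>
typedef int64_t nsecs_t;

// True when the buffer should be acquired now rather than deferred with
// PRESENT_LATER: it is on time, late, or carries a bogus timestamp.
static bool frameIsDue(nsecs_t desiredPresent, nsecs_t expectedPresent)
{
    const nsecs_t MAX_REASONABLE_NSEC = 1000000000LL;  // 1 second
    return desiredPresent <= expectedPresent ||
           desiredPresent > expectedPresent + MAX_REASONABLE_NSEC;
}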
Example #10
/*==============================================================================
* Function : startThumbnailEncode
* Parameters: None
* Return Value : OMX_ERRORTYPE
* Description: Start Thumbnail Encode
==============================================================================*/
OMX_ERRORTYPE OMXJpegEncoder::startThumbnailEncode()
{
  OMX_ERRORTYPE lret = OMX_ErrorNone;
  int lrc = 0;

  QOMX_YUV_FRAME_INFO *lbufferOffset = &m_thumbnailInfo.tmbOffset;
  if (!m_inTmbPort->bEnabled) {
    lbufferOffset = &m_imageBufferOffset;
    QIDBG_ERROR("%s:%d] TMB PORT IS NOT ENABLED", __func__, __LINE__);
  }

  //Set the offset for each plane
  uint32_t lOffset[OMX_MAX_NUM_PLANES] = {lbufferOffset->yOffset,
    lbufferOffset->cbcrOffset[0] , lbufferOffset->cbcrOffset[1]};

  //Set the physical offset for each plane
  uint32_t lPhyOffset[QI_MAX_PLANES] = {0,
    lbufferOffset->cbcrStartOffset[0],
    lbufferOffset->cbcrStartOffset[1]};

  if (NULL == m_thumbEncoder) {
    if (m_thumbFormat == QI_MONOCHROME) {
      QIDBG_MED("%s:%d] Monochrome thumbnail format, switching to HW encoder",
        __func__, __LINE__);
      m_thumbEncoder = m_factory.CreateEncoder(QImageCodecFactory::HW_CODEC_ONLY,
        m_thumbEncodeParams);
    } else {
      m_thumbEncoder = m_factory.CreateEncoder(QImageCodecFactory::SW_CODEC_ONLY,
      m_thumbEncodeParams);
    }
    if (m_thumbEncoder == NULL) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorInsufficientResources;
    }
  }

  m_inThumbImage = new QImage(m_inputTmbPadSize, m_thumbSubsampling, m_thumbFormat,
    m_inputTmbSize);
  if (m_inThumbImage == NULL) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorInsufficientResources;
  }

  lrc = m_inThumbImage->setDefaultPlanes(m_numOfPlanes, m_inputQTmbBuffer->Addr(),
    m_inputQTmbBuffer->Fd(), lOffset, lPhyOffset);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  /* Allocate thumbnail buffer */
  uint32_t lThumbSize = QImage::getImageSize(m_thumbEncodeParams.OutputSize(),
    m_thumbSubsampling, m_thumbFormat);
  QIDBG_MED("%s:%d] lThumbSize %d", __func__, __LINE__, lThumbSize);
  mThumbBuffer = QIHeapBuffer::New(lThumbSize);
  if (mThumbBuffer == NULL) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorInsufficientResources;
  }

  m_outThumbImage = new QImage(mThumbBuffer->Addr(),
    mThumbBuffer->Length(), QI_BITSTREAM);
  if (m_outThumbImage == NULL) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorInsufficientResources;
  }

  m_outThumbImage->SetFilledLen(0);

  lrc = m_thumbEncoder->SetOutputMode(QImageEncoderInterface::ENORMAL_OUTPUT);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  lrc = m_thumbEncoder->setEncodeParams(m_thumbEncodeParams);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  lrc = m_thumbEncoder->addInputImage(*m_inThumbImage);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  if (m_IONBuffer.length > 0) {
    m_outThumbImage->setWorkBufSize(m_IONBuffer.length);
  } else {
    m_outThumbImage->setWorkBufSize(m_outThumbImage->Length());
  }

  lrc = m_thumbEncoder->addOutputImage(*m_outThumbImage);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  lrc = m_thumbEncoder->addObserver(*this);
  if (lrc) {
    QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
    return OMX_ErrorUndefined;
  }

  m_thumbEncoding = OMX_TRUE;
  QIDBG_HIGH("%s:%d] startThumbnailEncode()", __func__, __LINE__);
  lrc = m_thumbEncoder->Start();
  if (lrc) {
    m_thumbEncoding = OMX_FALSE;
    QIDBG_ERROR("%s:%d] Thumbnail encoding failed to start",
      __func__, __LINE__);
    return OMX_ErrorUndefined;
  }
  ATRACE_INT("Camera:thumbnail", 1);
  QIDBG_HIGH("%s:%d] Started Thumbnail encoding", __func__, __LINE__);
  return lret;
}
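ATRACE_INT("Camera:thumbnail", 1) here pairs with ATRACE_INT("Camera:thumbnail", 0) in EncodeComplete() below: the encode finishes asynchronously on another thread, so no scoped trace can span it, and the rising and falling edges of a counter mark the duration instead. The idiom in miniature:
#define ATRACE_TAG ATRACE_TAG_CAMERA
#include <utils/Trace.h>

// Bracket an asynchronous operation with a 0/1 counter; no scope object
// survives across the thread boundary, but the counter edges do.
void asyncOpStarted()  { ATRACE_INT("Camera:thumbnail", 1); }
void asyncOpFinished() { ATRACE_INT("Camera:thumbnail", 0); }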
Example #11
/*==============================================================================
* Function : EncodeComplete
* Parameters: None
* Return Value : int
* Description: This function is called from the JPEG component when encoding is
* complete
==============================================================================*/
int OMXJpegEncoder::EncodeComplete(QImage *aOutputImage)
{
  QIDBG_MED("%s:%d] ", __func__, __LINE__);
  OMX_ERRORTYPE lret = OMX_ErrorNone;
  QIMessage *lmessage = NULL;

  QI_LOCK(&mEncodeDoneLock);
  if ((m_thumbEncoding == OMX_TRUE) && (m_outThumbImage != NULL) &&
    m_outThumbImage->BaseAddr() == aOutputImage->BaseAddr()) {
    ATRACE_INT("Camera:thumbnail", 0);
    QIDBG_HIGH("%s:%d] Thumbnail Encoding complete.",
      __func__, __LINE__);
    m_thumbEncoding = OMX_FALSE;
    m_thumbEncodingComplete = OMX_TRUE;

    if (NULL == m_memOps.get_memory) {
      lret = writeExifData(aOutputImage, m_outputQIBuffer);
      QIDBG_ERROR("%s:%d] Exif length: %d", __func__,  __LINE__,
        m_outputQIBuffer->FilledLen());
      if (QI_ERROR(lret)) {
        goto error;
      }
    }

    /* send ETB for thumbnail */
    QIMessage *lEtbMessage = new QIMessage();
    if (!lEtbMessage) {
      QIDBG_ERROR("%s:%d] Could not allocate QIMessage", __func__,  __LINE__);
      goto error_nomem;
    }
    lEtbMessage->m_qMessage = OMX_MESSAGE_ETB_DONE;
    lEtbMessage->pData = m_currentInTmbBuffHdr;
    postMessage(lEtbMessage);

    if (m_encoding_mode == OMX_Serial_Encoding) {
      /* Thumbnail exif write successful, Start main image encode */
      lmessage = new QIMessage();
      if (!lmessage) {
        QIDBG_ERROR("%s:%d] Could not allocate QIMessage", __func__,  __LINE__);
        goto error_nomem;
      }
      lmessage->m_qMessage = OMX_MESSAGE_START_MAIN_ENCODE;
      postMessage(lmessage);
      lmessage = NULL;
    } else {
      /* parallel encoding */
      QIDBG_MED("%s:%d] parallel encoding m_mainEncodingComplete %d", __func__,
        __LINE__, m_mainEncodingComplete);

      if (m_outputMainImage != NULL && m_outputMainImage->FilledLen() &&
        (OMX_TRUE == m_mainEncodingComplete)) {
        /* MainImage was finished first, now write MainImage */
        CompleteMainImage();
      }
    }
  } else if (m_outputMainImage != NULL &&
    m_outputMainImage->BaseAddr() == aOutputImage->BaseAddr()) {
    /* main image encoding complete */
    QIDBG_HIGH("%s:%d] MainImage Encoding complete. Filled "
      "Length = %d m_thumbEncodingComplete %d",
      __func__, __LINE__, m_outputMainImage->FilledLen(),
      m_thumbEncodingComplete);
    ATRACE_INT("Camera:JPEG:encode", 0);
    m_mainImageEncoding = OMX_FALSE;
    m_mainEncodingComplete = OMX_TRUE;

    if (m_encoding_mode == OMX_Serial_Encoding) {
      CompleteMainImage();
    } else {
      /* parallel encoding */

      /* thumbnail does not exist OR has already been encoded.
         Write MainImage to EXIF*/
      if (!m_inTmbPort->bEnabled ||
        (m_outThumbImage != NULL && m_outThumbImage->FilledLen()
        && (OMX_TRUE == m_thumbEncodingComplete))) {
        CompleteMainImage();
      }
    }
  }

  QI_UNLOCK(&mEncodeDoneLock);
  return QI_SUCCESS;

error:
  QI_UNLOCK(&mEncodeDoneLock);
  /* Propagate error */
  lmessage = new QIMessage();
  if (lmessage) {
    lmessage->m_qMessage = OMX_MESSAGE_EVENT_ERROR;
    lmessage->iData = lret;
    postMessage(lmessage);
  }
  return QI_ERR_GENERAL;

error_nomem:
  /* TBD: Propagate error */
  QI_UNLOCK(&mEncodeDoneLock);
  return QI_ERR_NO_MEMORY;
}
Example #12
/*==============================================================================
* Function : startEncode
* Parameters: None
* Return Value : OMX_ERRORTYPE
* Description: Get the encoder from the factory and start encoding
==============================================================================*/
OMX_ERRORTYPE OMXJpegEncoder::startEncode()
{
  OMX_ERRORTYPE lret = OMX_ErrorNone;
  QImageCodecFactory::QCodecPrefType lCodecPref =
  QImageCodecFactory::HW_CODEC_PREF;
  int lrc = 0;
  QIBuffer *lOutBuf;

  //Set the offset for each plane
  uint32_t lOffset[OMX_MAX_NUM_PLANES] = {m_imageBufferOffset.yOffset,
    m_imageBufferOffset.cbcrOffset[0], m_imageBufferOffset.cbcrOffset[1]};

  //Set the physical offset for each plane
  uint32_t lPhyOffset[QI_MAX_PLANES] = {0,
    m_imageBufferOffset.cbcrStartOffset[0],
    m_imageBufferOffset.cbcrStartOffset[1]};

  for (int i = 0; i < 2; i++) {
    //Get the appropriate Encoder from the factory
    if (NULL == m_mainEncoder) {
      m_mainEncoder = m_factory.CreateEncoder(lCodecPref,
          m_mainEncodeParams);
      if (m_mainEncoder == NULL) {
        QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
        return OMX_ErrorInsufficientResources;
      }
    }
    m_inputMainImage = new QImage(m_inputPadSize, m_subsampling, m_format,
      m_inputSize);
    if (m_inputMainImage == NULL) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorInsufficientResources;
    }

    lrc = m_inputMainImage->setDefaultPlanes(m_numOfPlanes,
        m_inputQIBuffer->Addr(), m_inputQIBuffer->Fd(), lOffset, lPhyOffset);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    QIBuffer lIONBuffer = QIBuffer(m_IONBuffer.vaddr, m_IONBuffer.length);
    lIONBuffer.SetFilledLen(0);
    lIONBuffer.SetFd(m_IONBuffer.fd);

    if ((m_outputQIBuffer->Fd() < 0) && (lIONBuffer.Fd() > -1)) {
      lOutBuf = &lIONBuffer;
    } else {
      lOutBuf = m_outputQIBuffer;
    }

    m_outputMainImage = new QImage(lOutBuf->Addr() +
        lOutBuf->FilledLen(),
        lOutBuf->Length() - lOutBuf->FilledLen(),
        QI_BITSTREAM);

    if (m_outputMainImage == NULL) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorInsufficientResources;
    }

    m_outputMainImage->setFd(lOutBuf->Fd());

    lrc = m_mainEncoder->SetOutputMode(QImageEncoderInterface::ENORMAL_OUTPUT);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    lrc = m_mainEncoder->setEncodeParams(m_mainEncodeParams);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    lrc = m_mainEncoder->addInputImage(*m_inputMainImage);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    if (m_IONBuffer.length > 0) {
      m_outputMainImage->setWorkBufSize(m_IONBuffer.length);
    } else {
      m_outputMainImage->setWorkBufSize(m_outputMainImage->Length());
    }

    lrc = m_mainEncoder->addOutputImage(*m_outputMainImage);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    lrc = m_mainEncoder->addObserver(*this);
    if (lrc) {
      QIDBG_ERROR("%s:%d] failed", __func__, __LINE__);
      return OMX_ErrorUndefined;
    }

    QIDBG_ERROR("%s:%d] startEncode()", __func__, __LINE__);
    ATRACE_INT("Camera:JPEG:encode", 1);
    m_mainImageEncoding = OMX_TRUE;
    lrc = m_mainEncoder->Start();
    if (!lrc) {
      lret = OMX_ErrorNone;
      m_mainImageEncoding = OMX_TRUE;
      break;
    } else {
      delete m_mainEncoder;
      m_mainEncoder = NULL;
      lret = OMX_ErrorUndefined;
      lCodecPref = QImageCodecFactory::SW_CODEC_ONLY;
      ATRACE_INT("Camera:JPEG:encode", 0);
      QIDBG_ERROR("%s:%d] Main Image encoding failed to start, "
        "switching to alternative encoder",__func__, __LINE__);

      continue;
    }
  }

  return lret;
}
Example #13
nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
    nsecs_t origRenderTime = renderTime;

    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    if (now >= mVsyncRefreshAt) {
        updateVsync();
    }

    // without VSYNC info, there is nothing to do
    if (mVsyncPeriod == 0) {
        ALOGV("no vsync: render=%lld", (long long)renderTime);
        return renderTime;
    }

    // ensure vsync time is well before (corrected) render time
    if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
        mVsyncTime -=
            ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
    }

    // Video presentation takes place at the VSYNC _after_ renderTime.  Adjust renderTime
    // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
    renderTime -= mVsyncPeriod / 2;

    const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
    if (videoPeriod > 0) {
        // Smooth out rendering
        size_t N = 12;
        nsecs_t fiveSixthDev =
            abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
                    / (mVsyncPeriod / 100);
        // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
        if (fiveSixthDev < 12) {  /* 12% / 6 = 2% */
            N = 20;
        }

        nsecs_t offset = 0;
        nsecs_t edgeRemainder = 0;
        for (size_t i = 1; i <= N; i++) {
            offset +=
                (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
            edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
        }
        mTimeCorrection += mVsyncPeriod / 2 - offset / N;
        renderTime += mTimeCorrection;
        nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
        edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2);
        if (edgeRemainder <= mVsyncPeriod / 3) {
            correctionLimit /= 2;
        }

        // estimate how many VSYNCs a frame will spend on the display
        nsecs_t nextVsyncTime =
            renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
        if (mLastVsyncTime >= 0) {
            size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
            size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
            bool vsyncsPerFrameAreNearlyConstant =
                periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;

            if (mTimeCorrection > correctionLimit &&
                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
                // remove a VSYNC
                mTimeCorrection -= mVsyncPeriod / 2;
                renderTime -= mVsyncPeriod / 2;
                nextVsyncTime -= mVsyncPeriod;
                --vsyncsForLastFrame;
            } else if (mTimeCorrection < -correctionLimit &&
                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
                // add a VSYNC
                mTimeCorrection += mVsyncPeriod / 2;
                renderTime += mVsyncPeriod / 2;
                nextVsyncTime += mVsyncPeriod;
                ++vsyncsForLastFrame;
            }
            ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
        }
        mLastVsyncTime = nextVsyncTime;
    }

    // align rendertime to the center between VSYNC edges
    renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
    renderTime += mVsyncPeriod / 2;
    ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
    ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
    return renderTime;
}
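The final two adjustments above snap the render time onto the midpoint between the surrounding VSYNC edges. Isolated, with nsecs_t re-declared for self-containment:
#include <stdint.h>
typedef int64_t nsecs_t;

// Align down to the previous VSYNC edge, then advance half a period so the
// render time sits centered between two edges (assumes renderTime has been
// kept well after vsyncTime, as the function above guarantees).
static nsecs_t alignToVsyncCenter(nsecs_t renderTime, nsecs_t vsyncTime,
                                  nsecs_t vsyncPeriod)
{
    renderTime -= (renderTime - vsyncTime) % vsyncPeriod;
    return renderTime + vsyncPeriod / 2;
}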
Example #14
int main(int argc, char **argv)
{
    int ok;
    if (argc < 2) {
        printf("Enter latency tolerance in milliseconds\n");
        return 0;
    }
    int thres_time = PERIOD_NS + (MILLISEC * atoi(argv[1]));
    printf("Latency threshold set to +%d ns\n", thres_time);
    bool fifo = argc == 2;
    printf("main: getpid()=%d, gettid()=%d\n", getpid(), gettid());
    if (fifo) {
        struct sched_param param;
        param.sched_priority = PRIORITY;
        ok = sched_setscheduler(gettid(), SCHED_FIFO, &param);
        printf("sched_setscheduler = %d\n", ok);
    } else {
        ok = setpriority(PRIO_PROCESS, 0 /* self */, -19);
        printf("setpriority = %d\n", ok);
    }
#ifdef USE_TIMER
    timer_t timerid;
    struct sigevent ev;
#endif
    clockid_t clockid;
    clockid = CLOCK_MONOTONIC;
#ifdef USE_TIMER
    ev.sigev_notify = SIGEV_THREAD;
    ev.sigev_signo = 0;
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify_function = notify_function;
    ev.sigev_notify_attributes = NULL;
    //ev.sigev_notify_thread_id = 0;
    ok = timer_create(clockid, &ev, &timerid);
    //printf("timer_create ok=%d, timerid=%p\n", ok, timerid);
#endif
    ok = clock_gettime(CLOCK_MONOTONIC, &previous);
    //printf("clock_gettime ok=%d\n", ok);
#ifdef USE_TIMER
    int flags = 0;
    struct itimerspec new_;
    struct itimerspec old;
    new_.it_interval.tv_sec = 0;
    new_.it_interval.tv_nsec = PERIOD_NS;
    new_.it_value.tv_sec = 0;
    new_.it_value.tv_nsec = PERIOD_NS;
#endif
    int seconds = (int) (((long long) PERIOD_NS * (long long) MAX_COUNT) / 1000000000LL);
    printf("please wait %d seconds\n", seconds);
#ifdef USE_TIMER
    ok = timer_settime(timerid, flags, &new_, &old);
    //printf("timer_settime ok=%d\n", ok);
    sleep(seconds + 1);
    ok = timer_delete(timerid);
    //printf("\ntimer_delete ok=%d\n", ok);
#else
    struct timespec delay;
    delay.tv_sec = 0;
    delay.tv_nsec = PERIOD_NS;
    for (count = 0; count < MAX_COUNT; ++count) {
        {
            // A named object is required: an unnamed ScopedTrace temporary
            // is destroyed at the end of the full-expression, so it would
            // not span the nanosleep() call.
            android::ScopedTrace trace(ATRACE_TAG, "nanosleep");
            nanosleep(&delay, NULL);
        }
        struct timespec ts;
        {
            android::ScopedTrace trace(ATRACE_TAG, "clock_gettime");
            ok = clock_gettime(CLOCK_MONOTONIC, &ts);
        }
        if (0 == ok) {
            unsigned delta_sec = ts.tv_sec - previous.tv_sec;
            int delta_ns = ts.tv_nsec - previous.tv_nsec;
            if (delta_ns < 0) {
                delta_ns += 1000000000;
                --delta_sec;
            }
            
            if (delta_ns > thres_time) {
                printf("[%d] Iterations passed\n", count);
                printf("delta exceeding at %u.%09d\n", delta_sec, delta_ns);
                return -1;
            }

            struct timespec delta_x;
            delta_x.tv_sec = delta_sec;
            delta_x.tv_nsec = delta_ns;
            delta_ts[count] = delta_x;
            previous = ts;
            ATRACE_INT("cycle_us", delta_ns / 1000);
        }
    }
#endif
    printf("expected samples: %d, actual samples: %d\n", MAX_COUNT, count);
    qsort(delta_ts, count, sizeof(struct timespec), compar);
    printf("99.8%% CDF, ideal is all ~%d ns:\n", PERIOD_NS);
    int i;
    for (i = (count * 998) / 1000; i < count; ++i) {
        printf("%lu.%09lu\n", delta_ts[i].tv_sec, delta_ts[i].tv_nsec);
    }
    return EXIT_SUCCESS;
}
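Examples #2 and #14 come from the same latency test and rely on globals and a qsort comparator defined elsewhere in that file. A minimal sketch of those declarations, consistent with how they are used (the exact constants are assumptions):
#include <time.h>

#define MAX_COUNT 1000       /* number of sleep cycles to sample (assumed) */
#define PERIOD_NS 10000000   /* 10 ms nominal period (assumed) */
#define MILLISEC  1000000    /* nanoseconds per millisecond */
#define PRIORITY  2          /* SCHED_FIFO priority for the test (assumed) */

static struct timespec previous;            /* timestamp of previous cycle */
static struct timespec delta_ts[MAX_COUNT]; /* per-cycle deltas for the CDF */
static int count;                           /* cycles sampled so far */

/* qsort comparator: order timespecs ascending by (tv_sec, tv_nsec). */
static int compar(const void* pa, const void* pb)
{
    const struct timespec* a = (const struct timespec*) pa;
    const struct timespec* b = (const struct timespec*) pb;
    if (a->tv_sec != b->tv_sec)
        return a->tv_sec < b->tv_sec ? -1 : 1;
    if (a->tv_nsec != b->tv_nsec)
        return a->tv_nsec < b->tv_nsec ? -1 : 1;
    return 0;
}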