// This test is intended to verify that proper synchronization is done when
// rendering into an FBO.
TEST_F(SurfaceTextureFBOTest, BlitFromCpuFilledBufferToFbo) {
    const int texWidth = 64;
    const int texHeight = 64;

    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
            texWidth, texHeight, HAL_PIXEL_FORMAT_RGBA_8888));
    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));

    android_native_buffer_t* anb;
    ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(),
            &anb));
    ASSERT_TRUE(anb != NULL);

    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));

    // Fill the buffer with green
    uint8_t* img = NULL;
    buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
    fillRGBA8BufferSolid(img, texWidth, texHeight, buf->getStride(), 0, 255,
            0, 255);
    buf->unlock();
    ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
            -1));

    ASSERT_EQ(NO_ERROR, mST->updateTexImage());

    glBindFramebuffer(GL_FRAMEBUFFER, mFbo);
    drawTexture();
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    for (int i = 0; i < 4; i++) {
        SCOPED_TRACE(String8::format("frame %d", i).string());

        ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(),
                &anb));
        ASSERT_TRUE(anb != NULL);

        buf = new GraphicBuffer(anb, false);

        // Fill the buffer with red
        ASSERT_EQ(NO_ERROR, buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN,
                (void**)(&img)));
        fillRGBA8BufferSolid(img, texWidth, texHeight, buf->getStride(), 255, 0,
                0, 255);
        ASSERT_EQ(NO_ERROR, buf->unlock());
        ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(),
                buf->getNativeBuffer(), -1));

        ASSERT_EQ(NO_ERROR, mST->updateTexImage());

        drawTexture();

        EXPECT_TRUE(checkPixel( 24, 39, 255, 0, 0, 255));
    }

    glBindFramebuffer(GL_FRAMEBUFFER, mFbo);

    EXPECT_TRUE(checkPixel( 24, 39, 0, 255, 0, 255));
}
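// A hedged sketch of the FBO setup the fixture above is assumed to do in its
// SetUp() (mFbo/mFboTex follow the test's names; the exact AOSP fixture code
// may differ): attach a color texture so drawTexture() has a render target.
static void setUpFboSketch(GLuint* mFbo, GLuint* mFboTex) {
    glGenTextures(1, mFboTex);
    glBindTexture(GL_TEXTURE_2D, *mFboTex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 64, 64, 0, GL_RGBA,
            GL_UNSIGNED_BYTE, NULL);
    glBindTexture(GL_TEXTURE_2D, 0);

    glGenFramebuffers(1, mFbo);
    glBindFramebuffer(GL_FRAMEBUFFER, mFbo[0]);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
            GL_TEXTURE_2D, *mFboTex, 0);
    // The attachment must be complete before drawTexture() renders into it.
    ASSERT_EQ(GLenum(GL_FRAMEBUFFER_COMPLETE),
            glCheckFramebufferStatus(GL_FRAMEBUFFER));
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}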
Example #2
static bool displayOneVideoFrameAndroid(int32_t fd, int32_t index)
{
    int32_t ioctlRet = -1;
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));

    if (mNativeWindow->queueBuffer(mNativeWindow.get(), mWindBuff[index], -1) != 0) {
        fprintf(stderr, "queue buffer to native window failed\n");
        return false;
    }

    ANativeWindowBuffer* pbuf = NULL;
    status_t err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &pbuf);
    if (err != 0) {
        fprintf(stderr, "dequeueBuffer failed: %s (%d)\n", strerror(-err), -err);
        return false;
    }

    buffer.m.userptr = (unsigned long)pbuf;
    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    uint32_t i;
    for (i = 0; i < mWindBuff.size(); i++) {
        if (pbuf == mWindBuff[i]) {
            buffer.index = i;
            break;
        }
    }
    if (i == mWindBuff.size())
        return false;

    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_QBUF, &buffer);
    ASSERT(ioctlRet != -1);

    return true;
}
Example #3
bool EncodeInputSurface::getOneFrameInput(VideoFrameRawData& inputBuffer)
{
    ANativeWindowBuffer* anb = NULL;
    memset(&inputBuffer, 0, sizeof(inputBuffer));
    inputBuffer.memoryType = VIDEO_DATA_MEMORY_TYPE_ANDROID_NATIVE_BUFFER;

    anb = mBufferInfo.front();
    mBufferInfo.pop();
    DEBUG("queueBuffer anb: %p\n", anb);
    int ret = GET_ANATIVEWINDOW(mNativeWindow)->queueBuffer(GET_ANATIVEWINDOW(mNativeWindow), anb, -1);
    if (ret != 0) {
        ERROR("queueBuffer failed: %s (%d)", strerror(-ret), -ret);
        return false;
    }

    ret = native_window_dequeue_buffer_and_wait(
        GET_ANATIVEWINDOW(mNativeWindow), &anb);
    if (ret != 0) {
        ERROR("native_window_dequeue_Buffer failed: (%d)\n", ret);
        return false;
    }

    inputBuffer.handle = (intptr_t)anb;
    DEBUG("get ANativeWindowBuffer: %p from surface to encode\n", anb);
    // FIXME: push it back to the queue only after encoding finishes
    mBufferInfo.push(anb);

    return true;
}
	inline int dequeueBuffer(ANativeWindow* window, ANativeWindowBuffer** buffer)
	{
		#if ANDROID_PLATFORM_SDK_VERSION > 16
			return native_window_dequeue_buffer_and_wait(window, buffer);
		#else
			return window->dequeueBuffer(window, buffer);
		#endif
	}
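// A minimal usage sketch for the wrapper above, assuming the post-SDK-16
// three-argument queueBuffer (older SDKs take no fence fd); fillPixels is a
// hypothetical rendering step, and window is already connected:
static int renderOneFrameSketch(ANativeWindow* window) {
    ANativeWindowBuffer* buffer = NULL;
    int err = dequeueBuffer(window, &buffer);
    if (err != 0)
        return err;
    // ... fillPixels(buffer) ...
    return window->queueBuffer(window, buffer, -1); // -1: no fence to wait on
}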
// This test probably doesn't belong here.
TEST_F(SurfaceTest, ScreenshotsOfProtectedBuffersSucceed) {
    sp<ANativeWindow> anw(mSurface);

    // Verify the screenshot works with no protected buffers.
    sp<CpuConsumer> consumer = new CpuConsumer(1);
    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
    sp<IBinder> display(sf->getBuiltInDisplay(ISurfaceComposer::eDisplayIdMain));
    ASSERT_EQ(NO_ERROR, sf->captureScreen(display, consumer->getBufferQueue(),
            64, 64, 0, 0x7fffffff, true));

    // Set the PROTECTED usage bit and verify that the screenshot fails.  Note
    // that we need to dequeue a buffer in order for it to actually get
    // allocated in SurfaceFlinger.
    ASSERT_EQ(NO_ERROR, native_window_set_usage(anw.get(),
            GRALLOC_USAGE_PROTECTED));
    ASSERT_EQ(NO_ERROR, native_window_set_buffer_count(anw.get(), 3));
    ANativeWindowBuffer* buf = 0;

    status_t err = native_window_dequeue_buffer_and_wait(anw.get(), &buf);
    if (err) {
        // We may fail if GRALLOC_USAGE_PROTECTED is not supported; that is
        // okay as long as it is the reason for the failure. Try again
        // without the GRALLOC_USAGE_PROTECTED bit.
        ASSERT_EQ(NO_ERROR, native_window_set_usage(anw.get(), 0));
        ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(anw.get(),
                &buf));
        return;
    }
    ASSERT_EQ(NO_ERROR, anw->cancelBuffer(anw.get(), buf, -1));

    for (int i = 0; i < 4; i++) {
        // Loop to make sure SurfaceFlinger has retired a protected buffer.
        ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(anw.get(),
                &buf));
        ASSERT_EQ(NO_ERROR, anw->queueBuffer(anw.get(), buf, -1));
    }
    ASSERT_EQ(NO_ERROR, sf->captureScreen(display, consumer->getBufferQueue(),
            64, 64, 0, 0x7fffffff, true));
}
bool BufferProducerThread::threadLoop() {
    Mutex::Autolock autoLock(&mLock);

    status_t err = NO_ERROR;
    if (mSurface == NULL) {
        err = mCondition.waitRelative(mLock, s2ns(1));
        // It's OK to time out here.
        if (err != NO_ERROR && err != TIMED_OUT) {
            ALOGE("error %d while wating for non-null surface to be set", err);
            return false;
        }
        return true;
    }
    sp<ANativeWindow> anw(mSurface);
    while (mBufferState == CAPTURING) {
        err = mCondition.waitRelative(mLock, s2ns(1));
        if (err != NO_ERROR) {
            ALOGE("error %d while wating for buffer state to change.", err);
            return false;
        }
    }
    if (mBufferState == CAPTURED && anw != NULL) {
        err = anw->queueBuffer(anw.get(), mBuffer.get(), -1);
        if (err != NO_ERROR) {
            ALOGE("error %d while queueing buffer to surface", err);
            return false;
        }
        mBuffer.clear();
        mBufferState = RELEASED;
    }
    if (mBuffer == NULL && !mShutdown && anw != NULL) {
        ANativeWindowBuffer_t* buffer = NULL;
        err = native_window_dequeue_buffer_and_wait(anw.get(), &buffer);
        if (err != NO_ERROR) {
            ALOGE("error %d while dequeueing buffer to surface", err);
            return false;
        }
        mBuffer = buffer;
        mBufferState = CAPTURING;
        mDevice->request_capture(mDevice, mDeviceId, mStream.stream_id,
                                 buffer->handle, ++mSeq);
    }

    return true;
}
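// A sketch of the buffer states threadLoop() above cycles through; the real
// enum lives in BufferProducerThread, so the names match the code but the
// exact declaration is assumed:
enum {
    CAPTURING,  // buffer handed to the capture device; wait for its callback
    CAPTURED,   // device filled the buffer; queue it to the surface
    RELEASED,   // buffer queued; OK to dequeue and capture the next one
};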
void NativeWindowRenderer::queueExternalBuffer(ANativeWindow* anw,
    MediaBuffer* buffer, int width, int height) {
    native_window_set_buffers_geometry(anw, width, height,
            HAL_PIXEL_FORMAT_YV12);
    native_window_set_usage(anw, GRALLOC_USAGE_SW_WRITE_OFTEN);

    ANativeWindowBuffer* anb;
    CHECK(NO_ERROR == native_window_dequeue_buffer_and_wait(anw, &anb));
    CHECK(anb != NULL);

    // Copy the buffer
    uint8_t* img = NULL;
    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
    buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
    copyI420Buffer(buffer, img, width, height, buf->getStride());
    buf->unlock();
    CHECK(NO_ERROR == anw->queueBuffer(anw, buf->getNativeBuffer(), -1));
}
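// A hedged sketch of what copyI420Buffer() above is assumed to do: a
// stride-aware copy from a tightly-packed I420 source into the dequeued YV12
// buffer (copyI420ToYV12 is a hypothetical name; assumes even width/height,
// while the real helper also handles cropping). YV12 stores V before U and
// requires 16-byte-aligned chroma rows, so each plane is copied row by row
// instead of with a single memcpy.
#include <string.h>
#include <stdint.h>

static void copyI420ToYV12(const uint8_t* src, uint8_t* dst,
        int width, int height, int dstStride) {
    int cStride = ((dstStride / 2) + 15) & ~15;   // chroma rows: 16-aligned
    const uint8_t* srcY = src;
    const uint8_t* srcU = srcY + width * height;
    const uint8_t* srcV = srcU + (width / 2) * (height / 2);
    uint8_t* dstY = dst;
    uint8_t* dstV = dstY + dstStride * height;    // YV12: V plane comes first
    uint8_t* dstU = dstV + cStride * (height / 2);
    for (int y = 0; y < height; ++y, srcY += width, dstY += dstStride)
        memcpy(dstY, srcY, width);
    for (int y = 0; y < height / 2; ++y, srcU += width / 2, dstU += cStride)
        memcpy(dstU, srcU, width / 2);
    for (int y = 0; y < height / 2; ++y, srcV += width / 2, dstV += cStride)
        memcpy(dstV, srcV, width / 2);
}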
int StreamAdapter::dequeue_buffer(const camera2_stream_ops_t *w,
        buffer_handle_t** buffer) {
    int res;
    int state = static_cast<const StreamAdapter*>(w)->mState;
    if (state != ACTIVE) {
        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
        return INVALID_OPERATION;
    }

    ANativeWindow *a = toANW(w);
    ANativeWindowBuffer* anb;
    res = native_window_dequeue_buffer_and_wait(a, &anb);
    if (res != OK) return res;

    *buffer = &(anb->handle);

    return res;
}
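// dequeue_buffer() above hands the HAL &anb->handle, so the matching enqueue
// path has to map that pointer back to its ANativeWindowBuffer. A hedged
// sketch of that inverse step (enqueueBufferToWindow is a hypothetical
// helper, not part of StreamAdapter; AOSP uses a container_of macro for the
// same computation):
#include <stddef.h>

static int enqueueBufferToWindow(ANativeWindow* a, int64_t timestamp,
        buffer_handle_t* buffer) {
    // Recover the owning ANativeWindowBuffer from the handle pointer.
    ANativeWindowBuffer* anb = reinterpret_cast<ANativeWindowBuffer*>(
            reinterpret_cast<char*>(buffer) -
            offsetof(ANativeWindowBuffer, handle));
    int res = native_window_set_buffers_timestamp(a, timestamp);
    if (res != 0) return res;
    return a->queueBuffer(a, anb, -1);
}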
Example #9
bool EncodeInputSurface::prepareInputBuffer()
{
    int i;
    int ret;
    ANativeWindowBuffer* anb = NULL;
    INFO("init %d input surfaces, this can take some time\n", m_bufferCount);

    for (i = 0; i < m_bufferCount; i++) {
        void* ptr = NULL;
        anb = NULL;
        ret = native_window_dequeue_buffer_and_wait(GET_ANATIVEWINDOW(mNativeWindow), &anb);
        if (ret != 0) {
            ERROR("native_window_dequeue_Buffer failed: (%d)\n", ret);
            return false;
        }

        sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(anb, false);
        graphicBuffer->lock(GraphicBuffer::USAGE_SW_WRITE_OFTEN, &ptr);

        DEBUG("ptr: %p, m_width: %d, m_height: %d\n", ptr, m_width, m_height);
        fillSourceBuffer((uint8_t*)ptr, m_width, m_height);

        graphicBuffer->unlock();
        DEBUG("%d, fille anb: %p done\n", i, anb);

        mBufferInfo.push(anb);
    }

    for (i = 0; i < 2; i++) {
        anb = mBufferInfo.front();
        DEBUG("cancelBuffer: %d, and: %p\n", i, anb);
        ret = CAST_ANATIVEWINDOW(mNativeWindow)->cancelBuffer(GET_ANATIVEWINDOW(mNativeWindow), anb, -1);
        mBufferInfo.pop();
        CHECK_RET(ret, "cancelBuffer");
    }

    INFO("init input surface finished\n");

    return true;
}
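// The hardcoded "2" in the cancel loop above matches the usual minimum of
// undequeued buffers a BufferQueue keeps back; a more robust sketch queries
// the window instead of assuming it (same GET_ANATIVEWINDOW macro as above):
int minUndequeued = 0;
int err = GET_ANATIVEWINDOW(mNativeWindow)->query(
        GET_ANATIVEWINDOW(mNativeWindow),
        NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
// On success (err == 0), cancel minUndequeued buffers instead of the fixed 2.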
ACodec::BufferInfo * NuPlayerVPPProcessor::dequeueBufferFromNativeWindow() {
    ANativeWindowBuffer *buf;
    if (native_window_dequeue_buffer_and_wait(mNativeWindow->getNativeWindow().get(), &buf) != 0) {
        LOGE("dequeueBuffer failed.");
        return NULL;
    }

    for (size_t i = mBufferInfos->size(); i-- > 0;) {
        ACodec::BufferInfo *info = &mBufferInfos->editItemAt(i);

        if (info->mGraphicBuffer->handle == buf->handle) {
            CHECK_EQ((int)info->mStatus,
                     (int)ACodec::BufferInfo::OWNED_BY_NATIVE_WINDOW);

            info->mStatus = ACodec::BufferInfo::OWNED_BY_VPP;
            LOGV("dequeueBufferFromNativeWindow graphicBuffer = %p", info->mGraphicBuffer.get());

            return info;
        }
    }

    return NULL;
}
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
    status_t err = NO_ERROR;
    ANativeWindowBuffer* anb = NULL;
    int numBufs = 0;
    int minUndequeuedBufs = 0;

    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
    // no frames get dropped by SurfaceFlinger assuming that these are video
    // frames.
    err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
    if (err != NO_ERROR) {
        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err), -err);
        return err;
    }

    err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_CPU);
    if (err != NO_ERROR) {
        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
        (void)native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
        return err;
    }

    err = setNativeWindowSizeFormatAndUsage(
            nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN);
    if (err != NO_ERROR) {
        goto error;
    }

    static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->allowAllocation(true);

    err = nativeWindow->query(nativeWindow,
            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
    if (err != NO_ERROR) {
        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
                "failed: %s (%d)", strerror(-err), -err);
        goto error;
    }

    numBufs = minUndequeuedBufs + 1;
    err = native_window_set_buffer_count(nativeWindow, numBufs);
    if (err != NO_ERROR) {
        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)", strerror(-err), -err);
        goto error;
    }

    // We push numBufs + 1 buffers to ensure that we've drawn into the same
    // buffer twice.  This should guarantee that the buffer has been displayed
    // on the screen and then been replaced, so any previous video frames are
    // guaranteed NOT to be currently displayed.
    for (int i = 0; i < numBufs + 1; i++) {
        err = native_window_dequeue_buffer_and_wait(nativeWindow, &anb);
        if (err != NO_ERROR) {
            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
                    strerror(-err), -err);
            break;
        }

        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));

        // Fill the buffer with a 1x1 checkerboard pattern ;)
        uint32_t *img = NULL;
        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
        if (err != NO_ERROR) {
            ALOGE("error pushing blank frames: lock failed: %s (%d)", strerror(-err), -err);
            break;
        }

        *img = 0;

        err = buf->unlock();
        if (err != NO_ERROR) {
            ALOGE("error pushing blank frames: unlock failed: %s (%d)", strerror(-err), -err);
            break;
        }

        err = nativeWindow->queueBuffer(nativeWindow, buf->getNativeBuffer(), -1);
        if (err != NO_ERROR) {
            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)", strerror(-err), -err);
            break;
        }

        anb = NULL;
    }

error:

    if (anb != NULL) {
        nativeWindow->cancelBuffer(nativeWindow, anb, -1);
        anb = NULL;
    }

    // Clean up after success or error.
    status_t err2 = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_CPU);
    if (err2 != NO_ERROR) {
        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err2), -err2);
        if (err == NO_ERROR) {
            err = err2;
        }
    }

    err2 = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
    if (err2 != NO_ERROR) {
        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
        if (err == NO_ERROR) {
            err = err2;
        }
    }

    return err;
}
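// A minimal usage sketch: callers typically push blank frames while tearing
// down video playback so the last decoded frame does not linger on screen.
// stopRenderingSketch is a hypothetical call site; mNativeWindow is assumed
// to be a Surface currently connected with NATIVE_WINDOW_API_MEDIA, which is
// what the function expects on entry and restores on exit:
static void stopRenderingSketch(const sp<ANativeWindow>& mNativeWindow) {
    status_t res = pushBlankBuffersToNativeWindow(mNativeWindow.get());
    if (res != NO_ERROR) {
        ALOGW("failed to push blank frames: %s (%d)", strerror(-res), -res);
    }
}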
void
FakeSurfaceComposer::captureScreenImp(const sp<IGraphicBufferProducer>& producer,
                                      uint32_t reqWidth,
                                      uint32_t reqHeight,
                                      const sp<GraphicProducerWrapper>& wrapper)
{
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(wrapper.get());

    RefPtr<nsScreenGonk> screen = nsScreenManagerGonk::GetPrimaryScreen();

    // get screen geometry
    nsIntRect screenBounds = screen->GetNaturalBounds().ToUnknownRect();
    const uint32_t hw_w = screenBounds.width;
    const uint32_t hw_h = screenBounds.height;

    if (reqWidth > hw_w || reqHeight > hw_h) {
        ALOGE("size mismatch (%d, %d) > (%d, %d)",
                reqWidth, reqHeight, hw_w, hw_h);
        static_cast<GraphicProducerWrapper*>(producer->asBinder().get())->exit(BAD_VALUE);
        return;
    }

    reqWidth  = (!reqWidth)  ? hw_w : reqWidth;
    reqHeight = (!reqHeight) ? hw_h : reqHeight;

    nsScreenGonk* screenPtr = screen.forget().take();
    nsCOMPtr<nsIRunnable> runnable =
        NS_NewRunnableFunction([screenPtr, reqWidth, reqHeight, producer, wrapper]() {
            // create a surface (because we're a producer, and we need to
            // dequeue/queue a buffer)
            sp<Surface> sur = new Surface(producer);
            ANativeWindow* window = sur.get();

            if (native_window_api_connect(window, NATIVE_WINDOW_API_EGL) != NO_ERROR) {
                static_cast<GraphicProducerWrapper*>(producer->asBinder().get())->exit(BAD_VALUE);
                NS_ReleaseOnMainThread(screenPtr);
                return;
            }
            uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
                             GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;

            int err = 0;
            err = native_window_set_buffers_dimensions(window, reqWidth, reqHeight);
            err |= native_window_set_scaling_mode(window, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
            err |= native_window_set_buffers_format(window, HAL_PIXEL_FORMAT_RGBA_8888);
            err |= native_window_set_usage(window, usage);

            status_t result = NO_ERROR;
            if (err == NO_ERROR) {
                ANativeWindowBuffer* buffer;
                result = native_window_dequeue_buffer_and_wait(window, &buffer);
                if (result == NO_ERROR) {
                    nsresult rv = screenPtr->MakeSnapshot(buffer);
                    if (rv != NS_OK) {
                        result = INVALID_OPERATION;
                    }
                    window->queueBuffer(window, buffer, -1);
                }
            } else {
                result = BAD_VALUE;
            }
            native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL);
            static_cast<GraphicProducerWrapper*>(producer->asBinder().get())->exit(result);
            NS_ReleaseOnMainThread(screenPtr);
        });

    mozilla::layers::CompositorParent::CompositorLoop()->PostTask(
        FROM_HERE, new RunnableCallTask(runnable));
}
status_t StreamAdapter::connectToDevice(camera2_device_t *d,
        uint32_t width, uint32_t height, int format) {
    if (mState != UNINITIALIZED) return INVALID_OPERATION;
    if (d == NULL) {
        ALOGE("%s: Null device passed to stream adapter", __FUNCTION__);
        return BAD_VALUE;
    }

    status_t res;

    mWidth = width;
    mHeight = height;
    mFormat = format;

    // Allocate device-side stream interface

    uint32_t id;
    uint32_t formatActual; // ignored
    uint32_t usage;
    uint32_t maxBuffers = 2;
    res = d->ops->allocate_stream(d,
            mWidth, mHeight, mFormat, getStreamOps(),
            &id, &formatActual, &usage, &maxBuffers);
    if (res != OK) {
        ALOGE("%s: Device stream allocation failed: %s (%d)",
                __FUNCTION__, strerror(-res), res);
        mState = UNINITIALIZED;
        return res;
    }
    mDevice = d;

    mId = id;
    mUsage = usage;
    mMaxProducerBuffers = maxBuffers;

    // Configure consumer-side ANativeWindow interface

    res = native_window_api_connect(mConsumerInterface.get(),
            NATIVE_WINDOW_API_CAMERA);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mId);
        mState = ALLOCATED;
        return res;
    }

    res = native_window_set_usage(mConsumerInterface.get(), mUsage);
    if (res != OK) {
        ALOGE("%s: Unable to configure usage %08x for stream %d",
                __FUNCTION__, mUsage, mId);
        mState = CONNECTED;
        return res;
    }

    res = native_window_set_buffers_geometry(mConsumerInterface.get(),
            mWidth, mHeight, mFormat);
    if (res != OK) {
        ALOGE("%s: Unable to configure buffer geometry"
                " %d x %d, format 0x%x for stream %d",
                __FUNCTION__, mWidth, mHeight, mFormat, mId);
        mState = CONNECTED;
        return res;
    }

    int maxConsumerBuffers;
    res = mConsumerInterface->query(mConsumerInterface.get(),
            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
    if (res != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mId);
        mState = CONNECTED;
        return res;
    }
    mMaxConsumerBuffers = maxConsumerBuffers;

    ALOGV("%s: Producer wants %d buffers, consumer wants %d", __FUNCTION__,
            mMaxProducerBuffers, mMaxConsumerBuffers);

    int totalBuffers = mMaxConsumerBuffers + mMaxProducerBuffers;

    res = native_window_set_buffer_count(mConsumerInterface.get(),
            totalBuffers);
    if (res != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d",
                __FUNCTION__, mId);
        mState = CONNECTED;
        return res;
    }

    // Register allocated buffers with HAL device
    buffer_handle_t *buffers = new buffer_handle_t[totalBuffers];
    ANativeWindowBuffer **anwBuffers = new ANativeWindowBuffer*[totalBuffers];
    int bufferIdx = 0;
    for (; bufferIdx < totalBuffers; bufferIdx++) {
        res = native_window_dequeue_buffer_and_wait(mConsumerInterface.get(),
                &anwBuffers[bufferIdx]);
        if (res != OK) {
            ALOGE("%s: Unable to dequeue buffer %d for initial registration for"
                    "stream %d", __FUNCTION__, bufferIdx, mId);
            mState = CONNECTED;
            goto cleanUpBuffers;
        }
        buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
    }

    res = mDevice->ops->register_stream_buffers(mDevice,
            mId,
            totalBuffers,
            buffers);
    if (res != OK) {
        ALOGE("%s: Unable to register buffers with HAL device for stream %d",
                __FUNCTION__, mId);
        mState = CONNECTED;
    } else {
        mState = ACTIVE;
    }

cleanUpBuffers:
    for (int i = 0; i < bufferIdx; i++) {
        res = mConsumerInterface->cancelBuffer(mConsumerInterface.get(),
                anwBuffers[i], -1);
    }
    delete[] anwBuffers;
    delete[] buffers;

    return res;
}
Example #14
int main(int argc, char** argv)
{
    DecodeInput *input;
    int32_t fd = -1;
    int32_t i = 0;
    int32_t ioctlRet = -1;
    YamiMediaCodec::CalcFps calcFps;

    renderMode = 3; // set default render mode to 3

    yamiTraceInit();
#if __ENABLE_V4L2_GLX__
    XInitThreads();
#endif

#if __ENABLE_V4L2_OPS__
    // FIXME, use libv4l2codec_hw.so instead
    if (!loadV4l2CodecDevice("libyami_v4l2.so")) {
        ERROR("fail to init v4l2codec device with __ENABLE_V4L2_OPS__\n");
        return -1;
    }
#endif

    if (!process_cmdline(argc, argv))
        return -1;

    if (!inputFileName) {
        ERROR("no input media file specified\n");
        return -1;
    }
    INFO("input file: %s, renderMode: %d", inputFileName, renderMode);

    if (!dumpOutputName)
        dumpOutputName = strdup ("./");

#if !__ENABLE_V4L2_GLX__
    switch (renderMode) {
    case 0:
        memoryType = VIDEO_DATA_MEMORY_TYPE_RAW_COPY;
        memoryTypeStr = typeStrRawData;
    break;
    case 3:
        memoryType = VIDEO_DATA_MEMORY_TYPE_DRM_NAME;
        memoryTypeStr = typeStrDrmName;
    break;
    case 4:
        memoryType = VIDEO_DATA_MEMORY_TYPE_DMA_BUF;
        memoryTypeStr = typeStrDmaBuf;
    break;
    default:
        ASSERT(0 && "unsupported render mode, -m [0,3,4] are supported");
    break;
    }
#endif

    input = DecodeInput::create(inputFileName);
    if (input==NULL) {
        ERROR("fail to init input stream\n");
        return -1;
    }

    renderFrameCount = 0;
    calcFps.setAnchor();
    // open device
    fd = SIMULATE_V4L2_OP(Open)("decoder", 0);
    ASSERT(fd!=-1);

#ifdef ANDROID
#elif __ENABLE_V4L2_GLX__
    x11Display = XOpenDisplay(NULL);
    ASSERT(x11Display);
    ioctlRet = SIMULATE_V4L2_OP(SetXDisplay)(fd, x11Display);
#endif
    // set output frame memory type
#if __ENABLE_V4L2_OPS__
    SIMULATE_V4L2_OP(SetParameter)(fd, "frame-memory-type", memoryTypeStr);
#elif !__ENABLE_V4L2_GLX__
    SIMULATE_V4L2_OP(FrameMemoryType)(fd, memoryType);
#endif

    // query hw capability
    struct v4l2_capability caps;
    memset(&caps, 0, sizeof(caps));
    caps.capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_QUERYCAP, &caps);
    ASSERT(ioctlRet != -1);

    // set input/output data format
    uint32_t codecFormat = v4l2PixelFormatFromMime(input->getMimeType());
    if (!codecFormat) {
        ERROR("unsupported mimetype, %s", input->getMimeType());
        return -1;
    }

    struct v4l2_format format;
    memset(&format, 0, sizeof(format));
    format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    format.fmt.pix_mp.pixelformat = codecFormat;
    format.fmt.pix_mp.num_planes = 1;
    format.fmt.pix_mp.plane_fmt[0].sizeimage = k_maxInputBufferSize;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_S_FMT, &format);
    ASSERT(ioctlRet != -1);

    // set preferred output format
    memset(&format, 0, sizeof(format));
    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    uint8_t* data = (uint8_t*)input->getCodecData().data();
    uint32_t size = input->getCodecData().size();
    // Save the codec data as size+payload; format.fmt.raw_data is __u8[200],
    // so first make sure there is room for sizeof(uint32_t) + size bytes.
    if (sizeof(format.fmt.raw_data) >= size + sizeof(uint32_t)) {
        memcpy(format.fmt.raw_data, &size, sizeof(uint32_t));
        memcpy(format.fmt.raw_data + sizeof(uint32_t), data, size);
    } else {
        ERROR("Not enough space to store codec data");
        return -1;
    }
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_S_FMT, &format);
    ASSERT(ioctlRet != -1);

    // input port starts as early as possible to decide output frame format
    __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_STREAMON, &type);
    ASSERT(ioctlRet != -1);

    // setup input buffers
    struct v4l2_requestbuffers reqbufs;
    memset(&reqbufs, 0, sizeof(reqbufs));
    reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    reqbufs.memory = V4L2_MEMORY_MMAP;
    reqbufs.count = 2;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_REQBUFS, &reqbufs);
    ASSERT(ioctlRet != -1);
    ASSERT(reqbufs.count>0);
    inputQueueCapacity = reqbufs.count;
    inputFrames.resize(inputQueueCapacity);

    for (i=0; i<inputQueueCapacity; i++) {
        struct v4l2_plane planes[k_inputPlaneCount];
        struct v4l2_buffer buffer;
        memset(&buffer, 0, sizeof(buffer));
        memset(planes, 0, sizeof(planes));
        buffer.index = i;
        buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        buffer.memory = V4L2_MEMORY_MMAP;
        buffer.m.planes = planes;
        buffer.length = k_inputPlaneCount;
        ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_QUERYBUF, &buffer);
        ASSERT(ioctlRet != -1);

        // length and mem_offset should be filled by VIDIOC_QUERYBUF above
        void* address = SIMULATE_V4L2_OP(Mmap)(NULL,
                                      buffer.m.planes[0].length,
                                      PROT_READ | PROT_WRITE,
                                      MAP_SHARED, fd,
                                      buffer.m.planes[0].m.mem_offset);
        ASSERT(address);
        inputFrames[i] = static_cast<uint8_t*>(address);
        DEBUG("inputFrames[%d] = %p", i, inputFrames[i]);
    }

    // feed input frames first
    for (i=0; i<inputQueueCapacity; i++) {
        if (!feedOneInputFrame(input, fd, i)) {
            break;
        }
    }

    // query video resolution
    memset(&format, 0, sizeof(format));
    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    while (1) {
        if (SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_G_FMT, &format) != 0) {
            if (errno != EINVAL) {
                // EINVAL means we haven't seen sufficient stream to decode the format.
                INFO("ioctl() failed: VIDIOC_G_FMT, haven't get video resolution during start yet, waiting");
            }
        } else {
            break;
        }
        usleep(50);
    }
    outputPlaneCount = format.fmt.pix_mp.num_planes;
    ASSERT(outputPlaneCount == 2);
    videoWidth = format.fmt.pix_mp.width;
    videoHeight = format.fmt.pix_mp.height;
    ASSERT(videoWidth && videoHeight);

#ifdef ANDROID
    __u32 pixelformat = format.fmt.pix_mp.pixelformat;
    if (!createNativeWindow(pixelformat)) {
        fprintf(stderr, "create native window error\n");
        return -1;
    }

    int minUndequeuedBuffs = 0;
    status_t err = mNativeWindow->query(mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffs);
    if (err != 0) {
        fprintf(stderr, "query native window min undequeued buffers error\n");
        return err;
    }
#endif

    // setup output buffers
    // Number of output buffers we need.
    struct v4l2_control ctrl;
    memset(&ctrl, 0, sizeof(ctrl));
    ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_G_CTRL, &ctrl);
#ifndef ANDROID
    uint32_t minOutputFrameCount = ctrl.value + k_extraOutputFrameCount;
#else
    uint32_t minOutputFrameCount = ctrl.value + k_extraOutputFrameCount + minUndequeuedBuffs;
#endif

    memset(&reqbufs, 0, sizeof(reqbufs));
    reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    reqbufs.memory = V4L2_MEMORY_MMAP;
    reqbufs.count = minOutputFrameCount;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_REQBUFS, &reqbufs);
    ASSERT(ioctlRet != -1);
    ASSERT(reqbufs.count>0);
    outputQueueCapacity = reqbufs.count;

#ifdef ANDROID
#elif __ENABLE_V4L2_GLX__
    x11Window = XCreateSimpleWindow(x11Display, DefaultRootWindow(x11Display)
        , 0, 0, videoWidth, videoHeight, 0, 0
        , WhitePixel(x11Display, 0));
    XMapWindow(x11Display, x11Window);
    pixmaps.resize(outputQueueCapacity);
    glxPixmaps.resize(outputQueueCapacity);
    textureIds.resize(outputQueueCapacity);

    if (!glxContext) {
        glxContext = glxInit(x11Display, x11Window);
    }
    ASSERT(glxContext);

    glGenTextures(outputQueueCapacity, &textureIds[0] );
    for (i=0; i<outputQueueCapacity; i++) {
        int ret = createPixmapForTexture(glxContext, textureIds[i], videoWidth, videoHeight, &pixmaps[i], &glxPixmaps[i]);
        DEBUG("textureIds[%d]: 0x%x, pixmaps[%d]=0x%lx, glxPixmaps[%d]: 0x%lx", i, textureIds[i], i, pixmaps[i], i, glxPixmaps[i]);
        ASSERT(ret == 0);
        ret = SIMULATE_V4L2_OP(UsePixmap)(fd, i, pixmaps[i]);
        ASSERT(ret == 0);
    }
#else
    if (IS_RAW_DATA()) {
        rawOutputFrames.resize(outputQueueCapacity);
        for (i=0; i<outputQueueCapacity; i++) {
            struct v4l2_plane planes[k_maxOutputPlaneCount];
            struct v4l2_buffer buffer;
            memset(&buffer, 0, sizeof(buffer));
            memset(planes, 0, sizeof(planes));
            buffer.index = i;
            buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            buffer.memory = V4L2_MEMORY_MMAP;
            buffer.m.planes = planes;
            buffer.length = outputPlaneCount;
            ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_QUERYBUF, &buffer);
            ASSERT(ioctlRet != -1);

            rawOutputFrames[i].width = format.fmt.pix_mp.width;
            rawOutputFrames[i].height = format.fmt.pix_mp.height;
            rawOutputFrames[i].fourcc = format.fmt.pix_mp.pixelformat;

            for (int j=0; j<outputPlaneCount; j++) {
                // length and mem_offset are filled by VIDIOC_QUERYBUF above
                void* address = SIMULATE_V4L2_OP(Mmap)(NULL,
                                              buffer.m.planes[j].length,
                                              PROT_READ | PROT_WRITE,
                                              MAP_SHARED, fd,
                                              buffer.m.planes[j].m.mem_offset);
                ASSERT(address);
                if (j == 0) {
                    rawOutputFrames[i].data = static_cast<uint8_t*>(address);
                    rawOutputFrames[i].offset[0] = 0;
                } else {
                    rawOutputFrames[i].offset[j] = static_cast<uint8_t*>(address) - rawOutputFrames[i].data;
                }

                rawOutputFrames[i].pitch[j] = format.fmt.pix_mp.plane_fmt[j].bytesperline;
            }
        }
    } else if (IS_DMA_BUF() || IS_DRM_NAME()) {
        // setup all textures and eglImages
        eglImages.resize(outputQueueCapacity);
        textureIds.resize(outputQueueCapacity);

        if (!eglContext)
            eglContext = eglInit(x11Display, x11Window, 0 /*VA_FOURCC_RGBA*/, IS_DMA_BUF());

        glGenTextures(outputQueueCapacity, &textureIds[0] );
        for (i=0; i<outputQueueCapacity; i++) {
             int ret = 0;
             ret = SIMULATE_V4L2_OP(UseEglImage)(fd, eglContext->eglContext.display, eglContext->eglContext.context, i, &eglImages[i]);
             ASSERT(ret == 0);

             GLenum target = GL_TEXTURE_2D;
             if (IS_DMA_BUF())
                 target = GL_TEXTURE_EXTERNAL_OES;
             glBindTexture(target, textureIds[i]);
             imageTargetTexture2D(target, eglImages[i]);

             glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
             glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
             DEBUG("textureIds[%d]: 0x%x, eglImages[%d]: 0x%p", i, textureIds[i], i, eglImages[i]);
        }
    }
#endif

#ifndef ANDROID
    // feed output frames first
    for (i=0; i<outputQueueCapacity; i++) {
        if (!takeOneOutputFrame(fd, i)) {
            ASSERT(0);
        }
    }
#else
    struct v4l2_buffer buffer;

    err = native_window_set_buffer_count(mNativeWindow.get(), outputQueueCapacity);
    if (err != 0) {
        fprintf(stderr, "native_window_set_buffer_count failed: %s (%d)", strerror(-err), -err);
        return -1;
    }

    // queue buffers
    for (uint32_t i = 0; i < outputQueueCapacity; i++) {
        ANativeWindowBuffer* pbuf = NULL;
        memset(&buffer, 0, sizeof(buffer));

        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &pbuf);
        if (err != 0) {
            fprintf(stderr, "dequeueBuffer failed: %s (%d)\n", strerror(-err), -err);
            return -1;
        }

        buffer.m.userptr = (unsigned long)pbuf;
        buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buffer.index = i;

        ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_QBUF, &buffer);
        ASSERT(ioctlRet != -1);
        mWindBuff.push_back(pbuf);
    }

    for (uint32_t i = 0; i < minUndequeuedBuffs; i++) {
        memset(&buffer, 0, sizeof(buffer));
        buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

        ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_DQBUF, &buffer);
        ASSERT(ioctlRet != -1);

        err = mNativeWindow->cancelBuffer(mNativeWindow.get(), mWindBuff[buffer.index], -1);
        if (err) {
            fprintf(stderr, "queue empty window buffer error\n");
            return -1;
        }
    }
#endif

    // the output port starts as late as possible so it can adopt user-provided output buffers
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_STREAMON, &type);
    ASSERT(ioctlRet != -1);

    bool event_pending=true; // try to get video resolution.
    int dqCountAfterEOS = 0;
    do {
        if (event_pending) {
            handleResolutionChange(fd);
        }

        takeOneOutputFrame(fd);
        if (!feedOneInputFrame(input, fd)) {
            if (stagingBufferInDevice == 0)
                break;
            dqCountAfterEOS++;
        }
        if (dqCountAfterEOS == inputQueueCapacity)  // input drain
            break;
    } while (SIMULATE_V4L2_OP(Poll)(fd, true, &event_pending) == 0);

    // drain output buffer
    int retry = 3;
    while (takeOneOutputFrame(fd) || (--retry)>0) { // output drain
        usleep(10000);
    }

    calcFps.fps(renderFrameCount);
    // SIMULATE_V4L2_OP(Munmap)(void* addr, size_t length)
    possibleWait(input->getMimeType());

    // release queued input/output buffer
    memset(&reqbufs, 0, sizeof(reqbufs));
    reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    reqbufs.memory = V4L2_MEMORY_MMAP;
    reqbufs.count = 0;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_REQBUFS, &reqbufs);
    ASSERT(ioctlRet != -1);

    memset(&reqbufs, 0, sizeof(reqbufs));
    reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    reqbufs.memory = V4L2_MEMORY_MMAP;
    reqbufs.count = 0;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_REQBUFS, &reqbufs);
    ASSERT(ioctlRet != -1);

    // stop input port
    type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_STREAMOFF, &type);
    ASSERT(ioctlRet != -1);

    // stop output port
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    ioctlRet = SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_STREAMOFF, &type);
    ASSERT(ioctlRet != -1);

#ifndef ANDROID
    if(textureIds.size())
        glDeleteTextures(textureIds.size(), &textureIds[0]);
    ASSERT(glGetError() == GL_NO_ERROR);
#endif

#ifdef ANDROID
    // TODO: are there resources that still need to be destroyed?
#elif __ENABLE_V4L2_GLX__
    glxRelease(glxContext, &pixmaps[0], &glxPixmaps[0], pixmaps.size());
#else
    for (i=0; i<eglImages.size(); i++) {
        destroyImage(eglContext->eglContext.display, eglImages[i]);
    }
    /*
    there is still a random failure in mesa; no good idea for it. It seems to be a mesa bug:
    0  0x00007ffff079c343 in _mesa_symbol_table_dtor () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    1  0x00007ffff073c55d in glsl_symbol_table::~glsl_symbol_table() () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    2  0x00007ffff072a4d5 in ?? () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    3  0x00007ffff072a4bd in ?? () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    4  0x00007ffff064b48f in _mesa_reference_shader () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    5  0x00007ffff0649397 in ?? () from /usr/lib/x86_64-linux-gnu/libdricore9.2.1.so.1
    6  0x000000000040624d in releaseShader (program=0x77cd90) at ./egl/gles2_help.c:158
    7  eglRelease (context=0x615920) at ./egl/gles2_help.c:310
    8  0x0000000000402ca8 in main (argc=<optimized out>, argv=<optimized out>) at v4l2decode.cpp:531
    */
    if (eglContext)
        eglRelease(eglContext);
#endif

    // close device
    ioctlRet = SIMULATE_V4L2_OP(Close)(fd);
    ASSERT(ioctlRet != -1);

    if(input)
        delete input;

    if (outfp)
        fclose(outfp);

    if (dumpOutputName)
        free(dumpOutputName);

#if __ENABLE_V4L2_GLX__
    if (x11Display && x11Window)
        XDestroyWindow(x11Display, x11Window);
    if (x11Display)
        XCloseDisplay(x11Display);
#endif

    fprintf(stdout, "decode done\n");
}
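// A hedged sketch of the DQBUF half of the takeOneOutputFrame() helper the
// main loop above keeps calling (simplified; the real helper also renders
// the dequeued frame and queues the buffer back with VIDIOC_QBUF):
static bool dequeueOneOutputFrameSketch(int fd)
{
    struct v4l2_buffer buffer;
    struct v4l2_plane planes[2];  // outputPlaneCount is asserted to be 2 above
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = 2;
    if (SIMULATE_V4L2_OP(Ioctl)(fd, VIDIOC_DQBUF, &buffer) == -1)
        return false;  // no decoded frame ready yet
    renderFrameCount++;
    // ... render buffer.index, then VIDIOC_QBUF it back ...
    return true;
}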
Example #15
void SoftwareRenderer::render(
        const void *data, size_t size, void *platformPrivate) {

    ANativeWindowBuffer *buf;
    int err;
    if ((err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(),
            &buf)) != 0) {
        ALOGW("Surface::dequeueBuffer returned error %d", err);
        return;
    }

    GraphicBufferMapper &mapper = GraphicBufferMapper::get();

    Rect bounds(mCropWidth, mCropHeight);

    void *dst;
    CHECK_EQ(0, mapper.lock(
                buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));

    if (mConverter) {
        mConverter->convert(
                data,
                mWidth, mHeight,
                mCropLeft, mCropTop, mCropRight, mCropBottom,
                dst,
                buf->stride, buf->height,
                0, 0, mCropWidth - 1, mCropHeight - 1);
    } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
        const uint8_t *src_y = (const uint8_t *)data;
        const uint8_t *src_u = (const uint8_t *)data + mWidth * mHeight;
        const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);

        uint8_t *dst_y = (uint8_t *)dst;
        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }

        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
            memcpy(dst_v, src_v, (mCropWidth + 1) / 2);

            src_u += mWidth / 2;
            src_v += mWidth / 2;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
    } else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar) {
        const uint8_t *src_y =
            (const uint8_t *)data;

        const uint8_t *src_uv =
            (const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);

        uint8_t *dst_y = (uint8_t *)dst;

        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }
        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            size_t tmp = (mCropWidth + 1) / 2;
            for (size_t x = 0; x < tmp; ++x) {
                dst_u[x] = src_uv[2 * x];
                dst_v[x] = src_uv[2 * x + 1];
            }

            src_uv += mWidth;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
    } else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
        memcpy(dst, data, size);
    }

    printf(">>>>>>>>>>>\n");
    CHECK_EQ(0, mapper.unlock(buf->handle));

    if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf,
            -1)) != 0) {
        printf("Surface::queueBuffer returned error %d\n", err);
    }
    buf = NULL;
}
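// The ALIGN() used by these render() variants is assumed to be the usual
// round-up helper from AOSP's SoftwareRenderer (y must be a power of two):
static int ALIGN(int x, int y) {
    return (x + y - 1) & ~(y - 1);
}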
void SoftwareRenderer::render(
        const void *data, size_t size, int64_t timestampNs,
        void* /*platformPrivate*/, const sp<AMessage>& format) {
    resetFormatIfChanged(format);

    ANativeWindowBuffer *buf;
    int err;
    if ((err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(),
            &buf)) != 0) {
        ALOGW("Surface::dequeueBuffer returned error %d", err);
        return;
    }

    GraphicBufferMapper &mapper = GraphicBufferMapper::get();

    Rect bounds(mCropWidth, mCropHeight);

    void *dst;
    CHECK_EQ(0, mapper.lock(
                buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));

    if (mConverter) {
        mConverter->convert(
                data,
                mWidth, mHeight,
                mCropLeft, mCropTop, mCropRight, mCropBottom,
                dst,
                buf->stride, buf->height,
                0, 0, mCropWidth - 1, mCropHeight - 1);
    } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
        if ((size_t)mWidth * mHeight * 3 / 2 > size) {
            goto skip_copying;
        }
        const uint8_t *src_y = (const uint8_t *)data;
        const uint8_t *src_u = (const uint8_t *)data + mWidth * mHeight;
        const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);

        uint8_t *dst_y = (uint8_t *)dst;
        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }

        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
            memcpy(dst_v, src_v, (mCropWidth + 1) / 2);

            src_u += mWidth / 2;
            src_v += mWidth / 2;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
    } else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
            || mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
        if ((size_t)mWidth * mHeight * 3 / 2 > size) {
            goto skip_copying;
        }

        const uint8_t *src_y =
            (const uint8_t *)data;

        const uint8_t *src_uv =
            (const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);

        uint8_t *dst_y = (uint8_t *)dst;

        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }

        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            size_t tmp = (mCropWidth + 1) / 2;
            for (size_t x = 0; x < tmp; ++x) {
                dst_u[x] = src_uv[2 * x];
                dst_v[x] = src_uv[2 * x + 1];
            }

            src_uv += mWidth;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
    } else {
        LOG_ALWAYS_FATAL("bad color format %#x", mColorFormat);
    }

skip_copying:
    CHECK_EQ(0, mapper.unlock(buf->handle));

    if ((err = native_window_set_buffers_timestamp(mNativeWindow.get(),
            timestampNs)) != 0) {
        ALOGW("Surface::set_buffers_timestamp returned error %d", err);
    }

    if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf,
            -1)) != 0) {
        ALOGW("Surface::queueBuffer returned error %d", err);
    }
    buf = NULL;
}
void SoftwareRenderer::render(
        const void *data, size_t size, void *platformPrivate) {
    ANativeWindowBuffer *buf;
    int err;
    if ((err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(),
            &buf)) != 0) {
        ALOGW("Surface::dequeueBuffer returned error %d", err);
        return;
    }

    GraphicBufferMapper &mapper = GraphicBufferMapper::get();

    Rect bounds(mCropWidth, mCropHeight);

    void *dst;
    CHECK_EQ(0, mapper.lock(
                buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));

    if (mConverter) {
        mConverter->convert(
                data,
                mWidth, mHeight,
                mCropLeft, mCropTop, mCropRight, mCropBottom,
                dst,
                buf->stride, buf->height,
                0, 0, mCropWidth - 1, mCropHeight - 1);
    } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
        const uint8_t *src_y = (const uint8_t *)data;
        const uint8_t *src_u = (const uint8_t *)data + mWidth * mHeight;
        const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);

        uint8_t *dst_y = (uint8_t *)dst;
        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }

        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
            memcpy(dst_v, src_v, (mCropWidth + 1) / 2);

            src_u += mWidth / 2;
            src_v += mWidth / 2;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
#ifdef QCOM_LEGACY_OMX
    } else if (mColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar) {
        // Legacy Qualcomm color format

        uint8_t *src_y = (uint8_t *)data;
        uint8_t *src_u = src_y + mAlign;
        uint8_t *dst_y = (uint8_t *)dst;
        uint8_t *dst_u = dst_y + buf->stride * buf->height;

        // Legacy codec doesn't return crop params. Ignore it for speedup :)
        memcpy(dst_y, src_y, mCropWidth * mCropHeight);
        memcpy(dst_u, src_u, mCropWidth * mCropHeight / 2);

        /*for(size_t y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);
            dst_y += buf->stride;
            src_y += mWidth;

            if(y & 1) {
                memcpy(dst_u, src_u, mCropWidth);
                dst_u += buf->stride;
                src_u += mWidth;
            }
        }*/
#endif
    } else {
        CHECK_EQ(mColorFormat, OMX_TI_COLOR_FormatYUV420PackedSemiPlanar);

        const uint8_t *src_y =
            (const uint8_t *)data;

        const uint8_t *src_uv =
            (const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);

#ifdef EXYNOS4_ENHANCEMENTS
        void *pYUVBuf[3];

        CHECK_EQ(0, mapper.unlock(buf->handle));
        CHECK_EQ(0, mapper.lock(
                buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_YUV_ADDR, bounds, pYUVBuf));

        size_t dst_c_stride = buf->stride / 2;
        uint8_t *dst_y = (uint8_t *)pYUVBuf[0];
        uint8_t *dst_v = (uint8_t *)pYUVBuf[1];
        uint8_t *dst_u = (uint8_t *)pYUVBuf[2];
#else
        size_t dst_y_size = buf->stride * buf->height;
        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
        size_t dst_c_size = dst_c_stride * buf->height / 2;
        uint8_t *dst_y = (uint8_t *)dst;
        uint8_t *dst_v = dst_y + dst_y_size;
        uint8_t *dst_u = dst_v + dst_c_size;
#endif

        for (int y = 0; y < mCropHeight; ++y) {
            memcpy(dst_y, src_y, mCropWidth);

            src_y += mWidth;
            dst_y += buf->stride;
        }

        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
            size_t tmp = (mCropWidth + 1) / 2;
            for (size_t x = 0; x < tmp; ++x) {
                dst_u[x] = src_uv[2 * x];
                dst_v[x] = src_uv[2 * x + 1];
            }

            src_uv += mWidth;
            dst_u += dst_c_stride;
            dst_v += dst_c_stride;
        }
    }

    CHECK_EQ(0, mapper.unlock(buf->handle));

    if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf,
            -1)) != 0) {
        ALOGW("Surface::queueBuffer returned error %d", err);
    }
    buf = NULL;
}