Example #1
0
bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame)
{
    // Fail fast if either the source or the parser has flagged a problem.
    if (videoSource_->hasError() || videoParser_->hasError())
        CV_Error(CV_StsUnsupportedFormat, "Unsupported video source");

    if (!videoSource_->isStarted() || frameQueue_->isEndOfDecode())
        return false;

    // Refill the pending-field queue once it has been fully drained.
    if (frames_.empty())
    {
        CUVIDPARSERDISPINFO dispInfo;

        // Poll until the decoder publishes a picture, bailing out on a
        // source/parser error or on end of stream.
        while (!frameQueue_->dequeue(dispInfo))
        {
            if (videoSource_->hasError() || videoParser_->hasError())
                CV_Error(CV_StsUnsupportedFormat, "Unsupported video source");

            if (frameQueue_->isEndOfDecode())
                return false;

            // Wait a bit before polling again.
            detail::Thread::sleep(1);
        }

        // Progressive content yields one field per picture; interlaced content
        // yields two, plus one more when repeat_first_field is set.
        const bool progressive = dispInfo.progressive_frame != 0;
        const int fieldCount = progressive ? 1 : 2 + dispInfo.repeat_first_field;

        for (int field = 0; field < fieldCount; ++field)
        {
            CUVIDPROCPARAMS procParams;
            std::memset(&procParams, 0, sizeof(CUVIDPROCPARAMS));

            procParams.progressive_frame = dispInfo.progressive_frame;
            procParams.second_field      = field;
            procParams.top_field_first   = dispInfo.top_field_first;
            procParams.unpaired_field    = (fieldCount == 1);

            frames_.push_back(std::make_pair(dispInfo, procParams));
        }
    }

    if (frames_.empty())
        return false;

    // Take the oldest pending field off the queue.
    std::pair<CUVIDPARSERDISPINFO, CUVIDPROCPARAMS> frameInfo = frames_.front();
    frames_.pop_front();

    {
        VideoCtxAutoLock autoLock(lock_);

        // Map the decoded picture into a CUDA surface ...
        cv::gpu::GpuMat decodedFrame = videoDecoder_->mapFrame(frameInfo.first.picture_index, frameInfo.second);

        // ... run post processing (color-space conversion) into the caller's
        // output frame ...
        cudaPostProcessFrame(decodedFrame, frame, videoDecoder_->targetWidth(), videoDecoder_->targetHeight());

        // ... then unmap. unmapFrame() synchronizes with the VideoDecode API
        // (ensures the frame has finished decoding).
        videoDecoder_->unmapFrame(decodedFrame);
    }

    // Only after every field of this picture has been consumed is the decode
    // surface handed back to the decoder for reuse.
    if (frames_.empty())
        frameQueue_->releaseFrame(frameInfo.first);

    return true;
}
Example #2
0
// Run the Cuda part of the computation
bool copyDecodedFrameToTexture(unsigned int &nRepeats, int bUseInterop, int *pbIsProgressive)
{
    CUVIDPARSERDISPINFO oDisplayInfo;

    if (g_pFrameQueue->dequeue(&oDisplayInfo))
    {
        CCtxAutoLock lck(g_CtxLock);
        // Push the current CUDA context (only if we are using CUDA decoding path)
        CUresult result = cuCtxPushCurrent(g_oContext);

        CUdeviceptr  pDecodedFrame[2] = { 0, 0 };
        CUdeviceptr  pInteropFrame[2] = { 0, 0 };

        int num_fields = (oDisplayInfo.progressive_frame ? (1) : (2+oDisplayInfo.repeat_first_field));
        *pbIsProgressive = oDisplayInfo.progressive_frame;
        g_bIsProgressive = oDisplayInfo.progressive_frame ? true : false;

        for (int active_field=0; active_field<num_fields; active_field++)
        {
            nRepeats = oDisplayInfo.repeat_first_field;
            CUVIDPROCPARAMS oVideoProcessingParameters;
            memset(&oVideoProcessingParameters, 0, sizeof(CUVIDPROCPARAMS));

            oVideoProcessingParameters.progressive_frame = oDisplayInfo.progressive_frame;
            oVideoProcessingParameters.second_field      = active_field;
            oVideoProcessingParameters.top_field_first   = oDisplayInfo.top_field_first;
            oVideoProcessingParameters.unpaired_field    = (num_fields == 1);

            unsigned int nDecodedPitch = 0;
            unsigned int nWidth = 0;
            unsigned int nHeight = 0;

            // map decoded video frame to CUDA surfae
            g_pVideoDecoder->mapFrame(oDisplayInfo.picture_index, &pDecodedFrame[active_field], &nDecodedPitch, &oVideoProcessingParameters);
            nWidth  = g_pVideoDecoder->targetWidth();
            nHeight = g_pVideoDecoder->targetHeight();
            // map DirectX texture to CUDA surface
            size_t nTexturePitch = 0;

            // If we are Encoding and this is the 1st Frame, we make sure we allocate system memory for readbacks
            if (g_bReadback && g_bFirstFrame && g_ReadbackSID)
            {
                CUresult result;
                checkCudaErrors(result = cuMemAllocHost((void **)&g_bFrameData[0], (nDecodedPitch * nHeight * 3 / 2)));
                checkCudaErrors(result = cuMemAllocHost((void **)&g_bFrameData[1], (nDecodedPitch * nHeight * 3 / 2)));
                g_bFirstFrame = false;

                if (result != CUDA_SUCCESS)
                {
                    printf("cuMemAllocHost returned %d\n", (int)result);
                }
            }

            // If streams are enabled, we can perform the readback to the host while the kernel is executing
            if (g_bReadback && g_ReadbackSID)
            {
                CUresult result = cuMemcpyDtoHAsync(g_bFrameData[active_field], pDecodedFrame[active_field], (nDecodedPitch * nHeight * 3 / 2), g_ReadbackSID);

                if (result != CUDA_SUCCESS)
                {
                    printf("cuMemAllocHost returned %d\n", (int)result);
                }
            }

#if ENABLE_DEBUG_OUT
            printf("%s = %02d, PicIndex = %02d, OutputPTS = %08d\n",
                   (oDisplayInfo.progressive_frame ? "Frame" : "Field"),
                   g_DecodeFrameCount, oDisplayInfo.picture_index, oDisplayInfo.timestamp);
#endif

            if (g_pImageDX)
            {
                // map the texture surface
                g_pImageDX->map(&pInteropFrame[active_field], &nTexturePitch, active_field);
            }
            else
            {
                pInteropFrame[active_field] = g_pInteropFrame[active_field];
                nTexturePitch = g_pVideoDecoder->targetWidth() * 2;
            }

            // perform post processing on the CUDA surface (performs colors space conversion and post processing)
            // comment this out if we inclue the line of code seen above
            cudaPostProcessFrame(&pDecodedFrame[active_field], nDecodedPitch, &pInteropFrame[active_field], nTexturePitch, g_pCudaModule->getModule(), gfpNV12toARGB, g_KernelSID);

            if (g_pImageDX)
            {
                // unmap the texture surface
                g_pImageDX->unmap(active_field);
            }

            // unmap video frame
            // unmapFrame() synchronizes with the VideoDecode API (ensures the frame has finished decoding)
            g_pVideoDecoder->unmapFrame(pDecodedFrame[active_field]);
            // release the frame, so it can be re-used in decoder
            g_pFrameQueue->releaseFrame(&oDisplayInfo);
            g_DecodeFrameCount++;
        }

        // Detach from the Current thread
        checkCudaErrors(cuCtxPopCurrent(NULL));
    }
    else
    {
        return false;
    }

    // check if decoding has come to an end.
    // if yes, signal the app to shut down.
    if (!g_pVideoSource->isStarted() || g_pFrameQueue->isEndOfDecode())
    {
        // Let's free the Frame Data
        if (g_ReadbackSID && g_bFrameData)
        {
            cuMemFreeHost((void *)g_bFrameData[0]);
            cuMemFreeHost((void *)g_bFrameData[1]);
            g_bFrameData[0] = NULL;
            g_bFrameData[1] = NULL;
        }

        // Let's just stop, and allow the user to quit, so they can at least see the results
        g_pVideoSource->stop();

        // If we want to loop reload the video file and restart
        if (g_bLoop && !g_bAutoQuit)
        {
            reinitCudaResources();
            g_FrameCount = 0;
            g_DecodeFrameCount = 0;
            g_pVideoSource->start();
        }

        if (g_bAutoQuit)
        {
            g_bDone = true;
        }
    }

    return true;
}