void cv::superres::Farneback_GPU::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    GpuMat frame0 = ::getGpuMat(_frame0, buf[0]);
    GpuMat frame1 = ::getGpuMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    GpuMat input0 = ::convertToType(frame0, CV_8U, 1, buf[2], buf[3]);
    GpuMat input1 = ::convertToType(frame1, CV_8U, 1, buf[4], buf[5]);

    if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
    {
        // Fast path: both outputs are GpuMats, so the flow components
        // can be written directly with no intermediate buffers.
        call(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
        return;
    }

    // Otherwise compute into the internal u/v buffers first.
    call(input0, input1, u, v);

    if (_flow2.needed())
    {
        // Two outputs requested: copy each component out separately.
        ::copy(_flow1, u);
        ::copy(_flow2, v);
    }
    else
    {
        // One output requested: merge u and v into a 2-channel flow field.
        GpuMat src[] = {u, v};
        gpu::merge(src, 2, flow);
        ::copy(_flow1, flow);
    }
}
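The dispatch above means the caller chooses the output layout. A minimal caller sketch, assuming the OpenCV 2.4 superres API (createOptFlow_Farneback_GPU and DenseOpticalFlowExt::calc are the public entry points; function names in the sketch itself are illustrative):

#include <opencv2/gpu/gpu.hpp>
#include <opencv2/superres/optical_flow.hpp>

void computeFlow(const cv::Mat& frame0, const cv::Mat& frame1)
{
    cv::Ptr<cv::superres::DenseOpticalFlowExt> flow =
        cv::superres::createOptFlow_Farneback_GPU();

    // Two GpuMat outputs hit the fast path: u and v are written directly.
    cv::gpu::GpuMat u, v;
    flow->calc(frame0, frame1, u, v);

    // A single host output yields the merged CV_32FC2 flow field.
    cv::Mat flow2ch;
    flow->calc(frame0, frame1, flow2ch);
}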
void cv::ogl::Buffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_nogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this, target, autoRelease);
            break;
        }

    case _InputArray::OPENGL_TEXTURE:
        {
            arr.getOGlTexture2DRef().copyFrom(*this, autoRelease);
            break;
        }

    case _InputArray::GPU_MAT:
        {
            #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
                throw_nocuda();
            #else
                GpuMat& dmat = arr.getGpuMatRef();
                dmat.create(rows_, cols_, type_);
                impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}
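Each case dispatches on the destination kind: another OpenGL buffer or a texture stays on the GPU, a GpuMat goes through the CUDA interop path, and anything else falls back to a host Mat, which must be continuous because the backing copy is a single flat transfer. A hedged usage sketch, assuming an OpenCV 2.4 build with OpenGL support and a GL context current on the calling thread:

#include <opencv2/core/opengl_interop.hpp>

void readBack(const cv::ogl::Buffer& buf)
{
    // Host readback: takes the default case; a freshly created Mat is continuous.
    cv::Mat host;
    buf.copyTo(host);

    // Device-side copy into another buffer object.
    cv::ogl::Buffer other;
    buf.copyTo(other, cv::ogl::Buffer::ARRAY_BUFFER);
}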
Example #3
void cv::gpu::evenLevels(OutputArray _levels, int nLevels, int lowerLevel, int upperLevel)
{
    const int kind = _levels.kind();

    _levels.create(1, nLevels, CV_32SC1);

    // nppiEvenLevelsHost_32s can only write to host memory, so the levels
    // are computed on the host and uploaded afterwards if needed.
    Mat host_levels;
    if (kind == _InputArray::GPU_MAT)
        host_levels.create(1, nLevels, CV_32SC1);
    else
        host_levels = _levels.getMat();

    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );

    if (kind == _InputArray::GPU_MAT)
        _levels.getGpuMatRef().upload(host_levels);
}
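A hedged usage sketch, matching the OutputArray-based signature shown above (this is a transitional revision; the exact header and namespace depend on the OpenCV version):

#include <opencv2/gpu/gpu.hpp>

void makeLevels()
{
    // Host destination: the levels land directly in the Mat.
    cv::Mat levels;
    cv::gpu::evenLevels(levels, 5, 0, 256);

    // Device destination: computed on the host, then uploaded.
    cv::gpu::GpuMat d_levels;
    cv::gpu::evenLevels(d_levels, 5, 0, 256);

    // Either way the result is 1 x nLevels, CV_32SC1, with boundaries
    // evenly spaced between lowerLevel and upperLevel.
}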
Example #4
void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::CUDA_GPU_MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_HOST_MEM:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
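Unlike create(), ensureSizeIsEnough only reallocates when the existing buffer is too small or of a different type, which makes it the natural call inside per-frame loops that reuse a destination. A minimal sketch of that pattern, assuming OpenCV 3.x headers:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void perFrame(cv::cuda::GpuMat& buf)  // buf persists across calls
{
    // First call allocates; later calls with an equal or smaller size
    // reuse the existing allocation instead of reallocating.
    cv::cuda::ensureSizeIsEnough(480, 640, CV_8UC1, buf);
}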
Example #5
void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::createContinuousImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::GPU_MAT:
        ::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_MEM:
        ::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
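GpuMat allocations are normally pitched, i.e. each row is padded for alignment; createContinuous guarantees step == cols * elemSize(), so the data can be treated as one flat array (useful for reshape() or a single memcpy). A minimal sketch, assuming the transitional cv::cuda API shown above:

#include <opencv2/core/cuda.hpp>

void flatBuffer()
{
    cv::cuda::GpuMat m;
    cv::cuda::createContinuous(480, 640, CV_32FC1, m);

    // No row padding, so the matrix can be viewed as a single row.
    CV_Assert(m.isContinuous());
    cv::cuda::GpuMat flat = m.reshape(1, 1);
    (void) flat;
}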
Example #6
void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat& dmat = arr.getGpuMatRef();
                dmat.create(rows_, cols_, type_);
                impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}
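This overload is the later revision of the same routine: the texture case is gone and the guards use the renamed throw_no_ogl/throw_no_cuda helpers. The GPU_MAT branch is the interop-relevant one; a hedged readback sketch, assuming a build with both HAVE_OPENGL and HAVE_CUDA:

#include <opencv2/core/opengl.hpp>
#include <opencv2/core/cuda.hpp>

cv::cuda::GpuMat downloadToDevice(const cv::ogl::Buffer& buf)
{
    // Hits the GPU_MAT case: create() then a pitched device copy.
    cv::cuda::GpuMat dmat;
    buf.copyTo(dmat);
    return dmat;
}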
Example #7
void cv::gpu::histEven(InputArray _src, OutputArray hist, InputOutputArray buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    typedef void (*hist_t)(const GpuMat& src, OutputArray hist, InputOutputArray buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramEvenC1<CV_8U , nppiHistogramEven_8u_C1R , nppiHistogramEvenGetBufferSize_8u_C1R >::hist,
        0,
        NppHistogramEvenC1<CV_16U, nppiHistogramEven_16u_C1R, nppiHistogramEvenGetBufferSize_16u_C1R>::hist,
        NppHistogramEvenC1<CV_16S, nppiHistogramEven_16s_C1R, nppiHistogramEvenGetBufferSize_16s_C1R>::hist
    };

    GpuMat src = _src.getGpuMat();

    // Fast path: 8-bit input on a compute 3.0+ device uses a custom CUDA
    // kernel (histEven8u) instead of the NPP callers above.
    if (src.depth() == CV_8U && deviceSupports(FEATURE_SET_COMPUTE_30))
    {
        histEven8u(src, hist.getGpuMatRef(), histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
        return;
    }

    CV_Assert( src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );

    // Dispatch by depth through the table; the 0 entry is the unsupported CV_8S slot.
    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
}
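A hedged usage sketch, matching the signature shown above (transitional cv::gpu naming; the required header depends on the revision):

#include <opencv2/gpu/gpu.hpp>

void histogram(const cv::gpu::GpuMat& gray)  // expects CV_8UC1 input
{
    // 256 evenly spaced bins over [0, 256); buf is scratch space that
    // can be reused across calls to avoid reallocation.
    cv::gpu::GpuMat hist, buf;
    cv::gpu::histEven(gray, hist, buf, 256, 0, 256, cv::gpu::Stream::Null());

    // hist is a 1 x 256 CV_32SC1 row: one count per bin.
}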