void cv::superres::Farneback_GPU::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    GpuMat frame0 = ::getGpuMat(_frame0, buf[0]);
    GpuMat frame1 = ::getGpuMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    GpuMat input0 = ::convertToType(frame0, CV_8U, 1, buf[2], buf[3]);
    GpuMat input1 = ::convertToType(frame1, CV_8U, 1, buf[4], buf[5]);

    // Fast path: both destinations are GpuMats, so write the flow components directly.
    if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
    {
        call(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
        return;
    }

    call(input0, input1, u, v);

    if (_flow2.needed())
    {
        // Two destinations: copy each flow component separately.
        ::copy(_flow1, u);
        ::copy(_flow2, v);
    }
    else
    {
        // Single destination: pack both components into a two-channel flow field.
        GpuMat src[] = {u, v};
        gpu::merge(src, 2, flow);
        ::copy(_flow1, flow);
    }
}
GpuMat cv::cuda::getOutputMat(OutputArray _dst, int rows, int cols, int type, Stream& stream)
{
    GpuMat dst;

#ifndef HAVE_CUDA
    (void) _dst;
    (void) rows;
    (void) cols;
    (void) type;
    (void) stream;
    throw_no_cuda();
#else
    if (_dst.kind() == _InputArray::CUDA_GPU_MAT)
    {
        // The caller passed a GpuMat: allocate it and let the caller write into it directly.
        _dst.create(rows, cols, type);
        dst = _dst.getGpuMat();
    }
    else
    {
        // Host destination: borrow a temporary device buffer from the stream's pool.
        BufferPool pool(stream);
        dst = pool.getBuffer(rows, cols, type);
    }
#endif

    return dst;
}
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    typedef void (*func_t)(InputArray src, OutputArray dst);

    // Dispatch table indexed by [source kind][destination kind].
    static const func_t funcs[10][10] =
    {
        {0, 0,       0,       0,       0,       0,       0,       0,       0,       0      },
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr},
        {0, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr},
        {0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, arr2tex, gpu2gpu}
    };

    const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;

    CV_DbgAssert( src_kind >= 0 && src_kind < 10 );
    CV_DbgAssert( dst_kind >= 0 && dst_kind < 10 );

    const func_t func = funcs[src_kind][dst_kind];
    CV_DbgAssert( func != 0 );

    func(src, dst);
}
void BTVL1::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
    if (outPos_ >= storePos_)
    {
        _output.release();
        return;
    }

    readNextFrame(frameSource);

    if (procPos_ < storePos_)
    {
        ++procPos_;
        processFrame(procPos_);
    }

    ++outPos_;

    CV_OCL_RUN(isUmat_, ocl_processImpl(frameSource, _output))

    const Mat& curOutput = at(outPos_, outputs_);

    if (_output.kind() < _InputArray::OPENGL_BUFFER || _output.isUMat())
        curOutput.convertTo(_output, CV_8U);
    else
    {
        // OpenGL/GPU destinations: convert on the host first, then copy across.
        curOutput.convertTo(finalOutput_, CV_8U);
        arrCopy(finalOutput_, _output);
    }
}
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    // UMat copies are handled by the core transparent API directly.
    if (dst.isUMat() || src.isUMat())
    {
        src.copyTo(dst);
        return;
    }

    typedef void (*func_t)(InputArray src, OutputArray dst);

    // Dispatch table indexed by [source kind][destination kind].
    static const func_t funcs[10][10] =
    {
        {0, 0,       0,       0,       0,       0,       0,       0,       0, 0      },
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu},
        {0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, 0, buf2arr},
        {0, 0,       0,       0,       0,       0,       0,       0,       0, 0      },
        {0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, 0, gpu2gpu}
    };

    const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;

    CV_Assert( src_kind >= 0 && src_kind < 10 );
    CV_Assert( dst_kind >= 0 && dst_kind < 10 );

    const func_t func = funcs[src_kind][dst_kind];
    CV_Assert( func != 0 );

    func(src, dst);
}
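A minimal usage sketch (hypothetical caller; assumes the superres internal header declaring arrCopy is visible): the dispatch table means one call site handles host-to-device and device-to-host copies alike.

// Hypothetical example: both directions go through the same entry point,
// resolved at runtime to mat2gpu and gpu2mat respectively.
cv::Mat host(480, 640, CV_8UC3, cv::Scalar::all(0));
cv::cuda::GpuMat device;
cv::superres::arrCopy(host, device);      // Mat -> GpuMat (mat2gpu)
cv::Mat roundTrip;
cv::superres::arrCopy(device, roundTrip); // GpuMat -> Mat (gpu2mat)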
void cv::superres::Farneback::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    Mat frame0 = ::getMat(_frame0, buf[0]);
    Mat frame1 = ::getMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    Mat input0 = ::convertToType(frame0, CV_8U, 1, buf[2], buf[3]);
    Mat input1 = ::convertToType(frame1, CV_8U, 1, buf[4], buf[5]);

    // Fast path: a single host destination can receive the flow directly.
    if (!_flow2.needed() && _flow1.kind() != _InputArray::GPU_MAT)
    {
        call(input0, input1, _flow1);
        return;
    }

    call(input0, input1, flow);

    if (!_flow2.needed())
    {
        ::copy(_flow1, flow);
    }
    else
    {
        // Split the two-channel flow field into separate u and v planes.
        split(flow, flows);
        ::copy(_flow1, flows[0]);
        ::copy(_flow2, flows[1]);
    }
}
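These calc implementations sit behind the superres DenseOpticalFlowExt interface; a sketch of how a caller might drive the CPU variant through the createOptFlow_Farneback factory (frame names are illustrative):

// Hypothetical usage: dense flow between two consecutive frames.
cv::Mat frame0 = cv::imread("frame0.png");
cv::Mat frame1 = cv::imread("frame1.png");
cv::Ptr<cv::superres::DenseOpticalFlowExt> of =
    cv::superres::createOptFlow_Farneback();
cv::Mat flow;                    // receives a CV_32FC2 flow field
of->calc(frame0, frame1, flow);  // single-output path shown above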
void cv::viz::readTrajectory(OutputArray _traj, const String& files_format, int start, int end, const String& tag)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);

    start = std::max(0, std::min(start, end));
    end = std::max(start, end);

    std::vector<Affine3d> traj;

    for (int i = start; i < end; ++i)
    {
        Affine3d affine;
        bool ok = readPose(cv::format(files_format.c_str(), i), affine, tag);
        if (!ok)
            break; // Stop at the first missing or unreadable pose file.

        traj.push_back(affine);
    }

    Mat(traj).convertTo(_traj, _traj.depth());
}
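A short usage sketch (the file pattern and range are illustrative):

// Hypothetical example: read poses pose000.xml .. pose099.xml into a
// vector of affine transforms; reading stops early at the first gap.
std::vector<cv::Affine3d> trajectory;
cv::viz::readTrajectory(trajectory, "pose%03d.xml", 0, 100);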
void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) ddepth;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const int cn = format_ == DEPTH_COMPONENT ? 1 : format_ == RGB ? 3 : 4;
    const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA;

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer& buf = arr.getOGlBufferRef();
            buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            break;
        }

    case _InputArray::GPU_MAT:
        {
        #ifndef HAVE_CUDA
            throw_no_cuda();
        #else
            // Read back through a temporary pixel-pack buffer, then hand it to CUDA.
            ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
            buf.setAutoRelease(true);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            buf.copyTo(arr);
        #endif
            break;
        }

    default:
        {
            // Host destination: read the texture straight into a continuous Mat.
            arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
        }
    }
#endif
}
void cv::ogl::Buffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_nogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this, target, autoRelease);
            break;
        }

    case _InputArray::OPENGL_TEXTURE:
        {
            arr.getOGlTexture2DRef().copyFrom(*this, autoRelease);
            break;
        }

    case _InputArray::GPU_MAT:
        {
        #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda();
        #else
            GpuMat& dmat = arr.getGpuMatRef();
            dmat.create(rows_, cols_, type_);
            impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
        #endif
            break;
        }

    default:
        {
            // Host destination: download the whole buffer into a continuous Mat.
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}
void cv::cuda::syncOutput(const GpuMat& dst, OutputArray _dst, Stream& stream)
{
#ifndef HAVE_CUDA
    (void) dst;
    (void) _dst;
    (void) stream;
    throw_no_cuda();
#else
    // A GpuMat destination was already written in place; anything else needs a download.
    if (_dst.kind() != _InputArray::CUDA_GPU_MAT)
    {
        if (stream)
            dst.download(_dst, stream);
        else
            dst.download(_dst);
    }
#endif
}
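getOutputMat and syncOutput are two halves of one pattern: acquire a device destination up front, then download only when the caller did not pass a GpuMat. A sketch of a wrapper built that way (hypothetical function; getInputMat is assumed to be available from the same internal header, and cuda::subtract stands in for a real kernel launch):

// Hypothetical wrapper: a GpuMat destination is written in place, while a
// host destination gets a pooled buffer that syncOutput() downloads.
void myGpuInvert(cv::InputArray _src, cv::OutputArray _dst, cv::cuda::Stream& stream)
{
    using namespace cv::cuda;

    GpuMat src = getInputMat(_src, stream);
    GpuMat dst = getOutputMat(_dst, src.rows, src.cols, src.type(), stream);

    // Stand-in for the actual device work.
    cv::cuda::subtract(cv::Scalar::all(255), src, dst, cv::noArray(), -1, stream);

    syncOutput(dst, _dst, stream);
}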
void cv::gpu::evenLevels(OutputArray _levels, int nLevels, int lowerLevel, int upperLevel)
{
    const int kind = _levels.kind();

    _levels.create(1, nLevels, CV_32SC1);

    Mat host_levels;
    if (kind == _InputArray::GPU_MAT)
        host_levels.create(1, nLevels, CV_32SC1);
    else
        host_levels = _levels.getMat();

    // NPP computes the levels on the host; upload afterwards if the caller wants a GpuMat.
    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );

    if (kind == _InputArray::GPU_MAT)
        _levels.getGpuMatRef().upload(host_levels);
}
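A usage sketch (hypothetical values; the exact boundary spacing is defined by NPP):

// Hypothetical example: request 5 evenly spaced histogram levels over
// [0, 256]; with a GpuMat destination the host result is uploaded at the end.
cv::gpu::GpuMat levels;
cv::gpu::evenLevels(levels, 5, 0, 256);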
void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::CUDA_GPU_MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_HOST_MEM:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
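A usage sketch (sizes are illustrative): unlike a plain create(), a shrinking request reuses the existing allocation instead of reallocating.

// Hypothetical example with a host Mat destination.
cv::Mat work;
cv::cuda::ensureSizeIsEnough(480, 640, CV_8UC1, work); // allocates 640x480
cv::cuda::ensureSizeIsEnough(240, 320, CV_8UC1, work); // reuses the same buffer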
void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::createContinuousImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::GPU_MAT:
        ::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_MEM:
        ::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
void UMat::copyTo(OutputArray _dst) const
{
    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        // The destination type is fixed and differs: delegate to convertTo.
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        _dst.release();
        return;
    }

    size_t i, sz[CV_MAX_DIM], srcofs[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize();
    for( i = 0; i < (size_t)dims; i++ )
        sz[i] = size.p[i];
    sz[dims-1] *= esz;
    ndoffset(srcofs);
    srcofs[dims-1] *= esz;

    _dst.create( dims, size.p, type() );
    if( _dst.kind() == _InputArray::UMAT )
    {
        UMat dst = _dst.getUMat();

        void* srchandle = handle(ACCESS_READ);
        void* dsthandle = dst.handle(ACCESS_WRITE);
        if( srchandle == dsthandle && dst.offset == offset )
            return; // Source and destination alias the same buffer: nothing to do.

        dst.ndoffset(dstofs);
        CV_Assert(u->currAllocator == dst.u->currAllocator);
        u->currAllocator->copy(u, dst.u, dims, sz, srcofs, step.p, dstofs, dst.step.p, false);
    }
    else
    {
        Mat dst = _dst.getMat();
        u->currAllocator->download(u, dst.data, dims, sz, srcofs, step.p, dst.step.p);
    }
}
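A sketch exercising both branches (names are illustrative):

// Hypothetical example: UMat -> UMat stays with the allocator's copy(),
// while UMat -> Mat takes the download() path at the end of copyTo().
cv::UMat usrc(4, 4, CV_32F, cv::Scalar::all(1));
cv::UMat udst;
usrc.copyTo(udst); // allocator-to-allocator copy
cv::Mat hdst;
usrc.copyTo(hdst); // downloads into host memory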
void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::GPU_MAT:
        {
        #ifndef HAVE_CUDA
            throw_no_cuda();
        #else
            GpuMat& dmat = arr.getGpuMatRef();
            dmat.create(rows_, cols_, type_);
            impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
        #endif
            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}