// Builds the VTK point and tensor arrays from a trajectory of 4x4 poses.
// _traj must be a std::vector or Mat whose elements are CV_32FC(16) or
// CV_64FC(16) (one flattened 4x4 affine matrix per pose). The data is
// converted to double and reinterpreted in place as Affine3d.
void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    // Convert to CV_64F so the buffer can be viewed as an array of Affine3d.
    Mat traj;
    _traj.getMat().convertTo(traj, CV_64F);
    const Affine3d* dpath = traj.ptr<Affine3d>();
    size_t total = traj.total();

    points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(VTK_DOUBLE);
    points->SetNumberOfPoints((vtkIdType)total);

    // 3x3 rotation per pose, stored as a 9-component tuple.
    tensors = vtkSmartPointer<vtkDoubleArray>::New();
    tensors->SetNumberOfComponents(9);
    tensors->SetNumberOfTuples((vtkIdType)total);

    for(size_t i = 0; i < total; ++i, ++dpath)
    {
        Matx33d R = dpath->rotation().t();       // transposed because of
        tensors->SetTuple((vtkIdType)i, R.val);  // column major order
        Vec3d p = dpath->translation();
        points->SetPoint((vtkIdType)i, p.val);
    }
}
void calcPosition( InputArray _tvecs, InputArray _rvecs, InputArray _pts, InputArray _cameraMatrices, InputArray _distortionMatrices, OutputArray _state, OutputArray _covariance ) { Ptr< PositionCalculator > p_pc = PositionCalculator::create(); std::vector< Mat > tvecs, rvecs; _tvecs.getMatVector( tvecs ); _rvecs.getMatVector( rvecs ); CV_Assert( tvecs.size() >= 2 ); CV_Assert( tvecs.size() == rvecs.size() ); Mat pts = _pts.getMat(); CV_Assert( ( tvecs.size() == pts.checkVector( 2, CV_32F, true ) ) ); std::vector< Mat > camera_m, dist_m; if ( _cameraMatrices.kind() == _InputArray::STD_VECTOR_MAT ) { _cameraMatrices.getMatVector( camera_m ); CV_Assert( tvecs.size() == camera_m.size() ); } else { camera_m.push_back( _cameraMatrices.getMat() ); CV_Assert( ( camera_m[0].rows == 3 ) && ( camera_m[0].cols == 3 ) ); } if ( _distortionMatrices.kind() == _InputArray::STD_VECTOR_MAT ) { _distortionMatrices.getMatVector( dist_m ); CV_Assert( tvecs.size() == dist_m.size() ); } else { dist_m.push_back( _distortionMatrices.getMat() ); CV_Assert( ( ( dist_m[0].rows == 5 ) && ( dist_m[0].cols == 1 ) ) || dist_m[0].empty() ); } Mat camera = camera_m[0]; Mat dist = dist_m[0]; for ( size_t i = 0; i < tvecs.size(); ++i ) { if ( camera_m.size() == tvecs.size() ) camera = camera_m[i]; if ( dist_m.size() == tvecs.size() ) dist = dist_m[i]; p_pc->addMeasurement( tvecs[i], rvecs[i], pts.at< Point2f >( i ), camera, dist ); } p_pc->computeState( _state, _covariance ); }
// Copies src into dst across host/GL/CUDA array kinds.
// UMat endpoints short-circuit through InputArray::copyTo; every other pair
// is dispatched via a function table indexed by _InputArray kind
// (row = source kind, column = destination kind). The table layout mirrors
// the _InputArray kind enum ordering — do not reorder rows or columns.
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    if (dst.isUMat() || src.isUMat())
    {
        src.copyTo(dst);
        return;
    }

    typedef void (*func_t)(InputArray src, OutputArray dst);
    static const func_t funcs[10][10] =
    {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, 0, buf2arr },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, 0 , gpu2gpu },
    };

    const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;

    CV_Assert( src_kind >= 0 && src_kind < 10 );
    CV_Assert( dst_kind >= 0 && dst_kind < 10 );

    const func_t func = funcs[src_kind][dst_kind];
    CV_Assert( func != 0 );  // zero entry = unsupported kind combination

    func(src, dst);
}
// Constructs an OpenGL buffer object from an arbitrary array.
// GL-buffer and GpuMat inputs are delegated to copyFrom(); a host Mat is
// uploaded directly, so it must be continuous for the single
// rows*cols*elemSize byte copy to be valid.
cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
    case _InputArray::GPU_MAT:
        // These need an existing GL/CUDA context path; reuse copyFrom.
        copyFrom(arr, target, autoRelease);
        break;

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            const GLsizeiptr asize = mat.rows * mat.cols * mat.elemSize();
            impl_.reset(new Impl(asize, mat.data, target, autoRelease));
            rows_ = mat.rows;
            cols_ = mat.cols;
            type_ = mat.type();
            break;
        }
    }
#endif
}
// Copies src into dst across host/GL-buffer/GL-texture/CUDA array kinds.
// The copy routine is selected from a table indexed by _InputArray kind
// (row = source kind, column = destination kind); the layout mirrors the
// _InputArray kind enum ordering, so rows/columns must not be reordered.
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    typedef void (*func_t)(InputArray src, OutputArray dst);
    static const func_t funcs[10][10] =
    {
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
        {0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr},
        {0, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr},
        {0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, arr2tex, gpu2gpu}
    };

    const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;

    // Debug-only checks: compiled out in release builds.
    CV_DbgAssert( src_kind >= 0 && src_kind < 10 );
    CV_DbgAssert( dst_kind >= 0 && dst_kind < 10 );

    const func_t func = funcs[src_kind][dst_kind];
    CV_DbgAssert( func != 0 );  // zero entry = unsupported kind combination

    func(src, dst);
}
// Constructs a GlBuffer from a host Mat or a GpuMat, preserving the source
// size and type. GpuMat input requires CUDA support; otherwise the host Mat
// path is taken.
cv::GlBuffer::GlBuffer(InputArray mat_, Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)
{
#ifndef HAVE_OPENGL
    (void)mat_;
    (void)_usage;
    throw_nogl;
#else
    int kind = mat_.kind();
    // Capture geometry before any upload work.
    Size _size = mat_.size();
    int _type = mat_.type();

    if (kind == _InputArray::GPU_MAT)
    {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
        throw_nocuda;
#else
        GpuMat d_mat = mat_.getGpuMat();
        impl_ = new Impl(d_mat.rows, d_mat.cols, d_mat.type(), _usage);
        impl_->copyFrom(d_mat);  // device -> GL buffer upload
#endif
    }
    else
    {
        Mat mat = mat_.getMat();
        impl_ = new Impl(mat, _usage);
    }

    rows_ = _size.height;
    cols_ = _size.width;
    type_ = _type;
#endif
}
void cv::imshow( const String& winname, InputArray _img ) { CV_TRACE_FUNCTION(); const Size size = _img.size(); #ifndef HAVE_OPENGL CV_Assert(size.width>0 && size.height>0); { Mat img = _img.getMat(); CvMat c_img = cvMat(img); cvShowImage(winname.c_str(), &c_img); } #else const double useGl = getWindowProperty(winname, WND_PROP_OPENGL); CV_Assert(size.width>0 && size.height>0); if (useGl <= 0) { Mat img = _img.getMat(); CvMat c_img = cvMat(img); cvShowImage(winname.c_str(), &c_img); } else { const double autoSize = getWindowProperty(winname, WND_PROP_AUTOSIZE); if (autoSize > 0) { resizeWindow(winname, size.width, size.height); } setOpenGlContext(winname); cv::ogl::Texture2D& tex = ownWndTexs[winname]; if (_img.kind() == _InputArray::CUDA_GPU_MAT) { cv::ogl::Buffer& buf = ownWndBufs[winname]; buf.copyFrom(_img); buf.setAutoRelease(false); tex.copyFrom(buf); tex.setAutoRelease(false); } else { tex.copyFrom(_img); } tex.setAutoRelease(false); setOpenGlDrawCallback(winname, glDrawTextureCallback, &tex); updateWindow(winname); } #endif }
// Extracts the translation component of every pose in a trajectory.
// _traj: std::vector or Mat of CV_32FC(16) / CV_64FC(16) 4x4 pose matrices.
// Returns a 1xN Mat of 3-channel points with the same depth as the input.
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3));

    // Keep the header from getMat() alive in a local: the original code
    // took ptr<>() on the temporary Mat returned by getMat(), relying on
    // the temporary's lifetime. Also take only the pointer matching the
    // actual depth instead of reinterpreting both ways unconditionally.
    Mat traj = _traj.getMat();

    if (_traj.depth() == CV_32F)
    {
        const Affine3f* fpath = traj.ptr<Affine3f>();
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();
    }

    if (_traj.depth() == CV_64F)
    {
        const Affine3d* dpath = traj.ptr<Affine3d>();
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();
    }

    return points;
}
// Registers the color attribute array (RGB or RGBA) for rendering.
// An existing GL buffer is referenced directly; anything else is copied
// into our own buffer.
void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int channels = color.channels();
    CV_Assert( channels == 3 || channels == 4 );

    const bool isGlBuffer = (color.kind() == _InputArray::OPENGL_BUFFER);
    if (!isGlBuffer)
        color_.copyFrom(color);
    else
        color_ = color.getOGlBuffer();
}
// Returns a host-side Mat for any input array; GPU inputs are downloaded
// into a fresh Mat first.
Mat getMat(InputArray arr)
{
    const bool onDevice = (arr.kind() == _InputArray::GPU_MAT);
    if (!onDevice)
        return arr.getMat();

    Mat host;
    arr.getGpuMat().download(host);
    return host;
}
// Copies the contents of arr into this GL buffer, reallocating as needed.
// Texture sources are handled by the texture's own copyTo before create()
// is ever called; buffer/GpuMat/host sources go through create() + a raw
// byte copy.
void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_nogl();
#else
    const int kind = arr.kind();

    if (kind == _InputArray::OPENGL_TEXTURE)
    {
        // Texture -> buffer readback is implemented on the texture side.
        ogl::Texture2D tex = arr.getOGlTexture2D();
        tex.copyTo(*this);
        setAutoRelease(autoRelease);
        return;
    }

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
            break;
        }

    case _InputArray::GPU_MAT:
        {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda();
#else
            // Row-wise copy: GpuMat rows may be padded (step != row bytes).
            GpuMat dmat = arr.getGpuMat();
            impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif
            break;
        }

    default:
        {
            // Host Mat must be continuous for the single flat byte copy.
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
        }
    }
#endif
}
// Registers the normal attribute array (3 components per vertex) for
// rendering. An existing GL buffer is referenced; anything else is copied.
void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
    const int channels = normal.channels();
    const int elemDepth = normal.depth();
    CV_Assert( channels == 3 );
    CV_Assert( elemDepth == CV_8S || elemDepth == CV_16S || elemDepth == CV_32S || elemDepth == CV_32F || elemDepth == CV_64F );

    const bool isGlBuffer = (normal.kind() == _InputArray::OPENGL_BUFFER);
    if (!isGlBuffer)
        normal_.copyFrom(normal);
    else
        normal_ = normal.getOGlBuffer();
}
// Registers the texture-coordinate attribute array (1 to 4 components per
// vertex). An existing GL buffer is referenced; anything else is copied.
void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int channels = texCoord.channels();
    const int elemDepth = texCoord.depth();
    CV_Assert( channels >= 1 && channels <= 4 );
    CV_Assert( elemDepth == CV_16S || elemDepth == CV_32S || elemDepth == CV_32F || elemDepth == CV_64F );

    const bool isGlBuffer = (texCoord.kind() == _InputArray::OPENGL_BUFFER);
    if (!isGlBuffer)
        texCoord_.copyFrom(texCoord);
    else
        texCoord_ = texCoord.getOGlBuffer();
}
// Writes every pose of a trajectory to its own file.
// files_format is a printf-style pattern (e.g. "pose%03d.xml") that receives
// the running file index; indexing starts at max(0, start).
// Accepts either a std::vector<Mat> of individual 4x4 poses or a
// vector/Mat of CV_32FC(16) / CV_64FC(16) elements.
void cv::viz::writeTrajectory(InputArray _traj, const String& files_format, int start, const String& tag)
{
    if (_traj.kind() == _InputArray::STD_VECTOR_MAT)
    {
#if CV_MAJOR_VERSION < 3
        // OpenCV 2.4 has no getObj(); read the raw member directly.
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.obj;
#else
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.getObj();
#endif

        for(size_t i = 0, index = max(0, start); i < v.size(); ++i, ++index)
        {
            Affine3d affine;
            Mat pose = v[i];
            CV_Assert(pose.type() == CV_32FC(16) || pose.type() == CV_64FC(16));
            pose.copyTo(affine.matrix);
            writePose(cv::format(files_format.c_str(), index), affine, tag);
        }
        return;
    }

    if (_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT)
    {
        CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

        Mat traj = _traj.getMat();

        // Elements are viewed in place as Affine3f / Affine3d by depth.
        if (traj.depth() == CV_32F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3f>((int)i), tag);

        if (traj.depth() == CV_64F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3d>((int)i), tag);
        return;
    }

    CV_Error(Error::StsError, "Unsupported array kind");
}
// Registers the vertex attribute array (2 to 4 components per vertex) and
// records the vertex count. An existing GL buffer is referenced; anything
// else is copied into our own buffer.
void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
    const int channels = vertex.channels();
    const int elemDepth = vertex.depth();
    CV_Assert( channels == 2 || channels == 3 || channels == 4 );
    CV_Assert( elemDepth == CV_16S || elemDepth == CV_32S || elemDepth == CV_32F || elemDepth == CV_64F );

    const bool isGlBuffer = (vertex.kind() == _InputArray::OPENGL_BUFFER);
    if (!isGlBuffer)
        vertex_.copyFrom(vertex);
    else
        vertex_ = vertex.getOGlBuffer();

    // Number of vertices = total elements in the stored buffer.
    size_ = vertex_.size().area();
}
// Returns a UMat view of arr, staging through buf when the source lives on
// the GPU or in a GL buffer.
UMat cv::superres::arrGetUMat(InputArray arr, UMat& buf)
{
    const int kind = arr.kind();

    if (kind == _InputArray::CUDA_GPU_MAT)
    {
        arr.getGpuMat().download(buf);
        return buf;
    }

    if (kind == _InputArray::OPENGL_BUFFER)
    {
        arr.getOGlBuffer().copyTo(buf);
        return buf;
    }

    return arr.getUMat();
}
// Returns a GpuMat for arr, staging through buf when a transfer is needed.
// A GpuMat source is returned directly without copying.
GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
    const int kind = arr.kind();

    if (kind == _InputArray::GPU_MAT)
        return arr.getGpuMat();  // already on the device

    if (kind == _InputArray::OPENGL_BUFFER)
    {
        arr.getOGlBuffer().copyTo(buf);
        return buf;
    }

    buf.upload(arr.getMat());
    return buf;
}
// Copies arr's contents into this texture, reallocating it to arr's
// size/type first. GpuMat sources are staged through the member buffer
// buf_; bgra selects BGRA channel ordering for the upload.
void cv::GlTexture::copyFrom(InputArray mat_, bool bgra)
{
#ifndef HAVE_OPENGL
    (void)mat_;
    (void)bgra;
    throw_nogl;
#else
    int kind = mat_.kind();
    Size _size = mat_.size();
    int _type = mat_.type();

    create(_size, _type);

    switch(kind)
    {
    case _InputArray::OPENGL_TEXTURE:
        {
            // Texture-to-texture: share the implementation by assignment.
            GlTexture tex = mat_.getGlTexture();
            *this = tex;
            break;
        }
    case _InputArray::OPENGL_BUFFER:
        {
            GlBuffer buf = mat_.getGlBuffer();
            impl_->copyFrom(buf, bgra);
            break;
        }
    case _InputArray::GPU_MAT:
        {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda;
#else
            // Stage device memory through the texture's pixel buffer.
            GpuMat d_mat = mat_.getGpuMat();
            buf_.copyFrom(d_mat);
            impl_->copyFrom(buf_, bgra);
#endif
            break;
        }
    default:
        {
            Mat mat = mat_.getMat();
            impl_->copyFrom(mat, bgra);
        }
    }
#endif
}
// OpenCL implementation of cv::norm. Returns true and stores the norm in
// result on success; returns false when the OpenCL path cannot handle the
// request (unsupported norm type, >4 channels, CV_64F without device double
// support, or a failed kernel run), letting the caller fall back to the
// CPU path.
static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & result )
{
    const ocl::Device & d = ocl::Device::getDefault();

#ifdef __ANDROID__
    // Workaround: skip the OpenCL path on Android NVidia devices.
    if (d.isNVidia())
        return false;
#endif
    const int cn = _src.channels();
    if (cn > 4)
        return false;
    int type = _src.type(), depth = CV_MAT_DEPTH(type);
    bool doubleSupport = d.doubleFPConfig() > 0,
        haveMask = _mask.kind() != _InputArray::NONE;

    if ( !(normType == NORM_INF || normType == NORM_L1 ||
           normType == NORM_L2 || normType == NORM_L2SQR) ||
         (!doubleSupport && depth == CV_64F))
        return false;

    UMat src = _src.getUMat();

    if (normType == NORM_INF)
    {
        // NORM_INF = max absolute value: delegate to the minMaxIdx kernel.
        if (!ocl_minMaxIdx(_src, NULL, &result, NULL, NULL, _mask,
                           std::max(depth, CV_32S), depth != CV_8U && depth != CV_16U))
            return false;
    }
    else if (normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR)
    {
        Scalar sc;
        bool unstype = depth == CV_8U || depth == CV_16U;

        // L2/L2SQR sum squares; L1 sums absolute values (plain sum suffices
        // for unsigned types). Without a mask the data is flattened to one
        // channel so a single per-channel sum covers everything.
        if ( !ocl_sum(haveMask ? src : src.reshape(1), sc, normType == NORM_L2 || normType == NORM_L2SQR ?
                    OCL_OP_SUM_SQR : (unstype ? OCL_OP_SUM : OCL_OP_SUM_ABS), _mask) )
            return false;

        double s = 0.0;
        for (int i = 0; i < (haveMask ? cn : 1); ++i)
            s += sc[i];

        // NORM_L2 is the square root of the summed squares.
        result = normType == NORM_L1 || normType == NORM_L2SQR ? s : std::sqrt(s);
    }

    return true;
}
// Constructs a texture from a GL buffer, a GpuMat (staged through a
// temporary texture buffer), or a host Mat. bgra selects BGRA channel
// ordering for the upload.
cv::GlTexture::GlTexture(InputArray mat_, bool bgra) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)
{
#ifndef HAVE_OPENGL
    (void)mat_;
    (void)bgra;
    throw_nogl;
#else
    int kind = mat_.kind();
    Size _size = mat_.size();
    int _type = mat_.type();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            GlBuffer buf = mat_.getGlBuffer();
            impl_ = new Impl(buf, bgra);
            break;
        }
    case _InputArray::GPU_MAT:
        {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda;
#else
            // Device data goes through a temporary texture buffer.
            GpuMat d_mat = mat_.getGpuMat();
            GlBuffer buf(d_mat, GlBuffer::TEXTURE_BUFFER);
            impl_ = new Impl(buf, bgra);
#endif
            break;
        }
    default:
        {
            Mat mat = mat_.getMat();
            impl_ = new Impl(mat, bgra);
            break;
        }
    }

    rows_ = _size.height;
    cols_ = _size.width;
    type_ = _type;
#endif
}
// Copies arr's contents into this GL buffer, (re)allocating it to arr's
// size/type first. GL-buffer sources are copied buffer-to-buffer; CUDA
// sources row-by-row (step-aware); host Mats as one flat byte copy.
void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Row-wise copy: GpuMat rows may be padded (step != row bytes).
            GpuMat dmat = arr.getGpuMat();
            impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif
            break;
        }

    default:
        {
            // Host Mat must be continuous for the single flat byte copy.
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
        }
    }
#endif
}
// Copies arr's contents into this buffer, (re)allocating it to the source
// size/type first. A GL-buffer source is adopted by assignment; GpuMat and
// host Mat sources are uploaded.
void cv::GlBuffer::copyFrom(InputArray mat_)
{
#ifndef HAVE_OPENGL
    (void)mat_;
    throw_nogl;
#else
    int kind = mat_.kind();
    Size _size = mat_.size();
    int _type = mat_.type();

    create(_size, _type);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // Buffer-to-buffer: share the implementation by assignment.
            GlBuffer buf = mat_.getGlBuffer();
            *this = buf;
            break;
        }
    case _InputArray::GPU_MAT:
        {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda;
#else
            GpuMat d_mat = mat_.getGpuMat();
            impl_->copyFrom(d_mat);
#endif
            break;
        }
    default:
        {
            Mat mat = mat_.getMat();
            impl_->copyFrom(mat, usage_);
        }
    }
#endif
}
// Returns a GpuMat holding _src's data: a direct reference when _src is
// already a GpuMat, otherwise a pool-allocated buffer that _src is
// asynchronously uploaded into on the given stream. An empty input yields
// an empty GpuMat.
GpuMat cv::cuda::getInputMat(InputArray _src, Stream& stream)
{
    GpuMat result;
#ifndef HAVE_CUDA
    (void) _src;
    (void) stream;
    throw_no_cuda();
#else
    const bool alreadyOnDevice = (_src.kind() == _InputArray::CUDA_GPU_MAT);

    if (alreadyOnDevice)
    {
        result = _src.getGpuMat();
    }
    else if (!_src.empty())
    {
        BufferPool pool(stream);
        result = pool.getBuffer(_src.size(), _src.type());
        result.upload(_src, stream);
    }
#endif
    return result;
}
// Constructs a texture from a GL buffer, a GpuMat, or a host Mat.
// Supported formats: 1 channel (DEPTH_COMPONENT), 3 (RGB), 4 (RGBA), with
// depth up to CV_32F. Buffer/GpuMat paths bind the data as a pixel-unpack
// buffer, in which case the null data argument to Impl is an offset into
// the bound PBO rather than a host pointer.
cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // Indexed by channel count (index 2 is unsupported).
    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Stage device memory through a temporary pixel-unpack buffer.
            GpuMat dmat = arr.getGpuMat();
            ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif
            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // No PBO may be bound when uploading from a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease));
            break;
        }
    }

    rows_ = asize.height;
    cols_ = asize.width;
    format_ = internalFormats[cn];
#endif
}
// Renders the vertex arrays using an index array (glDrawElements).
// Indices may come from a GL buffer (bound as ELEMENT_ARRAY_BUFFER, data
// argument is an offset) or a continuous host Mat (raw pointer). Index
// depth maps to the GL index type: <CV_16U -> UNSIGNED_BYTE,
// <CV_32S -> UNSIGNED_SHORT, else UNSIGNED_INT.
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                // With the element buffer bound, the last argument is an
                // offset into that buffer (0 = start).
                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                // No element buffer may be bound when passing a host pointer.
                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}
// Copies arr's contents into this texture, reallocating it first.
// Supported formats: 1 channel (DEPTH_COMPONENT), 3 (RGB), 4 (RGBA), with
// depth up to CV_32F. Buffer/GpuMat paths bind the data as a pixel-unpack
// buffer, in which case the null data argument to Impl::copyFrom is an
// offset into the bound PBO rather than a host pointer.
void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_nogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // Indexed by channel count (index 2 is unsupported).
    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    create(asize, internalFormats[cn], autoRelease);

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::GPU_MAT:
        {
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda();
#else
            // Stage device memory through a temporary pixel-unpack buffer.
            GpuMat dmat = arr.getGpuMat();
            ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif
            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // No PBO may be bound when uploading from a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
        }
    }
#endif
}
// Displays an image in the named window (legacy std::string overload).
// Without OpenGL support, or for non-OpenGL windows, the image is drawn via
// the legacy C HighGUI path. For OpenGL windows: a Texture2D input is
// referenced directly; a GpuMat is staged through a per-window GL buffer;
// anything else is uploaded into a per-window texture.
void cv::imshow( const std::string& winname, InputArray _img )
{
#ifndef HAVE_OPENGL
    Mat img = _img.getMat();
    CvMat c_img = img;
    cvShowImage(winname.c_str(), &c_img);
#else
    const double useGl = getWindowProperty(winname, WND_PROP_OPENGL);

    if (useGl <= 0)
    {
        // Not an OpenGL window: fall back to the legacy C API.
        Mat img = _img.getMat();
        CvMat c_img = img;
        cvShowImage(winname.c_str(), &c_img);
    }
    else
    {
        const double autoSize = getWindowProperty(winname, WND_PROP_AUTOSIZE);

        if (autoSize > 0)
        {
            Size size = _img.size();
            resizeWindow(winname, size.width, size.height);
        }

        setOpenGlContext(winname);

        if (_img.kind() == _InputArray::OPENGL_TEXTURE)
        {
            // User-supplied texture: reference it, do not copy.
            cv::ogl::Texture2D& tex = wndTexs[winname];

            tex = _img.getOGlTexture2D();

            tex.setAutoRelease(false);

            setOpenGlDrawCallback(winname, glDrawTextureCallback, &tex);
        }
        else
        {
            cv::ogl::Texture2D& tex = ownWndTexs[winname];

            if (_img.kind() == _InputArray::GPU_MAT)
            {
                // Device input: stage through a per-window GL buffer.
                cv::ogl::Buffer& buf = ownWndBufs[winname];
                buf.copyFrom(_img);
                buf.setAutoRelease(false);

                tex.copyFrom(buf);
                tex.setAutoRelease(false);
            }
            else
            {
                tex.copyFrom(_img);
            }

            // NOTE(review): redundant with the setAutoRelease(false) already
            // done in the GPU branch above — harmless, kept as-is.
            tex.setAutoRelease(false);

            setOpenGlDrawCallback(winname, glDrawTextureCallback, &tex);
        }

        updateWindow(winname);
    }
#endif
}