/** Element-wise division with CUDA, mirroring cv::cuda::multiply.
 *
 *  Special fast paths exist for 8UC4/16SC4 images divided by a 32FC1 map;
 *  everything else goes through the generic arithm_op dispatcher.
 *
 *  Updated to the stream-aware helper pattern (getInputMat/getOutputMat/
 *  syncOutput) already used by cv::cuda::multiply in this file, so the
 *  destination is allocated from the stream's buffer pool and synchronized
 *  back to the caller's array.
 */
void cv::cuda::divide(InputArray _src1, InputArray _src2, OutputArray _dst, double scale, int dtype, Stream& stream)
{
    if (_src1.type() == CV_8UC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);

        CV_Assert( src1.size() == src2.size() );

        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);

        divMat_8uc4_32f(src1, src2, dst, stream);

        syncOutput(dst, _dst, stream);
    }
    else if (_src1.type() == CV_16SC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);

        CV_Assert( src1.size() == src2.size() );

        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);

        divMat_16sc4_32f(src1, src2, dst, stream);

        syncOutput(dst, _dst, stream);
    }
    else
    {
        // Generic path: scale, dtype and scalar operands handled by arithm_op.
        arithm_op(_src1, _src2, _dst, GpuMat(), scale, dtype, stream, divMat, divScalar);
    }
}
/** Loads a trajectory (a sequence of 4x4 affine poses) into this VTK source.
 *
 *  Accepts a std::vector or Mat of CV_32FC(16)/CV_64FC(16) poses; everything
 *  is converted to double so the data can be reinterpreted as Affine3d.
 *  Fills the member 'points' with the per-pose translations and the member
 *  'tensors' with the 9-component rotation of each pose.
 */
void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    // Promote to double so each 16-element pose maps onto an Affine3d.
    Mat traj;
    _traj.getMat().convertTo(traj, CV_64F);
    const Affine3d* dpath = traj.ptr<Affine3d>();
    size_t total = traj.total();

    points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(VTK_DOUBLE);
    points->SetNumberOfPoints((vtkIdType)total);

    tensors = vtkSmartPointer<vtkDoubleArray>::New();
    tensors->SetNumberOfComponents(9);   // 3x3 rotation per pose
    tensors->SetNumberOfTuples((vtkIdType)total);

    for(size_t i = 0; i < total; ++i, ++dpath)
    {
        Matx33d R = dpath->rotation().t();      // transposed because of
        tensors->SetTuple((vtkIdType)i, R.val); // column major order

        Vec3d p = dpath->translation();
        points->SetPoint((vtkIdType)i, p.val);
    }
}
/** Computes the mean depth (camera-frame z) of the object points under the
 *  pose (_rvec, _tvec).
 *
 *  @param _objectPoints  CV_64FC3 object points (read as Point3d below).
 *  @param _rvec          rotation vector (Rodrigues form).
 *  @param _tvec          translation vector.
 *  @return mean z coordinate of the transformed points.
 *
 *  Fix: the original asserted the SAME condition twice joined by bitwise '|'
 *  ((type==CV_64FC3) | (type==CV_64FC3)), which was a tautological duplicate;
 *  only CV_64FC3 is actually supported since points are read as Point3d.
 */
double IPPE::PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
{
    assert(_objectPoints.type() == CV_64FC3);

    size_t n = _objectPoints.rows() * _objectPoints.cols();
    Mat R;
    Mat q;
    Rodrigues(_rvec, R);

    // Hoisted out of the loop: these headers do not change per iteration.
    Mat objPts = _objectPoints.getMat();
    Mat t = _tvec.getMat();

    double zBar = 0;
    for (size_t i = 0; i < n; i++) {
        cv::Mat p(objPts.at<Point3d>(i));
        q = R * p + t;
        double z;
        // q may be float if _tvec was float; read accordingly.
        if (q.depth() == CV_64F) {
            z = q.at<double>(2);
        }
        else {
            z = static_cast<double>(q.at<float>(2));
        }
        zBar += z;
        // Points with z <= 0 project behind the camera; kept silent here,
        // matching the original (its warning was commented out).
    }
    return zBar / static_cast<double>(n);
}
/** Per-pixel linear blend: dst = src1*weights1 + src2*weights2.
 *
 *  src1/src2 must share size and type (CV_8U or CV_32F depth); the weight
 *  maps are single-channel float images of the same size.
 */
void cv::blendLinear( InputArray _src1, InputArray _src2, InputArray _weights1, InputArray _weights2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type);
    Size size = _src1.size();

    CV_Assert(depth == CV_8U || depth == CV_32F);
    CV_Assert(size == _src2.size() && size == _weights1.size() && size == _weights2.size());
    CV_Assert(type == _src2.type() && _weights1.type() == CV_32FC1 && _weights2.type() == CV_32FC1);

    _dst.create(size, type);

    // Prefer the OpenCL kernel when the destination is a UMat.
    CV_OCL_RUN(_dst.isUMat(),
               ocl_blendLinear(_src1, _src2, _weights1, _weights2, _dst))

    Mat src1 = _src1.getMat(), src2 = _src2.getMat(), weights1 = _weights1.getMat(),
        weights2 = _weights2.getMat(), dst = _dst.getMat();

    if (depth == CV_8U)
    {
        BlendLinearInvoker<uchar> invoker(src1, src2, weights1, weights2, dst);
        // Parallelize over rows; cost hint of roughly one stripe per 64K pixels.
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
    else if (depth == CV_32F)
    {
        BlendLinearInvoker<float> invoker(src1, src2, weights1, weights2, dst);
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
}
/** Renders keypoints onto outImage.
 *
 *  Unless DRAW_OVER_OUTIMG is set, outImage is first (re)initialized from
 *  the input image (gray inputs are promoted to BGR). A color of
 *  Scalar::all(-1) means "random color per keypoint".
 */
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                    const Scalar& _color, DrawMatchesFlags flags )
{
    CV_INSTRUMENT_REGION();

    if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        const int srcType = image.type();
        if (srcType == CV_8UC3 || srcType == CV_8UC4)
        {
            image.copyTo(outImage);
        }
        else if (srcType == CV_8UC1)
        {
            cvtColor( image, outImage, COLOR_GRAY2BGR );
        }
        else
        {
            CV_Error( Error::StsBadArg, "Incorrect type of input image: " + typeToString(image.type()) );
        }
    }

    RNG& rng = theRNG();
    const bool isRandColor = _color == Scalar::all(-1);

    CV_Assert( !outImage.empty() );

    for (const KeyPoint& kp : keypoints)
    {
        const Scalar color = isRandColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : _color;
        _drawKeypoint( outImage, kp, color, flags );
    }
}
/** Element-wise multiplication with CUDA.
 *
 *  Fast paths handle an 8UC4 or 16SC4 image multiplied by a 32FC1 map of the
 *  same size; all other combinations are dispatched through arithm_op, which
 *  also applies 'scale', 'dtype' and scalar operands. Inputs/outputs are
 *  moved through the stream-aware helpers so allocation and upload/download
 *  honor the given Stream.
 */
void cv::cuda::multiply(InputArray _src1, InputArray _src2, OutputArray _dst, double scale, int dtype, Stream& stream)
{
    if (_src1.type() == CV_8UC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);

        CV_Assert( src1.size() == src2.size() );

        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);

        mulMat_8uc4_32f(src1, src2, dst, stream);

        // Copy/commit the pooled result back into the caller's array.
        syncOutput(dst, _dst, stream);
    }
    else if (_src1.type() == CV_16SC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);

        CV_Assert( src1.size() == src2.size() );

        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);

        mulMat_16sc4_32f(src1, src2, dst, stream);

        syncOutput(dst, _dst, stream);
    }
    else
    {
        arithm_op(_src1, _src2, _dst, GpuMat(), scale, dtype, stream, mulMat, mulScalar);
    }
}
/** Applies a soft elliptical matte: pixels inside the ellipse spanned by the
 *  two corner points keep the image, pixels outside fade to white, with a
 *  Gaussian-feathered transition.
 *
 *  @param src   CV_8UC3 or CV_32FC3 image.
 *  @param dst   float (CV_32FC3) matted result in [0,1].
 *  @param firstPoint/secondPoint  opposite corners of the ellipse's bounding box.
 *  @param sigmaX/sigmaY  Gaussian feather widths (at least one must be > 0).
 *  @param ksize  Gaussian kernel size.
 *  @return 0 on success.
 */
int matte(InputArray src, OutputArray dst, Point firstPoint, Point secondPoint, float sigmaX, float sigmaY, Size ksize)
{
    CV_Assert((src.type() == CV_8UC3) || (src.type() == CV_32FC3));
    CV_Assert((sigmaX > 0.0f) || (sigmaY > 0.0f));
    Mat srcImg = src.getMat();
    CV_Assert(!(srcImg.empty()));
    // 8-bit input is normalized to float [0,1]; float input is used as-is.
    if(srcImg.type() != CV_32FC3)
    {
        srcImg.convertTo(srcImg, CV_32FC3, 1.0f/255.0f);
    }
    // Note: these can be negative depending on point order; topleftFind()
    // is expected to resolve the actual top-left corner.
    int xsize = firstPoint.x - secondPoint.x;
    int ysize = firstPoint.y - secondPoint.y;
    Point topLeft = topleftFind(firstPoint, secondPoint, xsize, ysize);
    const Scalar black = Scalar(0.0f,0.0f,0.0f);
    const Scalar white = Scalar(1.0f,1.0f,1.0f);
    // Binary ellipse mask (1 inside, 0 outside), feathered below.
    Mat mask(srcImg.rows, srcImg.cols, CV_32FC1, black);
    // NOTE(review): center y is computed as (topLeft.y - ysize/2); for a
    // top-left anchor '+' would be expected -- confirm against topleftFind().
    ellipse(mask, Point((topLeft.x+xsize/2),(topLeft.y-ysize/2)), Size(xsize/2,ysize/2), 0, 0, 360, white, -1);
    GaussianBlur(mask, mask, ksize, sigmaX, sigmaY);
    // Composite per channel: img*mask + (1-mask) -> white outside the ellipse.
    vector<Mat> ch_img;
    split(srcImg,ch_img);
    ch_img[0]=ch_img[0].mul(mask)+1.0f-mask;
    ch_img[1]=ch_img[1].mul(mask)+1.0f-mask;
    ch_img[2]=ch_img[2].mul(mask)+1.0f-mask;
    merge(ch_img,dst);
    return 0;
}
/** Computes the homography H mapping a canonical square with corners
 *  (+/- halfLength, +/- halfLength) to four observed target points, using a
 *  closed-form solution (no SVD).
 *
 *  @param _targetPoints  four 2D points, CV_32FC2 or CV_64FC2.
 *  @param halfLength     half the square's side length.
 *  @param H_             output 3x3 CV_64FC1 homography (H(2,2) fixed to 1).
 */
void IPPE::PoseSolver::homographyFromSquarePoints(InputArray _targetPoints, double halfLength, OutputArray H_)
{
    // '|' works here since both operands are bool, though '||' is conventional.
    assert((_targetPoints.type() == CV_32FC2) | (_targetPoints.type() == CV_64FC2));

    cv::Mat pts = _targetPoints.getMat();
    H_.create(3, 3, CV_64FC1);
    Mat H = H_.getMat();

    // Corner coordinates, negated to match the derivation's sign convention.
    double p1x, p1y;
    double p2x, p2y;
    double p3x, p3y;
    double p4x, p4y;

    if (_targetPoints.type() == CV_32FC2) {
        p1x = -pts.at<Vec2f>(0)(0);
        p1y = -pts.at<Vec2f>(0)(1);

        p2x = -pts.at<Vec2f>(1)(0);
        p2y = -pts.at<Vec2f>(1)(1);

        p3x = -pts.at<Vec2f>(2)(0);
        p3y = -pts.at<Vec2f>(2)(1);

        p4x = -pts.at<Vec2f>(3)(0);
        p4y = -pts.at<Vec2f>(3)(1);
    }
    else {
        p1x = -pts.at<Vec2d>(0)(0);
        p1y = -pts.at<Vec2d>(0)(1);

        p2x = -pts.at<Vec2d>(1)(0);
        p2y = -pts.at<Vec2d>(1)(1);

        p3x = -pts.at<Vec2d>(2)(0);
        p3y = -pts.at<Vec2d>(2)(1);

        p4x = -pts.at<Vec2d>(3)(0);
        p4y = -pts.at<Vec2d>(3)(1);
    }

    //analytic solution:
    // Reciprocal of the system determinant; degenerate (collinear) corner
    // configurations would make the denominator zero.
    double detsInv = -1 / (halfLength * (p1x * p2y - p2x * p1y - p1x * p4y + p2x * p3y - p3x * p2y + p4x * p1y + p3x * p4y - p4x * p3y));

    H.at<double>(0, 0) = detsInv * (p1x * p3x * p2y - p2x * p3x * p1y - p1x * p4x * p2y + p2x * p4x * p1y - p1x * p3x * p4y + p1x * p4x * p3y + p2x * p3x * p4y - p2x * p4x * p3y);
    H.at<double>(0, 1) = detsInv * (p1x * p2x * p3y - p1x * p3x * p2y - p1x * p2x * p4y + p2x * p4x * p1y + p1x * p3x * p4y - p3x * p4x * p1y - p2x * p4x * p3y + p3x * p4x * p2y);
    H.at<double>(0, 2) = detsInv * halfLength * (p1x * p2x * p3y - p2x * p3x * p1y - p1x * p2x * p4y + p1x * p4x * p2y - p1x * p4x * p3y + p3x * p4x * p1y + p2x * p3x * p4y - p3x * p4x * p2y);
    H.at<double>(1, 0) = detsInv * (p1x * p2y * p3y - p2x * p1y * p3y - p1x * p2y * p4y + p2x * p1y * p4y - p3x * p1y * p4y + p4x * p1y * p3y + p3x * p2y * p4y - p4x * p2y * p3y);
    H.at<double>(1, 1) = detsInv * (p2x * p1y * p3y - p3x * p1y * p2y - p1x * p2y * p4y + p4x * p1y * p2y + p1x * p3y * p4y - p4x * p1y * p3y - p2x * p3y * p4y + p3x * p2y * p4y);
    H.at<double>(1, 2) = detsInv * halfLength * (p1x * p2y * p3y - p3x * p1y * p2y - p2x * p1y * p4y + p4x * p1y * p2y - p1x * p3y * p4y + p3x * p1y * p4y + p2x * p3y * p4y - p4x * p2y * p3y);
    H.at<double>(2, 0) = -detsInv * (p1x * p3y - p3x * p1y - p1x * p4y - p2x * p3y + p3x * p2y + p4x * p1y + p2x * p4y - p4x * p2y);
    H.at<double>(2, 1) = detsInv * (p1x * p2y - p2x * p1y - p1x * p3y + p3x * p1y + p2x * p4y - p4x * p2y - p3x * p4y + p4x * p3y);
    H.at<double>(2, 2) = 1.0;   // fixed scale
}
/** Trivial (pass-through) outlier rejector: every tracked point pair is
 *  marked as an inlier. The mask is a 1 x npoints CV_8U row of ones.
 */
void NullOutlierRejector::process(
        Size /*frameSize*/, InputArray points0, InputArray points1, OutputArray mask)
{
    // Both point sets must have the same element type and the same count.
    CV_Assert(points0.type() == points1.type());
    CV_Assert(points0.getMat().checkVector(2) == points1.getMat().checkVector(2));

    const int npoints = points0.getMat().checkVector(2);

    mask.create(1, npoints, CV_8U);
    mask.getMat().setTo(1);
}
/** Adds Gaussian noise (sigma) and optional salt & pepper noise (sprate)
 *  to src_, writing into dest_. A non-zero seed makes the noise
 *  reproducible by setting the global RNG state.
 *
 *  Note: the global RNG is re-randomized afterwards only on the
 *  multi-channel path, matching the original control flow (the mono path
 *  returns early).
 */
void addNoise(InputArray src_, OutputArray dest_, const double sigma, const double sprate, const int seed)
{
    if (seed != 0)
        cv::theRNG().state = seed;

    if (dest_.empty() || dest_.size() != src_.size() || dest_.type() != src_.type())
        dest_.create(src_.size(), src_.type());

    Mat src = src_.getMat();
    Mat dest = dest_.getMat();

    if (src.channels() == 1)
    {
        // Single plane: noise is applied directly.
        addNoiseMono(src, dest, sigma);
        if (sprate != 0)
            addNoiseSoltPepperMono(dest, dest, sprate, seed);
        return;
    }

    // Multi-channel: process each plane independently, then recombine.
    vector<Mat> planes(src.channels());
    vector<Mat> noisy(src.channels());
    split(src, planes);
    for (int c = 0; c < src.channels(); c++)
    {
        addNoiseMono(planes[c], noisy[c], sigma);
        if (sprate != 0)
            addNoiseSoltPepperMono(noisy[c], noisy[c], sprate, seed);
    }
    cv::merge(noisy, dest);

    // Undo the deterministic seeding so later callers see fresh noise.
    if (seed != 0)
        cv::theRNG().state = cv::getTickCount();
}
/** Dispatches extended-LBP feature extraction to the template implementation
 *  matching the (single-channel) input depth. Errors out for multi-channel
 *  or unsupported types.
 */
static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
{
    int type = src.type();
    switch (type) {
    case CV_8SC1:   elbp_<char>(src,dst, radius, neighbors); break;
    case CV_8UC1:   elbp_<unsigned char>(src, dst, radius, neighbors); break;
    case CV_16SC1:  elbp_<short>(src,dst, radius, neighbors); break;
    case CV_16UC1:  elbp_<unsigned short>(src,dst, radius, neighbors); break;
    case CV_32SC1:  elbp_<int>(src,dst, radius, neighbors); break;
    case CV_32FC1:  elbp_<float>(src,dst, radius, neighbors); break;
    case CV_64FC1:  elbp_<double>(src,dst, radius, neighbors); break;
    default:
        String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
        CV_Error(CV_StsNotImplemented, error_msg);
        break;
    }
}
/*! Niblack binarization algorithm.
@param src [in] Mat, single channel uchar image.
@param dst [out] Mat, result image (CV_8UC1, 0/255).
@param windowSize [in] int, window size for calculation (forced odd).
@param k [in] float, parameter for local threshold.
@return int, 0x0000 = Success.

Local threshold per pixel: T = mean + k * stddev over the window.

Fixes vs. the original:
 - 'windowSize / 2 == 0' never detected even sizes; use '% 2'.
 - stddev computed from 'power_avg - power_avg' (always zero); the
   variance is E(x^2) - (E x)^2, i.e. power_avg - avg_power.
 - 'destination = source > threshold' rebound the local Mat header, so the
   caller's output buffer was never written; compare() writes in place.
*/
int Niblack(InputArray src, OutputArray dst, int windowSize, float k)
{
    if (src.type() != CV_8UC1 || src.empty())
        return 0x0001; /*!< source image type not supported. */

    /*! window size must be >= 2 and odd. */
    if (windowSize < 2)
        return 0x0002; /*!< window size not supported. */
    if (windowSize % 2 == 0)
        windowSize++;

    Mat source, destination;
    Mat sourceUchar = src.getMat();
    sourceUchar.convertTo(source, CV_32FC1);

    /*! calculate mean and variance via D(x) = E(x^2) - (Ex)^2 */
    Mat avg, power, avg_power, power_avg;
    Mat standard;
    boxFilter(source, avg, -1, Size(windowSize, windowSize));
    pow(avg, 2, avg_power);
    pow(source, 2, power);
    boxFilter(power, power_avg, -1, Size(windowSize, windowSize));
    // Variance can dip slightly below zero from float rounding; sqrt of a
    // negative float yields NaN for those pixels, so clamp first.
    max(power_avg - avg_power, 0.0f, power_avg);
    sqrt(power_avg, standard);

    /*! calculate local threshold */
    Mat threshold = avg + k * standard;

    /*! Output result: 255 where source > threshold, else 0. */
    dst.create(sourceUchar.size(), CV_8UC1);
    destination = dst.getMat();
    compare(source, threshold, destination, CMP_GT);

    return 0x0000;
}
/** Tiles _src ny times vertically and nx times horizontally into _dst.
 *
 *  Strategy: first fill the top band (one source-height of rows) by copying
 *  the source row repeatedly across the width, then replicate whole rows
 *  downward from the already-filled region.
 */
void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    CV_Assert( _src.dims() <= 2 );
    CV_Assert( ny > 0 && nx > 0 );

    Size ssize = _src.size();
    _dst.create(ssize.height*ny, ssize.width*nx, _src.type());

    CV_OCL_RUN(_dst.isUMat(),
               ocl_repeat(_src, ny, nx, _dst))

    Mat src = _src.getMat(), dst = _dst.getMat();
    Size dsize = dst.size();
    int esz = (int)src.elemSize();
    int x, y;
    // Work in bytes from here on.
    ssize.width *= esz; dsize.width *= esz;

    // Top band: tile each source row across the full destination width.
    for( y = 0; y < ssize.height; y++ )
    {
        for( x = 0; x < dsize.width; x += ssize.width )
            memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width );
    }

    // Remaining rows: copy from the row one source-height above (already filled).
    for( ; y < dsize.height; y++ )
        memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width );
}
/** Detects keypoints via goodFeaturesToTrack (GFTT).
 *
 *  Converts a color input to grayscale if needed, runs the corner detector
 *  with the instance's configuration (nfeatures, qualityLevel, minDistance,
 *  blockSize, useHarrisDetector, k), and wraps each corner in a KeyPoint
 *  whose size is the block size. Keeps data on the GPU/UMat path when the
 *  input is a UMat.
 */
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
    CV_INSTRUMENT_REGION()

    std::vector<Point2f> corners;

    if (_image.isUMat())
    {
        // UMat path: stay on the OpenCL-backed types.
        UMat ugrayImage;
        if( _image.type() != CV_8U )
            cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
        else
            ugrayImage = _image.getUMat();

        goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                             blockSize, useHarrisDetector, k );
    }
    else
    {
        Mat image = _image.getMat(), grayImage = image;
        if( image.type() != CV_8U )
            cvtColor( image, grayImage, COLOR_BGR2GRAY );

        goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                             blockSize, useHarrisDetector, k );
    }

    // Corners -> keypoints with diameter = blockSize.
    keypoints.resize(corners.size());
    std::vector<Point2f>::const_iterator corner_it = corners.begin();
    std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
}
/** OpenCL integral-image computation in two passes (column sums, then row
 *  sums through a transposed temporary).
 *
 *  @return false when the input/alignment/depth is unsupported or a kernel
 *          fails, so the caller can fall back to the CPU path.
 *
 *  Fix: the kernel launches read 'k1.run(1, >, <, false)' and
 *  'k2.run(1, >2, <2, false)' — a mangled form (HTML-unescaped '&gt'/'&lt')
 *  of the addresses of the global/local sizes. Restored to
 *  '&gt, &lt' / '&gt2, &lt2', matching ocl::Kernel::run's
 *  (dims, globalsize*, localsize*, sync) signature.
 */
static bool ocl_integral( InputArray _src, OutputArray _sum, int sdepth )
{
    if ( _src.type() != CV_8UC1 || _src.step() % vlen != 0 || _src.offset() % vlen != 0 ||
         !(sdepth == CV_32S || sdepth == CV_32F) )
        return false;

    // Pass 1: per-column sums into a transposed temporary.
    ocl::Kernel k1("integral_sum_cols", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    if (k1.empty())
        return false;

    Size size = _src.size(),
         t_size = Size(((size.height + vlen - 1) / vlen) * vlen, size.width),
         ssize(size.width + 1, size.height + 1);  // integral image is 1 larger in each dim
    _sum.create(ssize, sdepth);

    UMat src = _src.getUMat(), t_sum(t_size, sdepth), sum = _sum.getUMat();
    t_sum = t_sum(Range::all(), Range(0, size.height));

    int offset = (int)src.offset / vlen, pre_invalid = (int)src.offset % vlen;
    int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;
    int sum_offset = (int)sum.offset / vlen;

    k1.args(ocl::KernelArg::PtrReadOnly(src), ocl::KernelArg::PtrWriteOnly(t_sum),
            offset, pre_invalid, src.rows, src.cols, (int)src.step, (int)t_sum.step);
    size_t gt = ((vcols + 1) / 2) * 256, lt = 256;
    if (!k1.run(1, &gt, &lt, false))
        return false;

    // Pass 2: sum along the transposed rows into the final integral image.
    ocl::Kernel k2("integral_sum_rows", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    k2.args(ocl::KernelArg::PtrReadWrite(t_sum), ocl::KernelArg::PtrWriteOnly(sum),
            t_sum.rows, t_sum.cols, (int)t_sum.step, (int)sum.step, sum_offset);

    size_t gt2 = t_sum.cols * 32, lt2 = 256;
    return k2.run(1, &gt2, &lt2, false);
}
/** Accumulates one image into the feather-blend panorama.
 *
 *  Builds a feathering weight map from the mask, then adds weight-scaled
 *  pixel values into the destination accumulator and the per-pixel weights
 *  into dst_weight_map_. Normalization happens later in blend().
 *
 *  @param _img  CV_16SC3 image tile.
 *  @param mask  CV_8U coverage mask for the tile.
 *  @param tl    top-left position of the tile within the output ROI.
 */
void FeatherBlender::feed(InputArray _img, InputArray mask, Point tl)
{
    Mat img = _img.getMat();
    Mat dst = dst_.getMat(ACCESS_RW);

    CV_Assert(img.type() == CV_16SC3);
    CV_Assert(mask.type() == CV_8U);

    createWeightMap(mask, sharpness_, weight_map_);
    Mat weight_map = weight_map_.getMat(ACCESS_READ);
    Mat dst_weight_map = dst_weight_map_.getMat(ACCESS_RW);

    // Offset of the tile within the accumulator.
    int dx = tl.x - dst_roi_.x;
    int dy = tl.y - dst_roi_.y;

    for (int y = 0; y < img.rows; ++y)
    {
        const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);
        Point3_<short>* dst_row = dst.ptr<Point3_<short> >(dy + y);
        const float* weight_row = weight_map.ptr<float>(y);
        float* dst_weight_row = dst_weight_map.ptr<float>(dy + y);

        for (int x = 0; x < img.cols; ++x)
        {
            // Weighted accumulation of each channel (truncated back to short).
            dst_row[dx + x].x += static_cast<short>(src_row[x].x * weight_row[x]);
            dst_row[dx + x].y += static_cast<short>(src_row[x].y * weight_row[x]);
            dst_row[dx + x].z += static_cast<short>(src_row[x].z * weight_row[x]);
            dst_weight_row[dx + x] += weight_row[x];
        }
    }
}
/** Constructs an OpenGL buffer from a host Mat or a GpuMat.
 *
 *  Without OpenGL support this always throws. With CUDA available, a GpuMat
 *  source is uploaded device-to-device; otherwise the data is taken from the
 *  host Mat. Dimensions/type are cached in the members for later queries.
 */
cv::GlBuffer::GlBuffer(InputArray mat_, Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)
{
#ifndef HAVE_OPENGL
    (void)mat_;
    (void)_usage;
    throw_nogl;
#else
    int kind = mat_.kind();
    Size _size = mat_.size();
    int _type = mat_.type();

    if (kind == _InputArray::GPU_MAT)
    {
        // Device source: requires a CUDA build to copy into the GL buffer.
        #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
            throw_nocuda;
        #else
            GpuMat d_mat = mat_.getGpuMat();
            impl_ = new Impl(d_mat.rows, d_mat.cols, d_mat.type(), _usage);
            impl_->copyFrom(d_mat);
        #endif
    }
    else
    {
        // Host source.
        Mat mat = mat_.getMat();
        impl_ = new Impl(mat, _usage);
    }

    rows_ = _size.height;
    cols_ = _size.width;
    type_ = _type;
#endif
}
/** Computes the sum of all elements of _src with an OpenCL reduction kernel,
 *  storing the (float) result in a 1x1 CV_32FC1 UMat.
 *
 *  @return false if the kernel cannot be built (caller falls back).
 */
static bool sumTemplate(InputArray _src, UMat & result)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    int wdepth = CV_32F, wtype = CV_MAKE_TYPE(wdepth, cn);
    size_t wgs = ocl::Device::getDefault().maxWorkGroupSize();

    // Largest power of two not exceeding the max work-group size.
    int wgs2_aligned = 1;
    while (wgs2_aligned < (int)wgs)
        wgs2_aligned <<= 1;
    wgs2_aligned >>= 1;

    char cvt[40];
    ocl::Kernel k("calcSum", ocl::imgproc::match_template_oclsrc,
                  format("-D CALC_SUM -D T=%s -D T1=%s -D WT=%s -D cn=%d -D convertToWT=%s -D WGS=%d -D WGS2_ALIGNED=%d",
                         ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(wtype), cn,
                         ocl::convertTypeStr(depth, wdepth, cn, cvt),
                         (int)wgs, wgs2_aligned));
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    result.create(1, 1, CV_32FC1);

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
            resarg = ocl::KernelArg::PtrWriteOnly(result);

    k.args(srcarg, src.cols, (int)src.total(), resarg);

    // Single work-group reduction.
    size_t globalsize = wgs;
    return k.run(1, &globalsize, &wgs, false);
}
/** Inverse warp: produces a dst image of dst_size whose pixels are sampled
 *  from src through the projector's forward mapping.
 *
 *  The source is expected to cover exactly the ROI that detectResultRoi
 *  reports for dst_size (asserted below). Builds dense float remap tables
 *  and delegates the sampling to cv::remap.
 */
void TranslationWarperBase<P>::warpBackward(InputArray src, InputArray K, InputArray R, InputArray t, int interp_mode, int border_mode, Size dst_size, OutputArray dst)
{
    projector_.setCameraParams(K, R, t);

    Point src_tl, src_br;
    detectResultRoi(dst_size, src_tl, src_br);

    Size size = src.size();
    CV_Assert(src_br.x - src_tl.x + 1 == size.width && src_br.y - src_tl.y + 1 == size.height);

    Mat xmap(dst_size, CV_32F);
    Mat ymap(dst_size, CV_32F);

    // Map every destination pixel forward, then shift into source-local coords.
    float u, v;
    for (int y = 0; y < dst_size.height; ++y)
    {
        for (int x = 0; x < dst_size.width; ++x)
        {
            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
            xmap.at<float>(y, x) = u - src_tl.x;
            ymap.at<float>(y, x) = v - src_tl.y;
        }
    }

    dst.create(dst_size, src.type());
    remap(src, dst, xmap, ymap, interp_mode, border_mode);
}
/** OpenCL SQDIFF_NORMED template matching.
 *
 *  Uses the identity SQDIFF = ||I||^2 - 2*CCORR + ||T||^2: first computes the
 *  cross-correlation into _result, then the kernel normalizes it using the
 *  image's squared-sum integral and the template's squared sum.
 *
 *  @return false if the kernel can't be built or the template sum fails.
 */
static bool matchTemplate_SQDIFF_NORMED(InputArray _image, InputArray _templ, OutputArray _result)
{
    // CCORR term; _result is reused and normalized in-place by the kernel.
    matchTemplate(_image, _templ, _result, CV_TM_CCORR);

    int type = _image.type(), cn = CV_MAT_CN(type);

    ocl::Kernel k("matchTemplate_SQDIFF_NORMED", ocl::imgproc::match_template_oclsrc,
                  format("-D SQDIFF_NORMED -D T=%s -D cn=%d", ocl::typeToStr(type), cn));
    if (k.empty())
        return false;

    UMat image = _image.getUMat(), templ = _templ.getUMat();
    _result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
    UMat result = _result.getUMat();

    // Integral of squared pixel values (channels flattened).
    UMat image_sums, image_sqsums;
    integral(image.reshape(1), image_sums, image_sqsums, CV_32F, CV_32F);

    UMat templ_sqsum;
    if (!sumTemplate(_templ, templ_sqsum))
        return false;

    k.args(ocl::KernelArg::ReadOnlyNoSize(image_sqsums), ocl::KernelArg::ReadWrite(result),
           templ.rows, templ.cols, ocl::KernelArg::PtrReadOnly(templ_sqsum));

    size_t globalsize[2] = { result.cols, result.rows };

    return k.run(2, globalsize, NULL, false);
}
/** Converts a 3x3 rotation matrix to a Rodrigues rotation vector.
 *
 *  @param _R  3x3 CV_64FC1 rotation matrix.
 *  @param _r  output 3x1 CV_64FC1 rotation vector (angle * unit axis).
 *
 *  Fix: the scale factor d = w_norm / (2*sin(w_norm)) was computed BEFORE
 *  the near-identity check, so for w_norm ~ 0 it divided by ~0 (producing
 *  inf/NaN that was then discarded). It is now computed only on the branch
 *  that uses it.
 */
void IPPE::PoseSolver::rot2vec(InputArray _R, OutputArray _r)
{
    assert(_R.type() == CV_64FC1);
    assert(_R.rows() == 3);
    assert(_R.cols() == 3);

    _r.create(3, 1, CV_64FC1);

    cv::Mat R = _R.getMat();
    cv::Mat rvec = _r.getMat();

    double trace = R.at<double>(0, 0) + R.at<double>(1, 1) + R.at<double>(2, 2);
    // NOTE(review): a noisy R can push (trace-1)/2 slightly outside [-1,1],
    // making acos return NaN — consider clamping upstream if that can occur.
    double w_norm = acos((trace - 1.0) / 2.0);
    double eps = std::numeric_limits<float>::epsilon();

    if (w_norm < eps) //rotation is the identity
    {
        rvec.setTo(0);
    }
    else
    {
        double d = 1 / (2 * sin(w_norm)) * w_norm;
        double c0 = R.at<double>(2, 1) - R.at<double>(1, 2);
        double c1 = R.at<double>(0, 2) - R.at<double>(2, 0);
        double c2 = R.at<double>(1, 0) - R.at<double>(0, 1);
        rvec.at<double>(0) = d * c0;
        rvec.at<double>(1) = d * c1;
        rvec.at<double>(2) = d * c2;
    }
}
/** OpenCL implementation of cv::threshold for the five classic modes.
 *
 *  Rejects unsupported modes and CV_64F inputs on devices without double
 *  support. 'thresh' is floored for integer types (in/out parameter, so the
 *  caller observes the effective threshold).
 *
 *  @return false to make the caller fall back to the CPU path.
 */
static bool ocl_threshold( InputArray _src, OutputArray _dst, double & thresh, double maxval, int thresh_type )
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
        kercn = ocl::predictOptimalVectorWidth(_src, _dst), ktype = CV_MAKE_TYPE(depth, kercn);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if ( !(thresh_type == THRESH_BINARY || thresh_type == THRESH_BINARY_INV || thresh_type == THRESH_TRUNC ||
           thresh_type == THRESH_TOZERO || thresh_type == THRESH_TOZERO_INV) ||
         (!doubleSupport && depth == CV_64F))
        return false;

    // Index must match the THRESH_* constants used above.
    const char * const thresholdMap[] = { "THRESH_BINARY", "THRESH_BINARY_INV", "THRESH_TRUNC",
                                          "THRESH_TOZERO", "THRESH_TOZERO_INV" };
    ocl::Kernel k("threshold", ocl::imgproc::threshold_oclsrc,
                  format("-D %s -D T=%s -D T1=%s%s", thresholdMap[thresh_type],
                         ocl::typeToStr(ktype), ocl::typeToStr(depth),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    _dst.create(src.size(), type);
    UMat dst = _dst.getUMat();

    // Integer thresholds are effectively floor()'d, matching the CPU path.
    if (depth <= CV_32S)
        thresh = cvFloor(thresh);

    k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst, cn, kercn),
           ocl::KernelArg::Constant(Mat(1, 1, depth, Scalar::all(thresh))),
           ocl::KernelArg::Constant(Mat(1, 1, depth, Scalar::all(maxval))));

    size_t globalsize[2] = { dst.cols * cn / kercn, dst.rows };
    return k.run(2, globalsize, NULL, false);
}
/** Advances the CamShift tracker by one frame and returns the new state.
 *
 *  Validates the frame(s) against the geometry/type captured at setup,
 *  (re)builds the appearance model when the current state is invalid,
 *  computes the back-projection and runs CamShift seeded with the previous
 *  location's bounding box.
 *
 *  NOTE(review): 'state.valid' is never set here as far as this block shows;
 *  presumably calculateFeatureSet / CamShift manage it — confirm in the
 *  class implementation.
 */
ObjectState HTCamshift::update(InputArray _rgbFrame, InputArray _depthFrame, InputArray _mask)
{
    CV_Assert(_rgbFrame.size() == rgbFrameSize && _rgbFrame.type() == rgbFrameType);
    if(useDepth == true)
    {
        CV_Assert(_depthFrame.size() == depthFrameSize && _depthFrame.type() == depthFrameType);
    }

    // Cache the color planes for the helpers below.
    split(_rgbFrame, channels);

    // Lazily (re)build the model when tracking was lost or not started.
    if(state.valid == false)
    {
        calculateFeatureSet(_rgbFrame, _depthFrame);
    }

    calculateBackPro(_rgbFrame, _depthFrame, _mask);

    Rect bounding = state.location.boundingRect();
    state.location = CamShift(backPro, bounding, term);

    return state;
}
/** Reference guided-filter implementation: smooths src_ using the guide
 *  image captured by this instance (chNum = guide channels), writing the
 *  result to dst_ at depth dDepth (-1 = source depth).
 *
 *  Pipeline: per-channel means -> guide/src covariances -> linear model
 *  coefficients (alpha, beta) -> mean-filter the coefficients -> apply.
 *
 *  NOTE(review): the raw new[]/delete[] below leaks if any cv:: call throws;
 *  std::vector would be exception-safe, but the helper signatures appear to
 *  take raw Mat*/Mat** — left as-is pending a wider refactor.
 */
void GuidedFilterRefImpl::filter(InputArray src_, OutputArray dst_, int dDepth)
{
    if (dDepth == -1) dDepth = src_.depth();
    dst_.create(height, width, src_.type());
    Mat src = src_.getMat();
    Mat dst = dst_.getMat();
    int cNum = src.channels();
    CV_Assert(height == src.rows && width == src.cols);

    Mat *Ichannels, *exp_I, **vars_I, **alpha, *beta;
    Ichannels = new Mat[cNum];
    exp_I     = new Mat[cNum];
    beta      = new Mat[cNum];
    vars_I    = new Mat *[chNum];
    alpha     = new Mat *[chNum];
    for (int i = 0; i < chNum; ++i){
        vars_I[i] = new Mat[cNum];
        alpha[i]  = new Mat[cNum];
    }

    split(src, Ichannels);
    for (int i = 0; i < cNum; ++i)
    {
        // Work in float; exp_I[i] is the windowed mean of source channel i.
        Ichannels[i].convertTo(Ichannels[i], CV_32F);
        meanFilter(Ichannels[i], exp_I[i]);
    }

    computeCovGuideAndSrc(cNum, vars_I, Ichannels, exp_I);

    computeAlpha(cNum, alpha, vars_I);

    computeBeta(cNum, beta, exp_I, alpha);

    // Smooth the per-pixel linear coefficients before applying them.
    for (int i = 0; i < chNum + 1; ++i)
        for (int j = 0; j < cNum; ++j)
            if (i < chNum)
                meanFilter(alpha[i][j], alpha[i][j]);
            else
                meanFilter(beta[j], beta[j]);

    applyTransform(cNum, Ichannels, beta, alpha, dDepth);
    merge(Ichannels, cNum, dst);

    delete [] Ichannels;
    delete [] exp_I;
    delete [] beta;
    for (int i = 0; i < chNum; ++i)
    {
        delete [] vars_I[i];
        delete [] alpha[i];
    }
    delete [] vars_I;
    delete [] alpha;
}
/** Adds uniform "film grain" noise in [0, grainValue) to src, writing into dst.
 *
 *  @param src         CV_8UC1 or CV_8UC3 image.
 *  @param dst         output of the same size/type (saturating add).
 *  @param grainValue  exclusive upper bound of the per-pixel noise.
 *  @param seed        RNG seed (deterministic grain for a given seed).
 *
 *  Fix: the original ended with 'dstMat = image + noise;', which rebinds the
 *  LOCAL Mat header instead of writing through it — the caller's output
 *  buffer created by dst.create() was never filled. cv::add writes into the
 *  preallocated destination in place (same saturating semantics as '+').
 */
void filmGrain(InputArray src, OutputArray dst, int grainValue, int seed)
{
    CV_Assert(!src.empty());
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3);

    Mat image = src.getMat();
    Mat noise;
    noise.create(image.size(), CV_8UC1);
    RNG rng(seed);
    rng.fill(noise, RNG::UNIFORM, 0, grainValue);

    dst.create(src.size(), src.type());
    Mat dstMat = dst.getMat();

    // Match channel count before adding.
    if(src.type() == CV_8UC3)
    {
        cvtColor(noise, noise, COLOR_GRAY2RGB);
    }
    add(image, noise, dstMat);
}
static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, float h, int templateWindowSize, int searchWindowSize) { int type = _src.type(), cn = CV_MAT_CN(type); Size size = _src.size(); if ( type != CV_8UC1 || type != CV_8UC2 || type != CV_8UC4 ) return false; int templateWindowHalfWize = templateWindowSize / 2; int searchWindowHalfSize = searchWindowSize / 2; templateWindowSize = templateWindowHalfWize * 2 + 1; searchWindowSize = searchWindowHalfSize * 2 + 1; int nblocksx = divUp(size.width, BLOCK_COLS), nblocksy = divUp(size.height, BLOCK_ROWS); int almostTemplateWindowSizeSqBinShift = -1; char cvt[2][40]; String opts = format("-D OP_CALC_FASTNLMEANS -D TEMPLATE_SIZE=%d -D SEARCH_SIZE=%d" " -D uchar_t=%s -D int_t=%s -D BLOCK_COLS=%d -D BLOCK_ROWS=%d" " -D CTA_SIZE=%d -D TEMPLATE_SIZE2=%d -D SEARCH_SIZE2=%d" " -D convert_int_t=%s -D cn=%d -D CTA_SIZE2=%d -D convert_uchar_t=%s", templateWindowSize, searchWindowSize, ocl::typeToStr(type), ocl::typeToStr(CV_32SC(cn)), BLOCK_COLS, BLOCK_ROWS, CTA_SIZE, templateWindowHalfWize, searchWindowHalfSize, ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]), cn, CTA_SIZE >> 1, ocl::convertTypeStr(CV_32S, CV_8U, cn, cvt[1])); ocl::Kernel k("fastNlMeansDenoising", ocl::photo::nlmeans_oclsrc, opts); if (k.empty()) return false; UMat almostDist2Weight; if (!ocl_calcAlmostDist2Weight<float>(almostDist2Weight, searchWindowSize, templateWindowSize, h, cn, almostTemplateWindowSizeSqBinShift)) return false; CV_Assert(almostTemplateWindowSizeSqBinShift >= 0); UMat srcex; int borderSize = searchWindowHalfSize + templateWindowHalfWize; copyMakeBorder(_src, srcex, borderSize, borderSize, borderSize, borderSize, BORDER_DEFAULT); _dst.create(size, type); UMat dst = _dst.getUMat(); int searchWindowSizeSq = searchWindowSize * searchWindowSize; Size upColSumSize(size.width, searchWindowSizeSq * nblocksy); Size colSumSize(nblocksx * templateWindowSize, searchWindowSizeSq * nblocksy); UMat buffer(upColSumSize + colSumSize, CV_32SC(cn)); 
srcex = srcex(Rect(Point(borderSize, borderSize), size)); k.args(ocl::KernelArg::ReadOnlyNoSize(srcex), ocl::KernelArg::WriteOnly(dst), ocl::KernelArg::PtrReadOnly(almostDist2Weight), ocl::KernelArg::PtrReadOnly(buffer), almostTemplateWindowSizeSqBinShift); size_t globalsize[2] = { nblocksx * CTA_SIZE, nblocksy }, localsize[2] = { CTA_SIZE, 1 }; return k.run(2, globalsize, localsize, false); }
/** Copies src into the preallocated dst, converting the channel count when
 *  needed (gray -> BGR/BGRA, BGR <-> BGRA). src must be 8-bit with 1, 3 or
 *  4 channels; dst must be 8-bit with 3 or 4 channels.
 */
static void _prepareImage(InputArray src, const Mat& dst)
{
    CV_CheckType(src.type(), src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4,
                 "Unsupported source image");
    CV_CheckType(dst.type(), dst.type() == CV_8UC3 || dst.type() == CV_8UC4,
                 "Unsupported destination image");

    const int src_cn = src.channels();
    const int dst_cn = dst.channels();

    if (src_cn == dst_cn)
    {
        src.copyTo(dst);
        return;
    }

    // Channel counts differ: pick the matching color conversion.
    if (src_cn == 1)
        cvtColor(src, dst, dst_cn == 3 ? COLOR_GRAY2BGR : COLOR_GRAY2BGRA);
    else if (src_cn == 3 && dst_cn == 4)
        cvtColor(src, dst, COLOR_BGR2BGRA);
    else if (src_cn == 4 && dst_cn == 3)
        cvtColor(src, dst, COLOR_BGRA2BGR);
    else
        CV_Error(Error::StsInternal, "");
}
/** Computes the Sobel (or Scharr, for ksize == -1) derivative of order
 *  (dx, dy), trying Tegra- and IPP-accelerated paths before the generic
 *  separable-filter implementation.
 */
void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                int ksize, double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;   // default: keep the source depth
    int dtype = CV_MAKE_TYPE(ddepth, cn);
    _dst.create( _src.size(), dtype );

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra paths only cover the unscaled, zero-delta case.
    if (tegra::useTegra() && scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 3 && tegra::sobel3x3(src, dst, dx, dy, borderType))
            return;
        if (ksize == -1 && tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#ifdef HAVE_IPP
    CV_IPP_CHECK()
    {
        if (ksize < 0)
        {
            // Negative ksize selects the Scharr kernel.
            if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType))
            {
                CV_IMPL_ADD(CV_IMPL_IPP);
                return;
            }
        }
        else if (0 < ksize)
        {
            if (IPPDerivSobel(_src, _dst, ddepth, dx, dy, ksize, scale, delta, borderType))
            {
                CV_IMPL_ADD(CV_IMPL_IPP);
                return;
            }
        }
    }
#endif

    // Generic path: build separable derivative kernels and filter.
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differenciating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
/** Extracts the translation component of each pose in a trajectory.
 *
 *  @param _traj  vector/Mat of CV_32FC(16) or CV_64FC(16) 4x4 poses.
 *  @return 1 x N Mat of 3-channel points matching the trajectory's depth.
 */
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3));

    // Reinterpret the raw pose data; only the pointer matching the actual
    // depth is dereferenced below.
    const Affine3d* dpath = _traj.getMat().ptr<Affine3d>();
    const Affine3f* fpath = _traj.getMat().ptr<Affine3f>();

    if (_traj.depth() == CV_32F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();

    if (_traj.depth() == CV_64F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();

    return points;
}
/** Warps src with the cylindrical projection defined by K and R.
 *
 *  Builds the backward sampling maps, remaps the source into dst and
 *  returns the top-left corner of the result's ROI in the panorama frame.
 */
Point CylindricalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
                              OutputArray dst)
{
    UMat mapx, mapy;
    const Rect dst_roi = buildMaps(src.size(), K, R, mapx, mapy);

    // ROI spans dst_roi.tl()..dst_roi.br() inclusive, hence the +1.
    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, mapx, mapy, interp_mode, border_mode);

    return dst_roi.tl();
}