bool imwrite( const String& filename, InputArray _img, const std::vector<int>& params ) { Mat img = _img.getMat(); return imwrite_(filename, img, params, false); }
// Merges an exposure bracket into a single HDR radiance map (Debevec-style:
// weighted average, in the log domain, of per-frame radiance estimates).
//   src            - vector of 8-bit images taken at different exposures
//   dst            - output radiance map, CV_32F with the input channel count
//   _times         - exposure times, one per image
//   input_response - camera response curve (LDR_SIZE x 1, same channel count);
//                    if empty, a linear response is generated
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
{
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    Size size = images[0].size();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(images[0].size(), CV_32FCC);
    Mat result = dst.getMat();

    Mat response = input_response.getMat();
    if(response.empty()) {
        response = linearResponse(channels);
        // Avoid log(0) at intensity 0 by reusing the value of bin 1.
        // NOTE(review): Vec3f access assumes a 3-channel response — confirm
        // against linearResponse() for other channel counts.
        response.at<Vec3f>(0) = response.at<Vec3f>(1);
    }
    // Work in the log domain from here on.
    log(response, response);
    CV_Assert(response.rows == LDR_SIZE && response.cols == 1 && response.channels() == channels);

    Mat exp_values(times);
    log(exp_values, exp_values);

    // NOTE(review): 'result' is re-bound to a fresh buffer here, detaching it
    // from the dst.getMat() header obtained above — verify the merged output
    // actually reaches dst for every OutputArray kind.
    result = Mat::zeros(size, CV_32FCC);
    std::vector<Mat> result_split;
    split(result, result_split);
    Mat weight_sum = Mat::zeros(size, CV_32F);

    for(size_t i = 0; i < images.size(); i++) {
        // Per-pixel weight = channel-averaged value of the member weight LUT
        // applied to the input intensities.
        std::vector<Mat> splitted;
        split(images[i], splitted);

        Mat w = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++) {
            LUT(splitted[c], weights, splitted[c]);
            w += splitted[c];
        }
        w /= channels;

        // Radiance estimate of this frame: log(response) - log(exposure time).
        Mat response_img;
        LUT(images[i], response, response_img);
        split(response_img, splitted);
        for(int c = 0; c < channels; c++) {
            result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
        }
        weight_sum += w;
    }

    // Weighted average in the log domain, then back to linear radiance.
    weight_sum = 1.0f / weight_sum;
    for(int c = 0; c < channels; c++) {
        result_split[c] = result_split[c].mul(weight_sum);
    }
    merge(result_split, result);
    exp(result, result);
}
// OpenVX-accelerated Sobel fast path. Returns false when the request cannot
// be handled here, so the caller falls back to the generic implementation.
// Only 8UC1 input -> 16SC1 output, 3x3 kernel, scale 1, delta 0, and a pure
// first-order derivative in exactly one direction are supported:
// ((dx | dy) != 1 || (dx + dy) != 1) rejects everything but (1,0) and (0,1).
static bool openvx_sobel(InputArray _src, OutputArray _dst,
                         int dx, int dy, int ksize,
                         double scale, double delta, int borderType)
{
    if (_src.type() != CV_8UC1 || _dst.type() != CV_16SC1 ||
        ksize != 3 || scale != 1.0 || delta != 0.0 ||
        (dx | dy) != 1 || (dx + dy) != 1 ||
        _src.cols() < ksize || _src.rows() < ksize ||
        ovx::skipSmallImages<VX_KERNEL_SOBEL_3x3>(_src.cols(), _src.rows())
        )
        return false;

    Mat src = _src.getMat();
    Mat dst = _dst.getMat();

    if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
        return false; //Process isolated borders only

    // Map the OpenCV border mode onto OpenVX; anything else is unsupported.
    vx_enum border;
    switch (borderType & ~BORDER_ISOLATED)
    {
    case BORDER_CONSTANT:
        border = VX_BORDER_CONSTANT;
        break;
    case BORDER_REPLICATE:
//        border = VX_BORDER_REPLICATE;
//        break;
    default:
        return false;
    }

    try
    {
        ivx::Context ctx = ovx::getOpenVXContext();
        //if ((vx_size)ksize > ctx.convolutionMaxDimension())
        //    return false;

        // In-place call: OpenVX needs distinct buffers, so work on a copy.
        Mat a;
        if (dst.data != src.data)
            a = src;
        else
            src.copyTo(a);

        // Wrap the cv::Mat buffers as OpenVX images (no pixel copy).
        ivx::Image
            ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
                ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
            ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_S16,
                ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);

        //ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
        //since OpenVX standard says nothing about thread-safety for now
        ivx::border_t prevBorder = ctx.immediateBorder();
        ctx.setImmediateBorder(border, (vx_uint8)(0));
        // vxuSobel3x3 outputs dx and dy through separate images; pass the
        // requested one and NULL for the other.
        if(dx)
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, ib, NULL));
        else
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, NULL, ib));
        // Restore the previous border mode for other users of the context.
        ctx.setImmediateBorder(prevBorder);
    }
    catch (ivx::RuntimeError & e)
    {
        VX_DbgThrow(e.what());
    }
    catch (ivx::WrapperError & e)
    {
        VX_DbgThrow(e.what());
    }

    return true;
}
// Computes the Laplacian of the source image.
// NOTE(review): this chunk contains only the IPP fast path; the function body
// appears truncated in this view (the generic fallback, #endif and closing
// braces are not visible) — confirm against the full file.
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;  // default: keep the source depth
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    // IPP fast path: 3x3/5x5 masks, 8u->16s or 32f->32f single channel, and
    // no non-isolated submatrix ROI.
    if ((ksize == 3 || ksize == 5) &&
        ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) &&
        ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)))
    {
        // scale/delta are applied as separate post-steps; integer variants are
        // only usable when the values are exactly integral.
        int iscale = saturate_cast<int>(scale), idelta = saturate_cast<int>(delta);
        bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1;
        bool floatDelta = std::fabs(delta - idelta) > DBL_EPSILON, needDelta = delta != 0;
        int borderTypeNI = borderType & ~BORDER_ISOLATED;
        Mat src = _src.getMat(), dst = _dst.getMat();

        if (src.data != dst.data)  // IPP filter requires out-of-place operation
        {
            Ipp32s bufsize;
            IppStatus status = (IppStatus)-1;
            IppiSize roisize = { src.cols, src.rows };
            IppiMaskSize masksize = ksize == 3 ? ippMskSize3x3 : ippMskSize5x5;
            IppiBorderType borderTypeIpp = ippiGetBorderType(borderTypeNI);

#define IPP_FILTER_LAPLACIAN(ippsrctype, ippdsttype, ippfavor) \
    do \
    { \
        if (borderTypeIpp >= 0 && ippiFilterLaplacianGetBufferSize_##ippfavor##_C1R(roisize, masksize, &bufsize) >= 0) \
        { \
            Ipp8u * buffer = ippsMalloc_8u(bufsize); \
            status = ippiFilterLaplacianBorder_##ippfavor##_C1R((const ippsrctype *)src.data, (int)src.step, (ippdsttype *)dst.data, \
                (int)dst.step, roisize, masksize, borderTypeIpp, 0, buffer); \
            ippsFree(buffer); \
        } \
    } while ((void)0, 0)

            CV_SUPPRESS_DEPRECATED_START
            if (sdepth == CV_8U && ddepth == CV_16S && !floatScale && !floatDelta)
            {
                IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s);
                if (needScale && status >= 0)
                    status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
                if (needDelta && status >= 0)
                    status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
            }
            else if (sdepth == CV_32F && ddepth == CV_32F)
            {
                IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f);
                if (needScale && status >= 0)
                    status = ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, roisize);
                if (needDelta && status >= 0)
                    status = ippiAddC_32f_C1IR((Ipp32f)delta, (Ipp32f *)dst.data, (int)dst.step, roisize);
            }
            CV_SUPPRESS_DEPRECATED_END

            // IPP succeeded; otherwise fall through to the generic path
            // (not visible in this chunk).
            if (status >= 0)
                return;
        }
// Recovers the relative camera rotation and translation from an essential
// matrix via the cheirality check: among the four (R, t) decompositions, the
// one placing the most triangulated points in front of both cameras wins.
// Returns the number of points passing the check for the winning hypothesis;
// _mask (if needed) marks those inliers.
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
                 OutputArray _R, OutputArray _t, InputOutputArray _mask)
{
    Mat points1, points2, cameraMatrix;
    _points1.getMat().convertTo(points1, CV_64F);
    _points2.getMat().convertTo(points2, CV_64F);
    _cameraMatrix.getMat().convertTo(cameraMatrix, CV_64F);

    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
                              points1.type() == points2.type());

    CV_Assert(cameraMatrix.rows == 3 && cameraMatrix.cols == 3 && cameraMatrix.channels() == 1);

    // Flatten 2-channel point matrices to Nx2 single-channel.
    if (points1.channels() > 1)
    {
        points1 = points1.reshape(1, npoints);
        points2 = points2.reshape(1, npoints);
    }

    // Normalize image coordinates using the pinhole intrinsics.
    double fx = cameraMatrix.at<double>(0,0);
    double fy = cameraMatrix.at<double>(1,1);
    double cx = cameraMatrix.at<double>(0,2);
    double cy = cameraMatrix.at<double>(1,2);

    points1.col(0) = (points1.col(0) - cx) / fx;
    points2.col(0) = (points2.col(0) - cx) / fx;
    points1.col(1) = (points1.col(1) - cy) / fy;
    points2.col(1) = (points2.col(1) - cy) / fy;

    // triangulatePoints expects 2xN input.
    points1 = points1.t();
    points2 = points2.t();

    Mat R1, R2, t;
    decomposeEssentialMat(E, R1, R2, t);

    // Four candidate camera matrices: (R1,t), (R2,t), (R1,-t), (R2,-t),
    // with the first camera at the origin (P0).
    Mat P0 = Mat::eye(3, 4, R1.type());
    Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type());
    P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0;
    P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0;
    P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0;
    P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0;

    // Do the cheirality check.
    // Notice here a threshold dist is used to filter
    // out far away points (i.e. infinite points) since
    // their depth may vary between positive and negative.
    double dist = 50.0;
    Mat Q;

    // Hypothesis 1: positive depth in both cameras, and depth below 'dist'.
    triangulatePoints(P0, P1, points1, points2, Q);
    Mat mask1 = Q.row(2).mul(Q.row(3)) > 0;  // depth sign before dehomogenization
    Q.row(0) /= Q.row(3);
    Q.row(1) /= Q.row(3);
    Q.row(2) /= Q.row(3);
    Q.row(3) /= Q.row(3);
    mask1 = (Q.row(2) < dist) & mask1;
    Q = P1 * Q;                              // depth in the second camera frame
    mask1 = (Q.row(2) > 0) & mask1;
    mask1 = (Q.row(2) < dist) & mask1;

    // Hypothesis 2.
    triangulatePoints(P0, P2, points1, points2, Q);
    Mat mask2 = Q.row(2).mul(Q.row(3)) > 0;
    Q.row(0) /= Q.row(3);
    Q.row(1) /= Q.row(3);
    Q.row(2) /= Q.row(3);
    Q.row(3) /= Q.row(3);
    mask2 = (Q.row(2) < dist) & mask2;
    Q = P2 * Q;
    mask2 = (Q.row(2) > 0) & mask2;
    mask2 = (Q.row(2) < dist) & mask2;

    // Hypothesis 3.
    triangulatePoints(P0, P3, points1, points2, Q);
    Mat mask3 = Q.row(2).mul(Q.row(3)) > 0;
    Q.row(0) /= Q.row(3);
    Q.row(1) /= Q.row(3);
    Q.row(2) /= Q.row(3);
    Q.row(3) /= Q.row(3);
    mask3 = (Q.row(2) < dist) & mask3;
    Q = P3 * Q;
    mask3 = (Q.row(2) > 0) & mask3;
    mask3 = (Q.row(2) < dist) & mask3;

    // Hypothesis 4.
    triangulatePoints(P0, P4, points1, points2, Q);
    Mat mask4 = Q.row(2).mul(Q.row(3)) > 0;
    Q.row(0) /= Q.row(3);
    Q.row(1) /= Q.row(3);
    Q.row(2) /= Q.row(3);
    Q.row(3) /= Q.row(3);
    mask4 = (Q.row(2) < dist) & mask4;
    Q = P4 * Q;
    mask4 = (Q.row(2) > 0) & mask4;
    mask4 = (Q.row(2) < dist) & mask4;

    mask1 = mask1.t();
    mask2 = mask2.t();
    mask3 = mask3.t();
    mask4 = mask4.t();

    // If _mask is given, then use it to filter outliers.
    if (!_mask.empty())
    {
        Mat mask = _mask.getMat();
        CV_Assert(mask.size() == mask1.size());
        bitwise_and(mask, mask1, mask1);
        bitwise_and(mask, mask2, mask2);
        bitwise_and(mask, mask3, mask3);
        bitwise_and(mask, mask4, mask4);
    }
    if (_mask.empty() && _mask.needed())
    {
        _mask.create(mask1.size(), CV_8U);
    }

    CV_Assert(_R.needed() && _t.needed());
    _R.create(3, 3, R1.type());
    _t.create(3, 1, t.type());

    // Pick the hypothesis with the most points passing the cheirality check.
    int good1 = countNonZero(mask1);
    int good2 = countNonZero(mask2);
    int good3 = countNonZero(mask3);
    int good4 = countNonZero(mask4);

    if (good1 >= good2 && good1 >= good3 && good1 >= good4)
    {
        R1.copyTo(_R);
        t.copyTo(_t);
        if (_mask.needed()) mask1.copyTo(_mask);
        return good1;
    }
    else if (good2 >= good1 && good2 >= good3 && good2 >= good4)
    {
        R2.copyTo(_R);
        t.copyTo(_t);
        if (_mask.needed()) mask2.copyTo(_mask);
        return good2;
    }
    else if (good3 >= good1 && good3 >= good2 && good3 >= good4)
    {
        t = -t;
        R1.copyTo(_R);
        t.copyTo(_t);
        if (_mask.needed()) mask3.copyTo(_mask);
        return good3;
    }
    else
    {
        t = -t;
        R2.copyTo(_R);
        t.copyTo(_t);
        if (_mask.needed()) mask4.copyTo(_mask);
        return good4;
    }
}
// OpenCL implementation of the Canny edge detector (stage 1: gradients + NMS +
// double thresholding; stage 2: hysteresis; final pass extracts the edge map).
// Returns false when any kernel fails to build or run, so the caller can fall
// back to the CPU path.
// NOTE(review): 'useCustomDeriv' is referenced below but not declared in this
// view — upstream this function is a template (template <bool useCustomDeriv>);
// presumably that line was lost in extraction. Confirm against the full file.
static bool ocl_Canny(InputArray _src, const UMat& dx_, const UMat& dy_, OutputArray _dst, float low_thresh, float high_thresh,
                      int aperture_size, bool L2gradient, int cn, const Size & size)
{
    CV_INSTRUMENT_REGION_OPENCL()

    UMat map;

    const ocl::Device &dev = ocl::Device::getDefault();
    int max_wg_size = (int)dev.maxWorkGroupSize();

    // Pick a 2D work-group shape that fits the device's work-group limit.
    int lSizeX = 32;
    int lSizeY = max_wg_size / 32;

    if (lSizeY == 0)
    {
        lSizeX = 16;
        lSizeY = max_wg_size / 16;
    }
    if (lSizeY == 0)
    {
        lSizeY = 1;
    }

    // The aperture-7 Sobel scales gradients by 16; compensate the thresholds.
    if (aperture_size == 7)
    {
        low_thresh = low_thresh / 16.0f;
        high_thresh = high_thresh / 16.0f;
    }

    if (L2gradient)
    {
        // L2 mode compares squared magnitudes; clamp so squares stay in range.
        low_thresh = std::min(32767.0f, low_thresh);
        high_thresh = std::min(32767.0f, high_thresh);

        if (low_thresh > 0) low_thresh *= low_thresh;
        if (high_thresh > 0) high_thresh *= high_thresh;
    }
    int low = cvFloor(low_thresh), high = cvFloor(high_thresh);

    if (!useCustomDeriv &&
        aperture_size == 3 && !_src.isSubmatrix())
    {
        /*
            stage1_with_sobel:
                Sobel operator
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        char cvt[40];
        ocl::Kernel with_sobel("stage1_with_sobel", ocl::imgproc::canny_oclsrc,
                               format("-D WITH_SOBEL -D cn=%d -D TYPE=%s -D convert_floatN=%s -D floatN=%s -D GRP_SIZEX=%d -D GRP_SIZEY=%d%s",
                                      cn, ocl::memopTypeToStr(_src.depth()),
                                      ocl::convertTypeStr(_src.depth(), CV_32F, cn, cvt),
                                      ocl::typeToStr(CV_MAKE_TYPE(CV_32F, cn)),
                                      lSizeX, lSizeY,
                                      L2gradient ? " -D L2GRAD" : ""));
        if (with_sobel.empty())
            return false;

        UMat src = _src.getUMat();
        map.create(size, CV_32S);
        with_sobel.args(ocl::KernelArg::ReadOnly(src),
                        ocl::KernelArg::WriteOnlyNoSize(map),
                        (float) low, (float) high);

        size_t globalsize[2] = { (size_t)size.width, (size_t)size.height },
                localsize[2] = { (size_t)lSizeX, (size_t)lSizeY };

        if (!with_sobel.run(2, globalsize, localsize, false))
            return false;
    }
    else
    {
        /*
            stage1_without_sobel:
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        double scale = 1.0;
        if (aperture_size == 7)
        {
            scale = 1 / 16.0;
        }

        UMat dx, dy;
        if (!useCustomDeriv)
        {
            Sobel(_src, dx, CV_16S, 1, 0, aperture_size, scale, 0, BORDER_REPLICATE);
            Sobel(_src, dy, CV_16S, 0, 1, aperture_size, scale, 0, BORDER_REPLICATE);
        }
        else
        {
            // Caller supplied precomputed derivatives.
            dx = dx_;
            dy = dy_;
        }

        ocl::Kernel without_sobel("stage1_without_sobel", ocl::imgproc::canny_oclsrc,
                                  format("-D WITHOUT_SOBEL -D cn=%d -D GRP_SIZEX=%d -D GRP_SIZEY=%d%s",
                                         cn, lSizeX, lSizeY, L2gradient ? " -D L2GRAD" : ""));
        if (without_sobel.empty())
            return false;

        map.create(size, CV_32S);
        without_sobel.args(ocl::KernelArg::ReadOnlyNoSize(dx), ocl::KernelArg::ReadOnlyNoSize(dy),
                           ocl::KernelArg::WriteOnly(map),
                           low, high);

        size_t globalsize[2] = { (size_t)size.width, (size_t)size.height },
                localsize[2] = { (size_t)lSizeX, (size_t)lSizeY };

        if (!without_sobel.run(2, globalsize, localsize, false))
            return false;
    }

    int PIX_PER_WI = 8;
    /*
        stage2:
            hysteresis (add weak edges if they are connected with strong edges)
    */
    // Each work item covers PIX_PER_WI rows; shrink the local Y size to match.
    int sizey = lSizeY / PIX_PER_WI;
    if (sizey == 0)
        sizey = 1;

    size_t globalsize[2] = { (size_t)size.width, ((size_t)size.height + PIX_PER_WI - 1) / PIX_PER_WI }, localsize[2] = { (size_t)lSizeX, (size_t)sizey };

    ocl::Kernel edgesHysteresis("stage2_hysteresis", ocl::imgproc::canny_oclsrc,
                                format("-D STAGE2 -D PIX_PER_WI=%d -D LOCAL_X=%d -D LOCAL_Y=%d",
                                       PIX_PER_WI, lSizeX, sizey));
    if (edgesHysteresis.empty())
        return false;

    edgesHysteresis.args(ocl::KernelArg::ReadWrite(map));
    if (!edgesHysteresis.run(2, globalsize, localsize, false))
        return false;

    // get edges
    ocl::Kernel getEdgesKernel("getEdges", ocl::imgproc::canny_oclsrc,
                               format("-D GET_EDGES -D PIX_PER_WI=%d", PIX_PER_WI));
    if (getEdgesKernel.empty())
        return false;

    _dst.create(size, CV_8UC1);
    UMat dst = _dst.getUMat();

    getEdgesKernel.args(ocl::KernelArg::ReadOnly(map), ocl::KernelArg::WriteOnlyNoSize(dst));

    return getEdgesKernel.run(2, globalsize, NULL, false);
}
// CamShift: converges the search window with meanShift, then estimates the
// tracked object's orientation and size from second-order image moments of
// the back-projection inside the (padded) window. Updates 'window' in place
// and returns the fitted rotated rectangle.
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
                              TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    const int TOLERANCE = 10;  // padding (pixels) added around the converged window
    Size size;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), size = umat.size();
    else
        mat = _probImage.getMat(), size = mat.size();

    // Converge the window onto the mode of the probability image.
    meanShift( _probImage, window, criteria );

    // Expand the window by TOLERANCE on each side, clamped to the image.
    window.x -= TOLERANCE;
    if( window.x < 0 )
        window.x = 0;

    window.y -= TOLERANCE;
    if( window.y < 0 )
        window.y = 0;

    window.width += 2 * TOLERANCE;
    if( window.x + window.width > size.width )
        window.width = size.width - window.x;

    window.height += 2 * TOLERANCE;
    if( window.y + window.height > size.height )
        window.height = size.height - window.y;

    // Calculating moments in new center mass
    Moments m = isUMat ? moments(umat(window)) : moments(mat(window));

    double m00 = m.m00, m10 = m.m10, m01 = m.m01;
    double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;

    if( fabs(m00) < DBL_EPSILON )
        return RotatedRect();  // zero-mass window: nothing to track

    double inv_m00 = 1. / m00;
    // Centroid in image coordinates.
    int xc = cvRound( m10 * inv_m00 + window.x );
    int yc = cvRound( m01 * inv_m00 + window.y );
    double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;

    // Calculating width & height
    double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );

    // Calculating orientation
    double theta = atan2( 2 * b, a - c + square );

    // Calculating width & length of figure
    double cs = cos( theta );
    double sn = sin( theta );

    double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
    double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
    double length = std::sqrt( rotate_a * inv_m00 ) * 4;
    double width = std::sqrt( rotate_c * inv_m00 ) * 4;

    // In case, when theta is 0 or 1.57... the Length & Width may be exchanged
    if( length < width )
    {
        std::swap( length, width );
        std::swap( cs, sn );
        theta = CV_PI*0.5 - theta;
    }

    // Saving results
    int _xc = cvRound( xc );
    int _yc = cvRound( yc );
    // Bounding box of the rotated ellipse, clamped to the image.
    int t0 = cvRound( fabs( length * cs ));
    int t1 = cvRound( fabs( width * sn ));

    t0 = MAX( t0, t1 ) + 2;
    window.width = MIN( t0, (size.width - _xc) * 2 );

    t0 = cvRound( fabs( length * sn ));
    t1 = cvRound( fabs( width * cs ));

    t0 = MAX( t0, t1 ) + 2;
    window.height = MIN( t0, (size.height - _yc) * 2 );

    window.x = MAX( 0, _xc - window.width / 2 );
    window.y = MAX( 0, _yc - window.height / 2 );

    window.width = MIN( size.width - window.x, window.width );
    window.height = MIN( size.height - window.y, window.height );

    RotatedRect box;
    box.size.height = (float)length;
    box.size.width = (float)width;
    // Normalize the reported angle into [0, 180).
    box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
    while(box.angle < 0)
        box.angle += 360;
    while(box.angle >= 360)
        box.angle -= 360;
    if(box.angle >= 180)
        box.angle -= 180;
    box.center = Point2f( window.x + window.width*0.5f,
                          window.y + window.height*0.5f);

    return box;
}
/******************************************************************************* * Function: getOtsuThreshold * Description: computes the threhsold using Otsu's method * Arguments: lowerVal - lower bound of pixel value upperVal - upper bound of pixel value u1Ptr - pointer to receive the mean of class 1 roiMask - ROI binary mask * Returns: int - Otsu threshold * Comments: * Revision: *******************************************************************************/ inline int FGExtraction::getOtsuThreshold(int lowerVal, int upperVal, int* u1Ptr, InputArray roiMask) { Mat _roiMask = roiMask.getMat(); int channels[] = {0}; int nbins = 256; const int histSize[] = {nbins}; float range[] = {0, 255}; const float* ranges[] = {range}; Mat hist; cv::calcHist(&_inImg, 1, channels, roiMask, hist, 1, histSize, ranges); Mat_<float> hist_(hist); float size = float(sum(hist)[0]); float w1, w2, u1, u2; float max = -1; int index = 1; float u1max = -1; float histMax = 0; int mode = 0; float count = 0; for (int i = lowerVal+1; i < upperVal; ++i){ if(hist_(i,0) > histMax) { histMax = hist_(i,0); mode = i; } w1 = 0; for (int j = lowerVal+1; j <= i; ++j){ w1 = w1 + hist_(j-1,0); } w1 = w1 / size; w2 = 1 - w1; u1 = 0; count = 0; for (int j = lowerVal; j <= i-1; ++j){ u1 = u1 + j*hist_(j,0); count += hist_(j,0); } u1 /= count; u2 = 0; count = 0; for (int j = i; j <= upperVal; ++j){ u2 = u2 + j*hist_(j, 0); count += hist_(j, 0); } u2 /= count; if (w1 * w2 * (u1-u2) * (u1-u2) > max){ max = w1 * w2 * (u1-u2) * (u1-u2); index = i; u1max = u1; } else{ max = max; index = index; } } //cout << "mode = " << mode << endl; //cout << "u1 = " << u1max << "; index = " << index << "; "; *u1Ptr = (int)(u1max + 0.5); return index; }
// Runs one frame through the SAGMM pipeline: spatio-temporal pre-filtering,
// global illumination compensation, background subtraction, and optional
// morphological cleanup of the foreground mask written to 'mask'.
void SAGMMBuilder::Update(InputArray frame, OutputArray mask)
{
    Mat Image = frame.getMat();
    Mat Foreground(Image.size(), CV_8U, Scalar::all(0));
    Mat FilteredImage;

    // Warm-up phase: accumulate the first temporal-window frames into the
    // spatio-temporal filter before the background model exists.
    if (frame_counter < filter->getTemporalWindow()) {

        if (frame_counter == 0) {
            // Initialize in zero three channels of img kernel.
            filter->initializeFirstImage(Image);
        }

        // Apply filter and put the result in FilteredImage.
        // Note this filter also keeps an internal copy of the filter result.
        filter->SpatioTemporalPreprocessing(Image, FilteredImage);

        // No model yet: report an all-zero (all-background) mask.
        Foreground.copyTo(mask);

        frame_counter += 1;

        if (frame_counter == filter->getTemporalWindow()) {
            // Initialize model
            model->initializeModel(FilteredImage);
            model->getBackground(Background);
        }
        return;
    }

    // Applies spatial and temporal filter
    // note this filter returns a Mat of CV_32FC3 type.
    filter->SpatioTemporalPreprocessing(Image, FilteredImage);

    // Global illumination changing factor 'g' between reference image ir and current image ic.
    double globalIlluminationFactor = factor->getIlluminationFactor(FilteredImage, Background);

    // Calling background subtraction algorithm.
    model->operator()(FilteredImage, Foreground, update_bg_model ? -1 : 0, globalIlluminationFactor);

    // Background used to compute the illumination factor on the next iteration.
    model->getBackground(Background);

    // Applying morphological filter (erosion) to the foreground mask.
    // BUGFIX: the original code unconditionally copied the raw Foreground
    // into 'mask' after this if/else, discarding the eroded result and
    // making the morphological filter a no-op.
    if (ApplyMorphologicalFilter) {
        Mat Eroded;
        Mat Element(2, 2, CV_8U, Scalar(1));
        //erode(Mask,Eroded,Mat());
        erode(Foreground, Eroded, Element);
        Eroded.copyTo(mask);
    }
    else {
        // return mask
        Foreground.copyTo(mask);
    }

    frame_counter += 1;
}
// Computes the raw Shi-Tomasi corner-response map for 'image' into '_corner':
// Gaussian-weighted structure tensor per pixel, smaller eigenvalue as the
// response, 20% relative thresholding and 5x5 non-maximum suppression.
void FeatureShiCorner::computeRawCornerMat( InputArray _image, OutputArray _corner )
{
    // TODO check: _corner must be CV_32SC1
    const Mat image = _image.getMat();
    const int height = image.rows;
    const int width  = image.cols;
    const int radius = 1;

    Mat derX( height, width, CV_32SC1, Scalar( 0 ) );
    Mat derY( height, width, CV_32SC1, Scalar( 0 ) );
    Mat Mx2( height, width, CV_32SC1, Scalar( 0 ) );
    Mat My2( height, width, CV_32SC1, Scalar( 0 ) );
    Mat Mxy( height, width, CV_32SC1, Scalar( 0 ) );

    // First-order image derivatives.
    applyFilter< uchar, int32_t >( _image, derX, &filter_derX[0][0], 3, 1, 0, true );
    applyFilter< uchar, int32_t >( _image, derY, &filter_derY[0][0], 1, 3, 0, true );

    // Normalization divisor of the Gaussian window = sum of |coefficients|.
    int normDivisor = 0;
    const int * pGauss  = &FeatureShiCorner::filter_gauss[0][0];
    int const * pGaussE = pGauss + 9;
    for(; pGauss != pGaussE; pGauss++ )
    {
        normDivisor += abs( *pGauss );
    }

    // Gaussian-weighted structure tensor entries per pixel.
    int32_t maxVal = 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
        {
            for( int dy = -radius; dy <= radius; dy++ )
            {
                for( int dx = -radius; dx <= radius; dx++ )
                {
                    int fx = x + dx;
                    if( (fx < 0) || (fx >= width) ) { continue; }
                    int fy = y + dy;
                    if( (fy < 0) || (fy >= height) ) { continue; }

                    int f = FeatureShiCorner::filter_gauss[(radius + dx)][(radius + dy)];
                    // Plain integer products instead of pow(): exact and much
                    // faster than the double-precision transcendental call.
                    const int32_t gx = derX.at< int32_t >( fy, fx );
                    const int32_t gy = derY.at< int32_t >( fy, fx );
                    Mx2.at< int32_t >( y, x ) += f * gx * gx;
                    My2.at< int32_t >( y, x ) += f * gy * gy;
                    Mxy.at< int32_t >( y, x ) += f * gx * gy;
                }
            }

            Mx2.at< int32_t >( y, x ) /= normDivisor;
            My2.at< int32_t >( y, x ) /= normDivisor;
            Mxy.at< int32_t >( y, x ) /= normDivisor;

            maxVal = max( Mx2.at< int32_t >( y, x ), maxVal );
            maxVal = max( My2.at< int32_t >( y, x ), maxVal );
            maxVal = max( Mxy.at< int32_t >( y, x ), maxVal );
        }
    }

    Mat corners = _corner.getMat();
    const auto it_cE = corners.end< int32_t >();
    auto it_cS   = corners.begin< int32_t >();
    auto it_Mx2S = Mx2.begin< int32_t >();
    auto it_My2S = My2.begin< int32_t >();
    auto it_MxyS = Mxy.begin< int32_t >();

    // reduce to high values if necessary
    // maxval: 0..1 * 255^2, maxval^2 should not overflow for the next step
    // reduce to sqrt( 2^31-1 (signed int) ) -> 46340
    const int maxValC = 46340;
    if( maxVal > maxValC )
    {
        cout << "maxVal > maxValC | maxVal: " << maxVal << endl;
        const double scaleFac = maxValC / (double) maxVal; // scaleFac = 0.xxxx
        while( it_cS != it_cE )
        {
            // BUGFIX: the original multiplied by int32_t(scaleFac), which
            // truncates the fractional factor to 0 and zeroed every entry.
            // Scale in floating point, then truncate the product.
            *it_cS   = int32_t( *it_cS   * scaleFac );
            *it_Mx2S = int32_t( *it_Mx2S * scaleFac );
            *it_My2S = int32_t( *it_My2S * scaleFac );
            *it_MxyS = int32_t( *it_MxyS * scaleFac );
            it_cS++;
            it_Mx2S++;
            it_My2S++;
            it_MxyS++;
        }
        // reset iterators
        it_cS   = corners.begin< int32_t >();
        it_Mx2S = Mx2.begin< int32_t >();
        it_My2S = My2.begin< int32_t >();
        it_MxyS = Mxy.begin< int32_t >();
    }

    maxVal = 0;
    // Smaller eigenvalue of the 2x2 structure tensor [Mx2 Mxy; Mxy My2]
    // (the Shi-Tomasi corner response).
    while( it_cS != it_cE )
    {
        // BUGFIX: compute in double. trc can reach 2*46340, so trc*trc
        // overflows int32, and the original integer division "(trc*trc)/4"
        // also discarded precision before the square root.
        const double trc = (double) *it_Mx2S + (double) *it_My2S;
        const double det = (double) *it_Mx2S * (double) *it_My2S
                         - (double) *it_MxyS * (double) *it_MxyS;
        const double ev_sqrt   = sqrt( (trc * trc) / 4.0 - det );
        const double trc_halve = trc / 2.0;
        double eigVal1 = trc_halve + ev_sqrt;
        double eigVal2 = trc_halve - ev_sqrt;
        if( (eigVal1 < 0) || (eigVal2 < 0) )
        {
            eigVal1 = 0;
            eigVal2 = 0;
        }

        *it_cS = (int32_t) min( eigVal1, eigVal2 );
        maxVal = max( (int32_t) min( eigVal1, eigVal2 ), maxVal );
        it_cS++;
        it_Mx2S++;
        it_My2S++;
        it_MxyS++;
    }

    // * Suppress weak responses below 20% of the strongest one.
    if( maxVal != 0 )
    {
        const double threshold = maxVal * 0.2;
        // Renamed loop iterators: the originals shadowed it_cS/it_cE above.
        for( auto itEnd = corners.end< int32_t >(), it = corners.begin< int32_t >(); it != itEnd; it++ )
        {
            if( *it < threshold ) { *it = 0; }
        }
    }
    // */

    // * Non-maximum suppression over a 5x5 neighbourhood.
    Mat cornersFiltered( height, width, CV_32SC1 );
    maxFilter< int32_t >( corners, cornersFiltered, 5, 5 );
    // */

    if( isDebugMode )
    {
        Mat derXd, derYd, cornersd;
        cornersFiltered.convertTo( cornersd, CV_8UC1 );
        derX.convertTo( derXd, CV_8UC1 );
        derY.convertTo( derYd, CV_8UC1 );

        // Display corners over the image (cross)
        Mat cornersdc = image.clone();
        auto cornerPoints = genPoints( cornersFiltered );
        for( auto p : cornerPoints )
        {
            for( int dx = -2; dx <= 2; dx++ )
            {
                int x = p.first + dx;
                int y = p.second;
                if( ( x < 0) || ( x >= width) ) { continue; }
                cornersdc.at< uchar >( y, x ) = 0;
            }
            for( int dy = -2; dy <= 2; dy++ )
            {
                int x = p.first;
                int y = p.second + dy;
                if( ( y < 0) || ( y >= height) ) { continue; }
                cornersdc.at< uchar >( y, x ) = 0;
            }
        }

        imshow( "image", image );
        imshow( "derX", derXd );
        imshow( "derY", derYd );
        imshow( "Shi Corner", cornersd );
        imshow( "Shi Corner Image", cornersdc );
        waitKey( 0 );
        destroyAllWindows();
        waitKey( 1 );
    }
}
// Interactive alpha-blending viewer for two images.
// Shows s1*(1-a) + s2*a with a trackbar-controlled alpha;
// 'f' toggles alpha between 0 and 100, 'i' prints image info, 'q' quits.
void guiAlphaBlend(InputArray src1_, InputArray src2_)
{
    // BUGFIX: InputArray::getMat() returns a temporary Mat by value; binding
    // it to a non-const 'Mat&' is ill-formed C++ (accepted only as an MSVC
    // extension). Hold the headers by value instead — this is cheap since a
    // Mat copy shares the pixel buffer.
    Mat src1 = src1_.getMat();
    Mat src2 = src2_.getMat();
    Mat s1, s2;
    if(src1.depth()==CV_8U || src1.depth()==CV_32F)
    {
        // Directly displayable depths: just promote grayscale to BGR.
        if(src1.channels()==1) cvtColor(src1, s1, CV_GRAY2BGR);
        else s1 = src1;
        if(src2.channels()==1) cvtColor(src2, s2, CV_GRAY2BGR);
        else s2 = src2;
    }
    else
    {
        // Other depths: convert to float before blending.
        Mat ss1, ss2;
        src1.convertTo(ss1, CV_32F);
        src2.convertTo(ss2, CV_32F);

        if(src1.channels()==1) cvtColor(ss1, s1, CV_GRAY2BGR);
        else s1 = ss1.clone();
        if(src2.channels()==1) cvtColor(ss2, s2, CV_GRAY2BGR);
        else s2 = ss2.clone();
    }

    namedWindow("alphaBlend");
    int a = 0;
    createTrackbar("a", "alphaBlend", &a, 100);

    int key = 0;
    Mat show;
    while(key != 'q')
    {
        addWeighted(s1, 1.0 - a/100.0, s2, a/100.0, 0.0, show);

        if(show.depth()==CV_8U)
        {
            imshow("alphaBlend", show);
        }
        else
        {
            // Rescale float results into a displayable 8-bit range.
            double minv, maxv;
            minMaxLoc(show, &minv, &maxv);

            Mat s;
            if(maxv <= 255) show.convertTo(s, CV_8U);
            else show.convertTo(s, CV_8U, 255/maxv);
            imshow("alphaBlend", s);
        }

        key = waitKey(1);
        if(key=='f')
        {
            a = (a > 0) ? 0 : 100;
            setTrackbarPos("a", "alphaBlend", a);
        }
        if(key=='i')
        {
            showMatInfo(src1, "========src1========");
            cout<<endl;
            showMatInfo(src2, "========src2========");
        }
    }
    destroyWindow("alphaBlend");
}
inline UMat ToUMat(InputArray src)
{
    // Deep-copy the input into a freshly allocated UMat.
    UMat result;
    Mat header = src.getMat();
    header.copyTo(result);
    return result;
}
static inline double checkNormRelative(InputArray m1, InputArray m2, InputArray mask = noArray())
{
    // Relative L-inf error: absolute difference normalized by the larger input
    // magnitude, floored at float epsilon to avoid division by zero.
    const double absDiff = cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask);
    const double mag1 = cvtest::norm(m1.getMat(), cv::NORM_INF);
    const double mag2 = cvtest::norm(m2.getMat(), cv::NORM_INF);
    const double denom = std::max((double)std::numeric_limits<float>::epsilon(),
                                  std::max(mag1, mag2));
    return absDiff / denom;
}
Mat imdecode( InputArray _buf, int flags )
{
    // Decode an in-memory encoded image buffer into a Mat.
    Mat decoded;
    Mat buffer = _buf.getMat();
    imdecode_( buffer, flags, LOAD_MAT, &decoded );
    return decoded;
}
// FAST corner detector core. Scans the image row by row, classifies each
// pixel against a circle of 'patternSize' surrounding pixels, and keeps
// candidates with a contiguous arc of >K pixels all brighter or all darker
// than center +/- threshold. A 3-row ring buffer holds scores so non-maximum
// suppression can compare each candidate with its 3x3 neighbourhood.
// NOTE(review): 'patternSize' and 'cornerScore<patternSize>' are used but not
// declared in this view — upstream this is 'template<int patternSize> void
// FAST_t(...)'; the template line (and, apparently, the function's final
// closing brace) seem lost in extraction. Confirm against the full file.
void FAST_t(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
{
    Mat img = _img.getMat();
    // K = half the circle; N lets the contiguous-arc scan wrap around.
    const int K = patternSize/2, N = patternSize + K + 1;
#if CV_SSE2
    const int quarterPatternSize = patternSize/4;
    (void)quarterPatternSize;
#endif
    int i, j, k, pixel[25];
    // Precomputed offsets of the circle pixels for this image stride.
    makeOffsets(pixel, (int)img.step, patternSize);

    keypoints.clear();

    threshold = std::min(std::max(threshold, 0), 255);

#if CV_SSE2
    __m128i delta = _mm_set1_epi8(-128), t = _mm_set1_epi8((char)threshold), K16 = _mm_set1_epi8((char)K);
    (void)K16;
    (void)delta;
    (void)t;
#endif
    // Classification LUT indexed by (circle pixel - center + 255):
    // 1 = darker than center-threshold, 2 = brighter than center+threshold.
    uchar threshold_tab[512];
    for( i = -255; i <= 255; i++ )
        threshold_tab[i+255] = (uchar)(i < -threshold ? 1 : i > threshold ? 2 : 0);

    // 3-row ring buffers: per-pixel scores (buf) and corner x-positions
    // (cpbuf; slot [-1] stores the per-row corner count).
    AutoBuffer<uchar> _buf((img.cols+16)*3*(sizeof(int) + sizeof(uchar)) + 128);
    uchar* buf[3];
    buf[0] = _buf; buf[1] = buf[0] + img.cols; buf[2] = buf[1] + img.cols;
    int* cpbuf[3];
    cpbuf[0] = (int*)alignPtr(buf[2] + img.cols, sizeof(int)) + 1;
    cpbuf[1] = cpbuf[0] + img.cols + 1;
    cpbuf[2] = cpbuf[1] + img.cols + 1;
    memset(buf[0], 0, img.cols*3);

    for(i = 3; i < img.rows-2; i++)
    {
        const uchar* ptr = img.ptr<uchar>(i) + 3;
        uchar* curr = buf[(i - 3)%3];
        int* cornerpos = cpbuf[(i - 3)%3];
        memset(curr, 0, img.cols);
        int ncorners = 0;

        if( i < img.rows - 3 )
        {
            j = 3;
#if CV_SSE2
            // SIMD fast path: test 16 candidate pixels at once.
            if( patternSize == 16 )
            {
                for(; j < img.cols - 16 - 3; j += 16, ptr += 16)
                {
                    __m128i m0, m1;
                    // v0/v1 = center +/- threshold (biased by 'delta' for
                    // signed comparison of unsigned pixel data).
                    __m128i v0 = _mm_loadu_si128((const __m128i*)ptr);
                    __m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta);
                    v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta);

                    // Coarse test on 4 pixels spaced a quarter circle apart:
                    // a corner needs two adjacent quarter points on one side.
                    __m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta);
                    __m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[quarterPatternSize])), delta);
                    __m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[2*quarterPatternSize])), delta);
                    __m128i x3 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[3*quarterPatternSize])), delta);
                    m0 = _mm_and_si128(_mm_cmpgt_epi8(x0, v0), _mm_cmpgt_epi8(x1, v0));
                    m1 = _mm_and_si128(_mm_cmpgt_epi8(v1, x0), _mm_cmpgt_epi8(v1, x1));
                    m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x1, v0), _mm_cmpgt_epi8(x2, v0)));
                    m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x1), _mm_cmpgt_epi8(v1, x2)));
                    m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x2, v0), _mm_cmpgt_epi8(x3, v0)));
                    m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x2), _mm_cmpgt_epi8(v1, x3)));
                    m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x3, v0), _mm_cmpgt_epi8(x0, v0)));
                    m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x3), _mm_cmpgt_epi8(v1, x0)));
                    m0 = _mm_or_si128(m0, m1);
                    int mask = _mm_movemask_epi8(m0);
                    if( mask == 0 )
                        continue;
                    // Only the upper 8 lanes may be corners: rewind so the
                    // full test re-examines them.
                    if( (mask & 255) == 0 )
                    {
                        j -= 8;
                        ptr -= 8;
                        continue;
                    }

                    // Full test: longest run of consecutive brighter (c0) /
                    // darker (c1) circle pixels, per lane.
                    __m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0;
                    for( k = 0; k < N; k++ )
                    {
                        __m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta);
                        m0 = _mm_cmpgt_epi8(x, v0);
                        m1 = _mm_cmpgt_epi8(v1, x);

                        c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0);
                        c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1);

                        max0 = _mm_max_epu8(max0, c0);
                        max1 = _mm_max_epu8(max1, c1);
                    }

                    max0 = _mm_max_epu8(max0, max1);
                    int m = _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16));

                    for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
                        if(m & 1)
                        {
                            cornerpos[ncorners++] = j+k;
                            if(nonmax_suppression)
                                curr[j+k] = (uchar)cornerScore<patternSize>(ptr+k, pixel, threshold);
                        }
                }
            }
#endif
            // Scalar path.
            for( ; j < img.cols - 3; j++, ptr++ )
            {
                int v = ptr[0];
                const uchar* tab = &threshold_tab[0] - v + 255;
                // Quick rejection: pairs of opposite circle pixels must agree
                // on a side (all-darker bit 1, all-brighter bit 2).
                int d = tab[ptr[pixel[0]]] | tab[ptr[pixel[8]]];

                if( d == 0 )
                    continue;

                d &= tab[ptr[pixel[2]]] | tab[ptr[pixel[10]]];
                d &= tab[ptr[pixel[4]]] | tab[ptr[pixel[12]]];
                d &= tab[ptr[pixel[6]]] | tab[ptr[pixel[14]]];

                if( d == 0 )
                    continue;

                d &= tab[ptr[pixel[1]]] | tab[ptr[pixel[9]]];
                d &= tab[ptr[pixel[3]]] | tab[ptr[pixel[11]]];
                d &= tab[ptr[pixel[5]]] | tab[ptr[pixel[13]]];
                d &= tab[ptr[pixel[7]]] | tab[ptr[pixel[15]]];

                // Darker arc: need >K consecutive circle pixels below v - threshold.
                if( d & 1 )
                {
                    int vt = v - threshold, count = 0;

                    for( k = 0; k < N; k++ )
                    {
                        int x = ptr[pixel[k]];
                        if(x < vt)
                        {
                            if( ++count > K )
                            {
                                cornerpos[ncorners++] = j;
                                if(nonmax_suppression)
                                    curr[j] = (uchar)cornerScore<patternSize>(ptr, pixel, threshold);
                                break;
                            }
                        }
                        else
                            count = 0;
                    }
                }

                // Brighter arc: need >K consecutive circle pixels above v + threshold.
                if( d & 2 )
                {
                    int vt = v + threshold, count = 0;

                    for( k = 0; k < N; k++ )
                    {
                        int x = ptr[pixel[k]];
                        if(x > vt)
                        {
                            if( ++count > K )
                            {
                                cornerpos[ncorners++] = j;
                                if(nonmax_suppression)
                                    curr[j] = (uchar)cornerScore<patternSize>(ptr, pixel, threshold);
                                break;
                            }
                        }
                        else
                            count = 0;
                    }
                }
            }
        }

        cornerpos[-1] = ncorners;

        if( i == 3 )
            continue;  // need a full previous row before emitting keypoints

        // Non-maximum suppression: compare each candidate of the previous row
        // against its 8 neighbours (rows i-2, i-1, i of the ring buffer).
        const uchar* prev = buf[(i - 4 + 3)%3];
        const uchar* pprev = buf[(i - 5 + 3)%3];
        cornerpos = cpbuf[(i - 4 + 3)%3];
        ncorners = cornerpos[-1];

        for( k = 0; k < ncorners; k++ )
        {
            j = cornerpos[k];
            int score = prev[j];
            if( !nonmax_suppression ||
               (score > prev[j+1] && score > prev[j-1] &&
                score > pprev[j-1] && score > pprev[j] && score > pprev[j+1] &&
                score > curr[j-1] && score > curr[j] && score > curr[j+1]) )
            {
                keypoints.push_back(KeyPoint((float)j, (float)(i-1), 7.f, -1, (float)score));
            }
        }
    }
// NOTE(review): the function's closing brace is missing in this view (chunk
// appears truncated).
// Per-element division of two Fourier spectra: dst = srcA / srcB, or
// dst = srcA / conj(srcB) when conjB is true.  The spectra may be stored
// either as 2-channel complex matrices or in the packed CCS
// (complex-conjugate-symmetric) layout produced by cv::dft() on real input.
// A small epsilon is added to every denominator to avoid division by zero.
// Used by phase correlation; honors DFT_ROWS in 'flags' for per-row 1-D spectra.
static void divSpectrums( InputArray _srcA, InputArray _srcB, OutputArray _dst, int flags, bool conjB)
{
    Mat srcA = _srcA.getMat(), srcB = _srcB.getMat();
    int depth = srcA.depth(), cn = srcA.channels(), type = srcA.type();
    int rows = srcA.rows, cols = srcA.cols;
    int j, k;

    // Both spectra must match in size/type; only single- or double-precision,
    // 1-channel (packed) or 2-channel (complex) data is supported.
    CV_Assert( type == srcB.type() && srcA.size() == srcB.size() );
    CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 );

    _dst.create( srcA.rows, srcA.cols, type );
    Mat dst = _dst.getMat();

    // A spectrum is treated as 1-D if DFT_ROWS was requested, or if the data
    // is a single row (or a single continuous column).
    bool is_1d = (flags & DFT_ROWS) || (rows == 1 || (cols == 1 &&
             srcA.isContinuous() && srcB.isContinuous() && dst.isContinuous()));

    if( is_1d && !(flags & DFT_ROWS) )
        cols = cols + rows - 1, rows = 1;  // flatten a single column into one row

    int ncols = cols*cn;
    // In packed (CCS) single-channel layout the first column (and, for even
    // width, the last one) holds purely real values and is handled separately.
    int j0 = cn == 1;
    int j1 = ncols - (cols % 2 == 0 && cn == 1);

    if( depth == CV_32F )
    {
        const float* dataA = srcA.ptr<float>();
        const float* dataB = srcB.ptr<float>();
        float* dataC = dst.ptr<float>();
        float eps = FLT_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        if( !is_1d && cn == 1 )
        {
            // CCS layout: the first column (k == 0) and, for even width, the
            // last column (k == 1) store real DC/Nyquist values vertically;
            // odd interior rows of these columns hold packed complex pairs.
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                dataC[0] = dataA[0] / (dataB[0] + eps);  // purely real element
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    // complex division: (a / b) with re/im pairs stored vertically
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
                                       (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;

                        double re = (double)dataA[j*stepA]*dataB[j*stepB] +
                                    (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] -
                                    (double)dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                else
                    // division by the conjugate: only the sign pattern differs
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
                                       (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;

                        double re = (double)dataA[j*stepA]*dataB[j*stepB] -
                                    (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] +
                                    (double)dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        // Process the interior (or, for 2-channel data, the whole matrix) row by row.
        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                // packed 1-D spectrum: first (and possibly last) entries are real
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }

            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
        }
    }
    else
    {
        // CV_64F branch: identical structure to the CV_32F branch above,
        // but no intermediate casts are needed.
        const double* dataA = srcA.ptr<double>();
        const double* dataB = srcB.ptr<double>();
        double* dataC = dst.ptr<double>();
        double eps = DBL_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        if( !is_1d && cn == 1 )
        {
            // first/last CCS columns, see the float branch for details
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] +
                                       dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;

                        double re = dataA[j*stepA]*dataB[j*stepB] +
                                    dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] -
                                    dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                else
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] +
                                       dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;

                        double re = dataA[j*stepA]*dataB[j*stepB] -
                                    dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] +
                                    dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }

            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
        }
    }
}
// Detects SIFT keypoints and/or computes their descriptors.
//
// _image               8-bit input image (any channel count accepted by getMat;
//                      depth must be CV_8U).
// _mask                optional CV_8UC1 mask restricting keypoint locations.
// keypoints            in/out: detected (or caller-provided) keypoints.
// _descriptors         if needed(), receives a CV_32F matrix with one
//                      descriptorSize()-element row per keypoint.
// useProvidedKeypoints when true, detection is skipped and descriptors are
//                      computed for the keypoints passed in.
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints, OutputArray _descriptors, bool useProvidedKeypoints)
{
    // firstOctave == -1 means the pyramid starts from a doubled image;
    // actualNOctaves/actualNLayers are derived from provided keypoints (if any).
    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
    Mat image = _image.getMat(), mask = _mask.getMat();

    if( image.empty() || image.depth() != CV_8U )
        CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );

    if( !mask.empty() && mask.type() != CV_8UC1 )
        CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

    if( useProvidedKeypoints )
    {
        // Derive the octave range actually needed from the provided keypoints,
        // so only that part of the pyramid is built.
        firstOctave = 0;
        int maxOctave = INT_MIN;
        for( size_t i = 0; i < keypoints.size(); i++ )
        {
            int octave, layer;
            float scale;
            unpackOctave(keypoints[i], octave, layer, scale);
            firstOctave = std::min(firstOctave, octave);
            maxOctave = std::max(maxOctave, octave);
            actualNLayers = std::max(actualNLayers, layer-2);
        }

        firstOctave = std::min(firstOctave, 0);
        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
        actualNOctaves = maxOctave - firstOctave + 1;
    }

    // Base image for the pyramid; firstOctave < 0 requests the upscaled
    // (octave -1) starting image — see createInitialImage.
    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
    std::vector<Mat> gpyr, dogpyr;
    // Number of octaves: either dictated by the provided keypoints, or
    // log2(min dimension) - 2, adjusted for the extra -1 octave.
    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;

    //double t, tf = getTickFrequency();
    //t = (double)getTickCount();
    buildGaussianPyramid(base, gpyr, nOctaves);
    buildDoGPyramid(gpyr, dogpyr);

    //t = (double)getTickCount() - t;
    //printf("pyramid construction time: %g\n", t*1000./tf);

    if( !useProvidedKeypoints )
    {
        //t = (double)getTickCount();
        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
        KeyPointsFilter::removeDuplicated( keypoints );

        if( nfeatures > 0 )
            KeyPointsFilter::retainBest(keypoints, nfeatures);
        //t = (double)getTickCount() - t;
        //printf("keypoint detection time: %g\n", t*1000./tf);

        if( firstOctave < 0 )
            // Detection ran on an upscaled base image; rescale coordinates and
            // sizes back to the original image and re-pack the octave index
            // stored in the low byte of kpt.octave.
            for( size_t i = 0; i < keypoints.size(); i++ )
            {
                KeyPoint& kpt = keypoints[i];
                float scale = 1.f/(float)(1 << -firstOctave);
                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
                kpt.pt *= scale;
                kpt.size *= scale;
            }

        if( !mask.empty() )
            KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }
    else
    {
        // filter keypoints by mask
        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }

    if( _descriptors.needed() )
    {
        //t = (double)getTickCount();
        int dsize = descriptorSize();
        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
        Mat descriptors = _descriptors.getMat();

        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
        //t = (double)getTickCount() - t;
        //printf("descriptor extraction time: %g\n", t*1000./tf);
    }
}
// Computes the intensity-weighted centroid of the pixels inside a box of size
// weightBoxSize centered at peakLocation, with the box clipped to the image
// bounds.  If 'response' is non-null it receives the total (un-normalized)
// intensity inside the box.  Only single-channel CV_32F / CV_64F input is
// supported.
static Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize, double* response)
{
    Mat src = _src.getMat();

    int type = src.type();
    CV_Assert( type == CV_32FC1 || type == CV_64FC1 );

    // Box extents (inclusive) around the peak...
    int rowFirst = peakLocation.y - (weightBoxSize.height >> 1);
    int rowLast  = peakLocation.y + (weightBoxSize.height >> 1);
    int colFirst = peakLocation.x - (weightBoxSize.width  >> 1);
    int colLast  = peakLocation.x + (weightBoxSize.width  >> 1);

    // ...clipped to valid pixel coordinates.
    rowFirst = rowFirst < 0 ? 0 : rowFirst;
    colFirst = colFirst < 0 ? 0 : colFirst;
    rowLast  = rowLast  > src.rows - 1 ? src.rows - 1 : rowLast;
    colLast  = colLast  > src.cols - 1 ? src.cols - 1 : colLast;

    Point2d centroid;
    double sumIntensity = 0.0;

    if(type == CV_32FC1)
    {
        const float* row = src.ptr<float>() + rowFirst*src.cols;
        for(int y = rowFirst; y <= rowLast; y++, row += src.cols)
        {
            for(int x = colFirst; x <= colLast; x++)
            {
                const double v = (double)row[x];
                centroid.x   += (double)x*v;
                centroid.y   += (double)y*v;
                sumIntensity += v;
            }
        }
    }
    else
    {
        const double* row = src.ptr<double>() + rowFirst*src.cols;
        for(int y = rowFirst; y <= rowLast; y++, row += src.cols)
        {
            for(int x = colFirst; x <= colLast; x++)
            {
                const double v = row[x];
                centroid.x   += (double)x*v;
                centroid.y   += (double)y*v;
                sumIntensity += v;
            }
        }
    }

    if(response)
        *response = sumIntensity;

    sumIntensity += DBL_EPSILON; // guard against an all-zero window (div0)

    centroid.x /= sumIntensity;
    centroid.y /= sumIntensity;

    return centroid;
}
// Estimates the geometric transform that aligns inputImage to templateImage by
// maximizing the Enhanced Correlation Coefficient (ECC, Evangelidis & Psarakis).
//
// templateImage  8-bit or 32-bit single-channel template.
// inputImage     image of the same type to be warped onto the template.
// warpMatrix     in/out CV_32FC1 2x3 (affine-like motions) or 3x3 (homography)
//                matrix; used as the initial guess and updated in place.
// motionType     MOTION_TRANSLATION / EUCLIDEAN / AFFINE / HOMOGRAPHY.
// criteria       iteration count and/or epsilon on the change of rho.
//
// Returns the final correlation coefficient rho; throws CV_StsNoConv if the
// iteration starts diverging (lambda denominator becomes non-positive).
double cv::findTransformECC(InputArray templateImage, InputArray inputImage, InputOutputArray warpMatrix, int motionType, TermCriteria criteria)
{
    Mat src = templateImage.getMat();//template image
    Mat dst = inputImage.getMat(); //input image (to be warped)
    Mat map = warpMatrix.getMat(); //warp (transformation)

    CV_Assert(!src.empty());
    CV_Assert(!dst.empty());

    if( ! (src.type()==dst.type()))
        CV_Error( CV_StsUnmatchedFormats, "Both input images must have the same data type" );

    //accept only 1-channel images
    if( src.type() != CV_8UC1 && src.type()!= CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "Images must have 8uC1 or 32fC1 type");

    if( map.type() != CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "warpMatrix must be single-channel floating-point matrix");

    CV_Assert (map.cols == 3);
    CV_Assert (map.rows == 2 || map.rows ==3);

    CV_Assert (motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY ||
        motionType == MOTION_EUCLIDEAN || motionType == MOTION_TRANSLATION);

    if (motionType == MOTION_HOMOGRAPHY){
        CV_Assert (map.rows ==3);
    }

    CV_Assert (criteria.type & TermCriteria::COUNT || criteria.type & TermCriteria::EPS);
    const int numberOfIterations = (criteria.type & TermCriteria::COUNT) ? criteria.maxCount : 200;
    // termination_eps == -1 disables the epsilon test (|rho - last_rho| is always >= -1)
    const double termination_eps = (criteria.type & TermCriteria::EPS) ? criteria.epsilon : -1;

    // number of free parameters of the chosen motion model
    int paramTemp = 6;//default: affine
    switch (motionType){
      case MOTION_TRANSLATION:
          paramTemp = 2;
          break;
      case MOTION_EUCLIDEAN:
          paramTemp = 3;
          break;
      case MOTION_HOMOGRAPHY:
          paramTemp = 8;
          break;
    }

    const int numberOfParameters = paramTemp;

    const int ws = src.cols;
    const int hs = src.rows;
    const int wd = dst.cols;
    const int hd = dst.rows;

    // Pixel-coordinate grids (Xgrid(y,x) = x, Ygrid(y,x) = y) used when
    // building the motion-model Jacobians.
    Mat Xcoord = Mat(1, ws, CV_32F);
    Mat Ycoord = Mat(hs, 1, CV_32F);
    Mat Xgrid = Mat(hs, ws, CV_32F);
    Mat Ygrid = Mat(hs, ws, CV_32F);

    float* XcoPtr = Xcoord.ptr<float>(0);
    float* YcoPtr = Ycoord.ptr<float>(0);
    int j;
    for (j=0; j<ws; j++)
        XcoPtr[j] = (float) j;
    for (j=0; j<hs; j++)
        YcoPtr[j] = (float) j;

    repeat(Xcoord, hs, 1, Xgrid);
    repeat(Ycoord, 1, ws, Ygrid);

    Xcoord.release();
    Ycoord.release();

    Mat templateZM = Mat(hs, ws, CV_32F);// to store the (smoothed) zero-mean version of template
    Mat templateFloat = Mat(hs, ws, CV_32F);// to store the (smoothed) template
    Mat imageFloat = Mat(hd, wd, CV_32F);// to store the (smoothed) input image
    Mat imageWarped = Mat(hs, ws, CV_32F);// to store the warped zero-mean input image
    Mat allOnes = Mat::ones(hd, wd, CV_8U); //to use it for mask warping
    Mat imageMask = Mat(hs, ws, CV_8U); //to store the final mask

    //gaussian filtering is optional
    src.convertTo(templateFloat, templateFloat.type());
    GaussianBlur(templateFloat, templateFloat, Size(5, 5), 0, 0);//is in-place filtering slower?

    dst.convertTo(imageFloat, imageFloat.type());
    GaussianBlur(imageFloat, imageFloat, Size(5, 5), 0, 0);

    // needed matrices for gradients and warped gradients
    Mat gradientX = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientY = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientXWarped = Mat(hs, ws, CV_32FC1);
    Mat gradientYWarped = Mat(hs, ws, CV_32FC1);

    // calculate first order image derivatives (central differences)
    Matx13f dx(-0.5f, 0.0f, 0.5f);

    filter2D(imageFloat, gradientX, -1, dx);
    filter2D(imageFloat, gradientY, -1, dx.t());

    // matrices needed for solving linear equation system for maximizing ECC
    Mat jacobian = Mat(hs, ws*numberOfParameters, CV_32F);
    Mat hessian = Mat(numberOfParameters, numberOfParameters, CV_32F);
    Mat hessianInv = Mat(numberOfParameters, numberOfParameters, CV_32F);
    Mat imageProjection = Mat(numberOfParameters, 1, CV_32F);
    Mat templateProjection = Mat(numberOfParameters, 1, CV_32F);
    Mat imageProjectionHessian = Mat(numberOfParameters, 1, CV_32F);
    Mat errorProjection = Mat(numberOfParameters, 1, CV_32F);

    Mat deltaP = Mat(numberOfParameters, 1, CV_32F);//transformation parameter correction
    Mat error = Mat(hs, ws, CV_32F);//error as 2D matrix

    // Inverse warping so the input is mapped into template coordinates;
    // nearest-neighbor for the validity mask to keep it binary.
    const int imageFlags = CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS+CV_WARP_INVERSE_MAP;
    const int maskFlags = CV_INTER_NN+CV_WARP_FILL_OUTLIERS+CV_WARP_INVERSE_MAP;

    // iteratively update map_matrix
    double rho = -1;
    double last_rho = - termination_eps;
    for (int i = 1; (i <= numberOfIterations) && (fabs(rho-last_rho)>= termination_eps); i++)
    {
        // warp-back portion of the inputImage and gradients to the coordinate space of the templateImage
        if (motionType != MOTION_HOMOGRAPHY)
        {
            warpAffine(imageFloat, imageWarped, map, imageWarped.size(), imageFlags);
            warpAffine(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags);
            warpAffine(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags);
            warpAffine(allOnes, imageMask, map, imageMask.size(), maskFlags);
        }
        else
        {
            warpPerspective(imageFloat, imageWarped, map, imageWarped.size(), imageFlags);
            warpPerspective(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags);
            warpPerspective(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags);
            warpPerspective(allOnes, imageMask, map, imageMask.size(), maskFlags);
        }

        // Statistics are computed only over pixels that map inside the input image.
        Scalar imgMean, imgStd, tmpMean, tmpStd;
        meanStdDev(imageWarped, imgMean, imgStd, imageMask);
        meanStdDev(templateFloat, tmpMean, tmpStd, imageMask);

        subtract(imageWarped, imgMean, imageWarped, imageMask);//zero-mean input
        subtract(templateFloat, tmpMean, templateZM, imageMask);//zero-mean template

        const double tmpNorm = std::sqrt(countNonZero(imageMask)*(tmpStd.val[0])*(tmpStd.val[0]));
        const double imgNorm = std::sqrt(countNonZero(imageMask)*(imgStd.val[0])*(imgStd.val[0]));

        // calculate jacobian of image wrt parameters
        switch (motionType){
          case MOTION_AFFINE:
              image_jacobian_affine_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, jacobian);
              break;
          case MOTION_HOMOGRAPHY:
              image_jacobian_homo_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, map, jacobian);
              break;
          case MOTION_TRANSLATION:
              image_jacobian_translation_ECC(gradientXWarped, gradientYWarped, jacobian);
              break;
          case MOTION_EUCLIDEAN:
              image_jacobian_euclidean_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, map, jacobian);
              break;
        }

        // calculate Hessian and its inverse
        project_onto_jacobian_ECC(jacobian, jacobian, hessian);

        hessianInv = hessian.inv();

        const double correlation = templateZM.dot(imageWarped);

        // calculate enhanced correlation coefficient (ECC)->rho
        last_rho = rho;
        rho = correlation/(imgNorm*tmpNorm);

        // project images into jacobian
        project_onto_jacobian_ECC( jacobian, imageWarped, imageProjection);
        project_onto_jacobian_ECC(jacobian, templateZM, templateProjection);

        // calculate the parameter lambda to account for illumination variation
        imageProjectionHessian = hessianInv*imageProjection;
        const double lambda_n = (imgNorm*imgNorm) - imageProjection.dot(imageProjectionHessian);
        const double lambda_d = correlation - templateProjection.dot(imageProjectionHessian);
        if (lambda_d <= 0.0)
        {
            rho = -1;
            CV_Error(CV_StsNoConv, "The algorithm stopped before its convergence. The correlation is going to be minimized. Images may be uncorrelated or non-overlapped");
        }
        const double lambda = (lambda_n/lambda_d);

        // estimate the update step delta_p
        error = lambda*templateZM - imageWarped;
        project_onto_jacobian_ECC(jacobian, error, errorProjection);
        deltaP = hessianInv * errorProjection;

        // update warping matrix (in place; map aliases warpMatrix)
        update_warping_matrix_ECC( map, deltaP, motionType);
    }

    // return final correlation coefficient
    return rho;
}
// Slides the template over the image and writes a CV_32F comparison map into
// _result (size (W-w+1) x (H-h+1)).  The raw cross-correlation comes from
// crossCorr(); the CCOEFF/SQDIFF variants and all *_NORMED variants are then
// derived from it using integral images of the input.
//
// numType: 0 = CCORR-family, 1 = CCOEFF-family, 2 = SQDIFF-family.
void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method )
{
    CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );

    int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                  method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    bool isNormed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    Mat img = _img.getMat(), templ = _templ.getMat();
    // ensure 'templ' is the smaller array
    if( img.rows < templ.rows || img.cols < templ.cols )
        std::swap(img, templ);

    CV_Assert( (img.depth() == CV_8U || img.depth() == CV_32F) &&
               img.type() == templ.type() );

    Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
    _result.create(corrSize, CV_32F);
    Mat result = _result.getMat();

    int cn = img.channels();
    // raw (un-normalized) cross-correlation for every template position
    crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);

    if( method == CV_TM_CCORR )
        return;  // plain cross-correlation needs no post-processing

    double invArea = 1./((double)templ.rows * templ.cols);

    Mat sum, sqsum;
    Scalar templMean, templSdv;
    // q0..q3 / p0..p3: corners of the sliding window in the integral images
    double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;
    double templNorm = 0, templSum2 = 0;

    if( method == CV_TM_CCOEFF )
    {
        // only the window means are needed
        integral(img, sum, CV_64F);
        templMean = mean(templ);
    }
    else
    {
        // need both sums and sums of squares of the sliding window
        integral(img, sum, sqsum, CV_64F);
        meanStdDev( templ, templMean, templSdv );

        templNorm = CV_SQR(templSdv[0]) + CV_SQR(templSdv[1]) + CV_SQR(templSdv[2]) + CV_SQR(templSdv[3]);

        if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            // constant template: the normalized coefficient is defined as 1
            result = Scalar::all(1);
            return;
        }

        templSum2 = templNorm + CV_SQR(templMean[0]) + CV_SQR(templMean[1]) + CV_SQR(templMean[2]) + CV_SQR(templMean[3]);

        if( numType != 1 )
        {
            // CCORR/SQDIFF do not subtract the mean
            templMean = Scalar::all(0);
            templNorm = templSum2;
        }

        templSum2 /= invArea;
        templNorm = sqrt(templNorm);
        templNorm /= sqrt(invArea); // care of accuracy here

        q0 = (double*)sqsum.data;
        q1 = q0 + templ.cols*cn;
        q2 = (double*)(sqsum.data + templ.rows*sqsum.step);
        q3 = q2 + templ.cols*cn;
    }

    double* p0 = (double*)sum.data;
    double* p1 = p0 + templ.cols*cn;
    double* p2 = (double*)(sum.data + templ.rows*sum.step);
    double* p3 = p2 + templ.cols*cn;

    int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0;
    int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0;

    int i, j, k;

    // Convert the raw correlation at each position into the requested metric.
    for( i = 0; i < result.rows; i++ )
    {
        float* rrow = (float*)(result.data + i*result.step);
        int idx = i * sumstep;
        int idx2 = i * sqstep;

        for( j = 0; j < result.cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wndMean2 = 0, wndSum2 = 0;

            if( numType == 1 )
            {
                // CCOEFF: subtract window-mean * template-mean contribution
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wndMean2 += CV_SQR(t);
                    num -= t*templMean[k];
                }

                wndMean2 *= invArea;
            }

            if( isNormed || numType == 2 )
            {
                // window energy from the squared-sum integral image
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wndSum2 += t;
                }

                if( numType == 2 )
                    // SQDIFF via the identity |I-T|^2 = |I|^2 - 2*I.T + |T|^2
                    num = wndSum2 - 2*num + templSum2;
            }

            if( isNormed )
            {
                t = sqrt(MAX(wndSum2 - wndMean2,0))*templNorm;
                if( fabs(num) < t )
                    num /= t;
                else if( fabs(num) < t*1.125 )
                    // numerically borderline: clamp to the ideal extreme
                    num = num > 0 ? 1 : -1;
                else
                    // degenerate window (near-zero variance): define the result
                    num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }
}
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria ) { CV_INSTRUMENT_REGION() Size size; int cn; Mat mat; UMat umat; bool isUMat = _probImage.isUMat(); if (isUMat) umat = _probImage.getUMat(), cn = umat.channels(), size = umat.size(); else mat = _probImage.getMat(), cn = mat.channels(), size = mat.size(); Rect cur_rect = window; CV_Assert( cn == 1 ); if( window.height <= 0 || window.width <= 0 ) CV_Error( Error::StsBadArg, "Input window has non-positive sizes" ); window = window & Rect(0, 0, size.width, size.height); double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.; eps = cvRound(eps*eps); int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100; for( i = 0; i < niters; i++ ) { cur_rect = cur_rect & Rect(0, 0, size.width, size.height); if( cur_rect == Rect() ) { cur_rect.x = size.width/2; cur_rect.y = size.height/2; } cur_rect.width = std::max(cur_rect.width, 1); cur_rect.height = std::max(cur_rect.height, 1); Moments m = isUMat ? moments(umat(cur_rect)) : moments(mat(cur_rect)); // Calculating center of mass if( fabs(m.m00) < DBL_EPSILON ) break; int dx = cvRound( m.m10/m.m00 - window.width*0.5 ); int dy = cvRound( m.m01/m.m00 - window.height*0.5 ); int nx = std::min(std::max(cur_rect.x + dx, 0), size.width - cur_rect.width); int ny = std::min(std::max(cur_rect.y + dy, 0), size.height - cur_rect.height); dx = nx - cur_rect.x; dy = ny - cur_rect.y; cur_rect.x = nx; cur_rect.y = ny; // Check for coverage centers mass & window if( dx*dx + dy*dy < eps ) break; } window = cur_rect; return i; }
size_t globalsize[2] = {(size_t)u_bgmodelUsedModes.cols, (size_t)u_bgmodelUsedModes.rows}; return kernel_getBg.run(2, globalsize, NULL, false); } void BackgroundSubtractorMOG2Impl::create_ocl_apply_kernel() { int nchannels = CV_MAT_CN(frameType); bool isFloat = CV_MAKETYPE(CV_32F,nchannels) == frameType; String opts = format("-D CN=%d -D FL=%d -D NMIXTURES=%d%s", nchannels, isFloat, nmixtures, bShadowDetection ? " -D SHADOW_DETECT" : ""); kernel_apply.create("mog2_kernel", ocl::video::bgfg_mog2_oclsrc, opts); } #endif void BackgroundSubtractorMOG2Impl::apply(InputArray _image, OutputArray _fgmask, double learningRate) { CV_INSTRUMENT_REGION() bool needToInitialize = nframes == 0 || learningRate >= 1 || _image.size() != frameSize || _image.type() != frameType; if( needToInitialize ) initialize(_image.size(), _image.type()); #ifdef HAVE_OPENCL if (opencl_ON) { CV_OCL_RUN(_image.isUMat(), ocl_apply(_image, _fgmask, learningRate)) opencl_ON = false; initialize(_image.size(), _image.type());
// Composes the final panorama from registered images: warps every image and
// its mask with the estimated camera parameters, finds seams on downscaled
// copies, then re-warps at compositing resolution, compensates exposure and
// blends everything into 'pano'.
//
// images  optional replacement set of input images (must match the count used
//         during registration); when empty, the previously stored imgs_ are used.
// pano    output panorama (CV_8U after the final conversion).
//
// NOTE: mutates member state (imgs_, seam_est_imgs_, cameras_,
// warped_image_scale_, ...), so it is not re-entrant.
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    vector<Mat> imgs;
    images.getMatVector(imgs);
    if (!imgs.empty())
    {
        // Caller supplied (possibly higher-resolution) replacements for the
        // registered images; rebuild the seam-estimation copies.
        CV_Assert(imgs.size() == imgs_.size());

        Mat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        // Keep only the images that survived registration (indices_).
        vector<Mat> seam_est_imgs_subset;
        vector<Mat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    Mat &pano_ = pano.getMatRef();

#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    vector<Point> corners(imgs_.size());
    vector<Mat> masks_warped(imgs_.size());
    vector<Mat> images_warped(imgs_.size());
    vector<Size> sizes(imgs_.size());
    vector<Mat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        // Scale the intrinsics from registration resolution to seam-estimation resolution.
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0,0) *= (float)seam_work_aspect_;
        K(0,2) *= (float)seam_work_aspect_;
        K(1,1) *= (float)seam_work_aspect_;
        K(1,2) *= (float)seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Find seams
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    Mat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);

        // Read image and resize it if necessary
        full_img = imgs_[img_idx];
        if (!is_compose_scale_set)
        {
            // Compositing resolution is chosen once, from the first image.
            if (compose_resol_ > 0)
                compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale_;
            compose_work_aspect = compose_scale / work_scale_;

            // Update warped image scale
            warped_image_scale_ *= static_cast<float>(compose_work_aspect);
            w = warper_->create((float)warped_image_scale_);

            // Update corners and sizes
            for (size_t i = 0; i < imgs_.size(); ++i)
            {
                // Update intrinsics
                cameras_[i].focal *= compose_work_aspect;
                cameras_[i].ppx *= compose_work_aspect;
                cameras_[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes_[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                }

                Mat K;
                cameras_[i].K().convertTo(K, CV_32F);
                Rect roi = w->warpRoi(sz, K, cameras_[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (std::abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras_[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        // Make sure seam mask has proper size
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());

        // restrict the warped mask to the seam region
        mask_warped = seam_mask & mask_warped;

        if (!is_blender_prepared)
        {
            blender_->prepare(corners, sizes);
            is_blender_prepared = true;
        }

        // Blend the current image
        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender_->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
    // so convert it to avoid user confusing
    result.convertTo(pano_, CV_8U);

    return OK;
}
// Computes the Laplacian of an image.  For ksize 1/3 a fixed 3x3 kernel is
// applied with filter2D; for larger apertures the result is computed as the
// sum of second derivatives d2/dx2 + d2/dy2 using separable Sobel kernels,
// processed in horizontal stripes to bound the working-buffer size.
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;  // negative ddepth means "same depth as the source"
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra-accelerated paths handle the un-scaled, un-shifted cases only.
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 1 && tegra::laplace1(src, dst, borderType))
            return;
        if (ksize == 3 && tegra::laplace3(src, dst, borderType))
            return;
        if (ksize == 5 && tegra::laplace5(src, dst, borderType))
            return;
    }
#endif

    if( ksize == 1 || ksize == 3 )
    {
        // K[0]: standard 4-neighbor Laplacian (ksize==1);
        // K[1]: diagonal variant used for ksize==3.
        float K[2][9] =
        {
            { 0, 1, 0, 1, -4, 1, 0, 1, 0 },
            { 2, 0, 2, 0, -8, 0, 2, 0, 2 }
        };
        Mat kernel(3, 3, CV_32F, K[ksize == 3]);
        if( scale != 1 )
            kernel *= scale;
        filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );
    }
    else
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        const size_t STRIPE_SIZE = 1 << 14;  // bytes of source per processing stripe

        int depth = src.depth();
        int ktype = std::max(CV_32F, std::max(ddepth, depth));
        // intermediate (working) depth: wide enough to hold the derivative sums
        int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, src.channels());
        Mat kd, ks;
        // 2nd-derivative (kd) and smoothing (ks) 1-D Sobel kernel pair
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );

        if( ddepth < 0 )
            ddepth = src.depth();
        int dtype = CV_MAKETYPE(ddepth, src.channels());

        // stripe height chosen so one stripe is ~STRIPE_SIZE bytes of input
        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows);
        // fx computes d2/dx2 (kd horizontally, ks vertically), fy the transpose
        Ptr<FilterEngine> fx = createSeparableLinearFilter(src.type(), wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
        Ptr<FilterEngine> fy = createSeparableLinearFilter(src.type(), wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

        // Stream the image through both filters stripe by stripe and combine
        // the two second derivatives into the destination.
        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
        {
            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
            if( dy > 0 )
            {
                Mat dstripe = dst.rowRange(dsty, dsty + dy);
                d2x.rows = d2y.rows = dy; // modify the headers, which should work
                d2x += d2y;
                d2x.convertTo( dstripe, dtype, scale, delta );
            }
        }
    }
}
// Refines corner locations to sub-pixel accuracy.  For each input corner the
// routine iterates: sample a (2*win+1)-sized window around the current
// estimate with getRectSubPix, build a Gaussian-weighted 2x2 gradient system,
// and solve it for the new position, until the shift is below eps or the
// iteration limit is hit.  Corners that drift farther than the window size
// are reset to their initial position.
//
// _image    single-channel input image.
// _corners  in/out CV_32FC2 vector of corner coordinates, refined in place.
// win       half-size of the search window.
// zeroZone  half-size of the central dead zone excluded from the fit
//           ((-1,-1) disables it).
// criteria  iteration count and/or epsilon termination.
void cv::cornerSubPix( InputArray _image, InputOutputArray _corners, Size win, Size zeroZone, TermCriteria criteria )
{
    const int MAX_ITERS = 100;
    int win_w = win.width * 2 + 1, win_h = win.height * 2 + 1;
    int i, j, k;
    int max_iters = (criteria.type & CV_TERMCRIT_ITER) ? MIN(MAX(criteria.maxCount, 1), MAX_ITERS) : MAX_ITERS;
    double eps = (criteria.type & CV_TERMCRIT_EPS) ? MAX(criteria.epsilon, 0.) : 0;
    eps *= eps; // use square of error in comparison operations

    cv::Mat src = _image.getMat(), cornersmat = _corners.getMat();
    int count = cornersmat.checkVector(2, CV_32F);
    CV_Assert( count >= 0 );
    Point2f* corners = (Point2f*)cornersmat.data;

    if( count == 0 )
        return;

    CV_Assert( win.width > 0 && win.height > 0 );
    // image must leave room for the window plus interpolation margin
    CV_Assert( src.cols >= win.width*2 + 5 && src.rows >= win.height*2 + 5 );
    CV_Assert( src.channels() == 1 );

    Mat maskm(win_h, win_w, CV_32F), subpix_buf(win_h+2, win_w+2, CV_32F);
    float* mask = maskm.ptr<float>();

    // Separable Gaussian weight mask over the window.
    for( i = 0; i < win_h; i++ )
    {
        float y = (float)(i - win.height)/win.height;
        float vy = std::exp(-y*y);
        for( j = 0; j < win_w; j++ )
        {
            float x = (float)(j - win.width)/win.width;
            mask[i * win_w + j] = (float)(vy*std::exp(-x*x));
        }
    }

    // make zero_zone: zero the weights in the central dead zone
    if( zeroZone.width >= 0 && zeroZone.height >= 0 &&
        zeroZone.width * 2 + 1 < win_w && zeroZone.height * 2 + 1 < win_h )
    {
        for( i = win.height - zeroZone.height; i <= win.height + zeroZone.height; i++ )
        {
            for( j = win.width - zeroZone.width; j <= win.width + zeroZone.width; j++ )
            {
                mask[i * win_w + j] = 0;
            }
        }
    }

    // do optimization loop for all the points
    for( int pt_i = 0; pt_i < count; pt_i++ )
    {
        Point2f cT = corners[pt_i], cI = cT;  // cT: initial, cI: current estimate
        int iter = 0;
        double err = 0;

        do
        {
            Point2f cI2;
            double a = 0, b = 0, c = 0, bb1 = 0, bb2 = 0;

            // sample the window (with a 1-pixel border for central differences)
            getRectSubPix(src, Size(win_w+2, win_h+2), cI, subpix_buf, subpix_buf.type());
            const float* subpix = &subpix_buf.at<float>(1,1);

            // process gradient: accumulate the weighted normal equations
            // [a b; b c] * shift = [bb1; bb2]
            for( i = 0, k = 0; i < win_h; i++, subpix += win_w + 2 )
            {
                double py = i - win.height;

                for( j = 0; j < win_w; j++, k++ )
                {
                    double m = mask[k];
                    double tgx = subpix[j+1] - subpix[j-1];
                    double tgy = subpix[j+win_w+2] - subpix[j-win_w-2];
                    double gxx = tgx * tgx * m;
                    double gxy = tgx * tgy * m;
                    double gyy = tgy * tgy * m;
                    double px = j - win.width;

                    a += gxx;
                    b += gxy;
                    c += gyy;

                    bb1 += gxx * px + gxy * py;
                    bb2 += gxy * px + gyy * py;
                }
            }

            double det=a*c-b*b;
            if( fabs( det ) <= DBL_EPSILON*DBL_EPSILON )
                break;  // singular system: gradients give no constraint

            // 2x2 matrix inversion
            double scale=1.0/det;
            cI2.x = (float)(cI.x + c*scale*bb1 - b*scale*bb2);
            cI2.y = (float)(cI.y - b*scale*bb1 + a*scale*bb2);
            err = (cI2.x - cI.x) * (cI2.x - cI.x) + (cI2.y - cI.y) * (cI2.y - cI.y);
            cI = cI2;
            if( cI.x < 0 || cI.x >= src.cols || cI.y < 0 || cI.y >= src.rows )
                break;  // estimate left the image
        }
        while( ++iter < max_iters && err > eps );

        // if new point is too far from initial, it means poor convergence.
        // leave initial point as the result
        if( fabs( cI.x - cT.x ) > win.width || fabs( cI.y - cT.y ) > win.height )
            cI = cT;

        corners[pt_i] = cI;
    }
}
// Apply the previously estimated affine transform to an image.
//
// transformingImage - image to warp.
// output            - destination image (same size as the input).
// flags/borderMode/borderValue - forwarded verbatim to cv::warpAffine.
//
// Precondition: estimateTransformation() must have been called so that
// affineMat is populated; asserts otherwise.
void AffineTransformerImpl::warpImage(InputArray transformingImage, OutputArray output, int flags, int borderMode, const Scalar& borderValue) const
{
    // An affine estimate must exist before any warping can take place.
    CV_Assert(!affineMat.empty());

    // Warp onto a canvas of the same size as the source image.
    const Size dsize = transformingImage.getMat().size();
    warpAffine(transformingImage, output, affineMat, dsize, flags, borderMode, borderValue);
}
UMat dst = _dst.getUMat(); int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src)); idxArg = kernel.set(idxArg, (int)src.step); idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst)); idxArg = kernel.set(idxArg, (int)dst.step); idxArg = kernel.set(idxArg, (int)dst.rows); idxArg = kernel.set(idxArg, (int)dst.cols); idxArg = kernel.set(idxArg, static_cast<float>(delta)); return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false); } } #endif void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType ) { CV_INSTRUMENT_REGION() int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); if (ddepth < 0) ddepth = sdepth; int dtype = CV_MAKE_TYPE(ddepth, cn); _dst.create( _src.size(), dtype ); int ktype = std::max(CV_32F, std::max(ddepth, sdepth)); Mat kx, ky; getDerivKernels( kx, ky, dx, dy, ksize, false, ktype ); if( scale != 1 )
//---------------------------------------------------------------------------- bool FxCompiler::UpdateShader (Shader* shader, const Program& program, InputArray& inputs, OutputArray& outputs, ConstantArray& constants, SamplerArray& samplers) { int numInputs = (int)inputs.size(); if (numInputs != shader->GetNumInputs()) { ReportError("Mismatch in number of inputs.\n"); return false; } int numOutputs = (int)outputs.size(); if (numOutputs != shader->GetNumOutputs()) { ReportError("Mismatch in number of outputs.\n"); return false; } int numConstants = (int)constants.size(); if (numConstants != shader->GetNumConstants()) { ReportError("Mismatch in number of constants.\n"); return false; } int numSamplers = (int)samplers.size(); if (numSamplers != shader->GetNumSamplers()) { ReportError("Mismatch in number of samplers.\n"); return false; } std::string message; int i; for (i = 0; i < numInputs; ++i) { Input& input = inputs[i]; if (input.Name != shader->GetInputName(i)) { message = "Mismatch in input names '" + input.Name + "' and '" + shader->GetInputName(i); ReportError(message); return false; } if (input.Type != shader->GetInputType(i)) { message = "Mismatch in input types '" + msVTName[input.Type] + "' and '" + msVTName[shader->GetInputType(i)]; ReportError(message); return false; } if (input.Semantic != shader->GetInputSemantic(i)) { message = "Mismatch in input semantics '" + msVSName[input.Semantic] + "' and '" + msVSName[shader->GetInputSemantic(i)]; ReportError(message); return false; } } for (i = 0; i < numOutputs; ++i) { Output& output = outputs[i]; if (output.Name != shader->GetOutputName(i)) { message = "Mismatch in output names '" + output.Name + "' and '" + shader->GetOutputName(i); ReportError(message); return false; } if (output.Type != shader->GetOutputType(i)) { message = "Mismatch in output types '" + msVTName[output.Type] + "' and '" + msVTName[shader->GetOutputType(i)]; ReportError(message); return false; } if (output.Semantic != 
shader->GetOutputSemantic(i)) { message = "Mismatch in output semantics '" + msVSName[output.Semantic] + "' and '" + msVSName[shader->GetOutputSemantic(i)]; ReportError(message); return false; } } for (i = 0; i < numConstants; ++i) { Constant& constant = constants[i]; if (constant.Name != shader->GetConstantName(i)) { message = "Mismatch in constant names '" + constant.Name + "' and '" + shader->GetConstantName(i); ReportError(message); return false; } if (constant.NumRegistersUsed != shader->GetNumRegistersUsed(i)) { char number0[8], number1[8]; sprintf(number0, "%d", constant.NumRegistersUsed); sprintf(number1, "%d", shader->GetNumRegistersUsed(i)); message = "Mismatch in constant registers used '" + std::string(number0) + "' and '" + std::string(number1); ReportError(message); return false; } shader->SetBaseRegister(mActiveProfile, i, constant.BaseRegister); } for (i = 0; i < numSamplers; ++i) { Sampler& sampler = samplers[i]; if (sampler.Name != shader->GetSamplerName(i)) { message = "Mismatch in sampler names '" + sampler.Name + "' and '" + shader->GetSamplerName(i); ReportError(message); return false; } if (sampler.Type != shader->GetSamplerType(i)) { message = "Mismatch in sampler types '" + msSTName[sampler.Type] + "' and '" + msSTName[shader->GetSamplerType(i)]; ReportError(message); return false; } shader->SetTextureUnit(mActiveProfile, i, sampler.Unit); } shader->SetProgram(mActiveProfile, program.Text); return true; }
static bool ipp_Deriv(InputArray _src, OutputArray _dst, int dx, int dy, int ksize, double scale, double delta, int borderType) { #ifdef HAVE_IPP_IW CV_INSTRUMENT_REGION_IPP() ::ipp::IwiSize size(_src.size().width, _src.size().height); IppDataType srcType = ippiGetDataType(_src.depth()); IppDataType dstType = ippiGetDataType(_dst.depth()); int channels = _src.channels(); bool useScale = false; bool useScharr = false; if(channels != _dst.channels() || channels > 1) return false; if(fabs(delta) > FLT_EPSILON || fabs(scale-1) > FLT_EPSILON) useScale = true; if(ksize <= 0) { ksize = 3; useScharr = true; } IppiMaskSize maskSize = ippiGetMaskSize(ksize, ksize); if((int)maskSize < 0) return false; #if IPP_VERSION_X100 <= 201703 // Bug with mirror wrap if(borderType == BORDER_REFLECT_101 && (ksize/2+1 > size.width || ksize/2+1 > size.height)) return false; #endif IwiDerivativeType derivType = ippiGetDerivType(dx, dy, (useScharr)?false:true); if((int)derivType < 0) return false; // Acquire data and begin processing try { Mat src = _src.getMat(); Mat dst = _dst.getMat(); ::ipp::IwiImage iwSrc = ippiGetImage(src); ::ipp::IwiImage iwDst = ippiGetImage(dst); ::ipp::IwiImage iwSrcProc = iwSrc; ::ipp::IwiImage iwDstProc = iwDst; ::ipp::IwiBorderSize borderSize(maskSize); ::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize)); if(!ippBorder) return false; if(srcType == ipp8u && dstType == ipp8u) { iwDstProc.Alloc(iwDst.m_size, ipp16s, channels); useScale = true; } else if(srcType == ipp8u && dstType == ipp32f) { iwSrc -= borderSize; iwSrcProc.Alloc(iwSrc.m_size, ipp32f, channels); CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwSrc, iwSrcProc, 1, 0, ::ipp::IwiScaleParams(ippAlgHintFast)); iwSrcProc += borderSize; } if(useScharr) CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterScharr, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder); else CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterSobel, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), 
ippBorder); if(useScale) CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwDstProc, iwDst, scale, delta, ::ipp::IwiScaleParams(ippAlgHintFast)); } catch (::ipp::IwException) { return false; } return true; #else CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(dx); CV_UNUSED(dy); CV_UNUSED(ksize); CV_UNUSED(scale); CV_UNUSED(delta); CV_UNUSED(borderType); return false; #endif }
// Decode a circular fiducial marker from a region of the input image.
//
// img           - grayscale image containing the candidate marker
//                 (assumed single-channel — adaptiveThreshold requires it;
//                 TODO confirm against callers).
// approx_circle - (x, y, radius) of the candidate circle from coarse detection.
//
// Returns a Marker whose position is taken from approx_circle; marker.valid
// stays false unless a refined circle is found and unfold_circle succeeds.
//
// NOTE(review): 'tag' below is a view into the caller's image, so the
// thresholding/morphology here mutates the input pixels in place — confirm
// callers do not reuse the image afterwards.
Marker Detector::decode_marker(InputArray img, Vec3f approx_circle)
{
    Marker marker;
    // TODO: improve accuracy by using x,y from refined circle below
    marker.x = approx_circle[0];
    marker.y = approx_circle[1];

    // ROI around the coarse circle, padded by a factor of 2.
    auto roi = circle2rect(approx_circle, 2);
    // make sure our ROI remains inside the image
    roi = roi & Rect(Point(0,0), img.size());
    Mat tag = img.getMat()(roi);  // view, not a copy — see note above

    //equalizeHist(tag, tag);
    //show_hist(tag);

    // Binarize locally (15x15 Gaussian neighborhood, zero offset), then
    // close small holes with one dilate/erode pass.
    adaptiveThreshold(tag, tag, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY,15,0);
#ifdef DEBUG
    imshow("Tag after thresholding", tag);
#endif
    dilate(tag, tag, MORPHOLOGICAL_ELEMENT);
    erode(tag, tag, MORPHOLOGICAL_ELEMENT);
#ifdef DEBUG
    imshow("Tag after dilation", tag);
    Mat toto;
    Canny(tag, toto, 100, 50);
    imshow("After canny", toto);
#endif

    // Refine the circle inside the binarized ROI; radius is constrained to
    // [16, 19] pixels (presumably the expected marker ring size — verify).
    vector<Vec3f> circles;
    HoughCircles(tag, circles, CV_HOUGH_GRADIENT,
                 2.2, // dp
                 20,  // min dist
                 20,  // param 1
                 10,  // param 2
                 16,  // min radius
                 19); // max radius
    if (circles.empty())
        return marker; // 'marker.valid' is false by default

    // Only the first detected circle is considered.
    auto circle = circles[0];
    roi = circle2rect(circle) & Rect(Point(0,0), tag.size());
    Mat ctag = tag(roi);
    unfold_circle(ctag, marker);  // reads the ring, fills marker.id/theta/valid

#ifdef DEBUG
    // Visualize the refined circle (5x upscaled for visibility).
    Mat cimg;
    cvtColor(tag, cimg, COLOR_GRAY2BGR);
    resize(cimg, cimg, Size(), 5, 5);
    Point center(cvRound(circle[0] * 5), cvRound(circle[1] * 5));
    int radius = cvRound(circle[2] * 5);
    // circle center
    cv::circle( cimg, center, 1, Scalar(0,255,0));
    // circle outline
    cv::circle( cimg, center, radius, Scalar(0,0,255), 1);
    imshow("Tag", cimg);
#endif
#ifdef DEBUG
    if (marker.valid) {
        cout << "Found marker " << (int) marker.id << " at (" << marker.x << ", " << marker.y << ", theta=" << marker.theta << " deg)" << endl;
    }
    waitKey(0);
#endif
    return marker;
}