/**
 * @brief Converts an image between color-invariant representations.
 *
 * @param _src  input image; depth must be CV_8U, CV_16U or CV_32F.
 * @param _dst  output image, allocated here (type depends on the code).
 * @param code  one of the COLOR_INVARIANCE_* conversion codes.
 * @param dcn   requested destination channel count (currently overridden
 *              per conversion code; kept for interface compatibility).
 */
void cvtColorInvariants(InputArray _src, OutputArray _dst, int code, int dcn )
{
    Mat src = _src.getMat(), dst;
    Size sz = src.size();
    int scn = src.channels(), depth = src.depth();
    int blueIdx;

    CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F );

    switch(code)
    {
    case COLOR_INVARIANCE_PASSTHROUGH:
        // Fix: previously only the local `dst` was assigned, so the caller's
        // OutputArray was never filled. Copy the input through to the output.
        src.copyTo(_dst);
        break;

    case COLOR_INVARIANCE_BGR2GAUSSIAN_OPPONENT:
    case COLOR_INVARIANCE_RGB2GAUSSIAN_OPPONENT:
        CV_Assert( scn == 3 || scn == 4 );
        dcn = 3;
        blueIdx = (code == COLOR_INVARIANCE_BGR2GAUSSIAN_OPPONENT) ? 0 : 2;
        _dst.create( sz, CV_MAKETYPE(depth, dcn));
        dst = _dst.getMat();
        // The last functor argument is the value range of the depth:
        // 2^8 for 8-bit, 2^16 for 16-bit (fix: was 2<<16 == 2^17), 1 for float.
        if( depth == CV_8U )
            CvtColorLoop(src, dst, GaussianOpponent<uchar>(scn, dcn, blueIdx, 256));
        else if( depth == CV_16U )
            CvtColorLoop(src, dst, GaussianOpponent<ushort>(scn, dcn, blueIdx, 1<<16));
        else
            CvtColorLoop(src, dst, GaussianOpponent<float>(scn, dcn, blueIdx, 1));
        break;

    case COLOR_INVARIANCE_BGR2HInvariant:
        CV_Assert( scn == 3 || scn == 4 );
        dcn = 1;
        // NOTE(review): only the BGR code reaches this case, so the ternary is
        // currently always 0 — looks like an RGB2HInvariant case label is
        // missing from the switch; confirm against the enum definition.
        blueIdx = (code == COLOR_INVARIANCE_BGR2HInvariant) ? 0 : 2;
        _dst.create( sz, CV_MAKETYPE(depth, dcn));
        dst = _dst.getMat();
        if( depth == CV_8U )
            CvtColorLoop(src, dst, HInvariant<uchar>(scn, dcn, blueIdx, 256));
        else if( depth == CV_16U )
            CvtColorLoop(src, dst, HInvariant<ushort>(scn, dcn, blueIdx, 1<<16));
        else
            CvtColorLoop(src, dst, HInvariant<float>(scn, dcn, blueIdx, 1));
        break;

    case COLOR_INVARIANCE_Gray2YB:
        CV_Assert( scn == 1 );
        dcn = 3;
        // NOTE(review): destination is created as CV_8U for every input depth,
        // yet the 16U/32F functors are still invoked below — confirm the
        // functors always emit 8-bit output before changing this.
        _dst.create( sz, CV_MAKETYPE(CV_8U, 3));
        dst = _dst.getMat();
        if( depth == CV_8U )
            CvtColorLoop(src, dst, Gray2YB<uchar>( 256 ) );
        else if( depth == CV_16U )
            CvtColorLoop(src, dst, Gray2YB<ushort>( 1<<16 ) );
        else
            CvtColorLoop(src, dst, Gray2YB<float>( 1 ) );
        break;

    case COLOR_INVARIANCE_Gray2RG:
        CV_Assert( scn == 1 );
        dcn = 3;
        _dst.create( sz, CV_MAKETYPE(CV_8U, 3));
        dst = _dst.getMat();
        if( depth == CV_8U )
            CvtColorLoop(src, dst, Gray2RG<uchar>( 256 ) );
        else if( depth == CV_16U )
            CvtColorLoop(src, dst, Gray2RG<ushort>( 1<<16 ) );
        else
            CvtColorLoop(src, dst, Gray2RG<float>( 1 ) );
        break;

    default:
        CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
    }
}
// Exposes the model's per-pixel posterior probability map to the caller.
// Allocates (or reuses) a CV_32F buffer of the frame size in _img and
// copies the internal posteriorImage into it.
void BackgroundSubtractorGMG::getPosteriorImage(OutputArray _img)
{
    _img.create(Size(imWidth, imHeight), CV_32F);
    Mat out = _img.getMat();
    posteriorImage.copyTo(out);
}
/**
 * Contrast-preserving color-to-gray conversion (decolorization).
 *
 * @param _src         3-channel input image.
 * @param _dst         CV_8UC1 grayscale output, allocated here.
 * @param _color_boost CV_8UC3 contrast-boosted color output, allocated here.
 *
 * Finds per-basis weights `wei` by iteratively minimizing an energy over
 * the polynomial gradient system, then reconstructs the gray image and a
 * contrast-boosted color image (gray substituted into the L channel of Lab).
 */
void cv::decolor(InputArray _src, OutputArray _dst, OutputArray _color_boost)
{
    CV_INSTRUMENT_REGION();

    Mat I = _src.getMat();
    _dst.create(I.size(), CV_8UC1);
    Mat dst = _dst.getMat();
    _color_boost.create(I.size(), CV_8UC3);
    Mat color_boost = _color_boost.getMat();
    CV_Assert(!I.empty() && (I.channels()==3));

    // Parameter Setting
    const int maxIter = 15;
    const double tol = .0001;           // convergence tolerance on the energy
    int iterCount = 0;
    double E = 0;
    double pre_E = std::numeric_limits<double>::infinity();  // forces first iteration

    // Work in normalized [0,1] float.
    Mat img;
    I.convertTo(img, CV_32FC3, 1.0/255.0);

    // Initialization: gradient system and weak color order constraints.
    Decolor obj;
    vector <double> Cg;                       // gradient magnitudes of the color image
    vector < vector <double> > polyGrad;      // gradients of the polynomial basis terms
    vector <Vec3i> comb;                      // exponent combinations per basis term
    vector <double> alf;                      // weak-order sign hints in [-1,1]
    obj.grad_system(img,polyGrad,Cg,comb);
    obj.weak_order(img,alf);

    // Solver: precompute the weight-update matrix.
    Mat Mt = Mat(int(polyGrad.size()),int(polyGrad[0].size()), CV_32FC1);
    obj.wei_update_matrix(polyGrad,Cg,Mt);
    vector <double> wei;
    obj.wei_inti(comb,wei);

    //////////////////////////////// main loop starting ////////////////////////////////////////
    // All scratch vectors are sized once and reused across iterations.
    vector <double> G_pos(alf.size());
    vector <double> G_neg(alf.size());
    vector <double> EXPsum(G_pos.size());
    vector <double> EXPterm(G_pos.size());
    vector <double> temp(polyGrad[0].size());
    vector <double> temp1(polyGrad[0].size());
    vector <double> temp2(EXPsum.size());
    vector <double> wei1(polyGrad.size());

    while(sqrt(pow(E-pre_E,2)) > tol)   // |E - pre_E| > tol
    {
        iterCount +=1;
        pre_E = E;

        // Current weighted gradient vs. target gradient (±Cg).
        for(size_t i=0; i<polyGrad[0].size(); i++)
        {
            double val = 0.0;
            for(size_t j=0; j<polyGrad.size(); j++)
                val = val + (polyGrad[j][i] * wei[j]);
            temp[i] = val - Cg[i];
            temp1[i] = val + Cg[i];
        }

        // Gaussian likelihood of matching the positive / negative sign target,
        // weighted by the weak-order prior alf.
        for(size_t i=0; i<alf.size(); i++)
        {
            const double sqSigma = obj.sigma * obj.sigma;
            const double pos = ((1 + alf[i])/2) * exp(-1.0 * 0.5 * (temp[i] * temp[i]) / sqSigma);
            const double neg = ((1 - alf[i])/2) * exp(-1.0 * 0.5 * (temp1[i] * temp1[i]) / sqSigma);
            G_pos[i] = pos;
            G_neg[i] = neg;
        }

        for(size_t i=0; i<G_pos.size(); i++)
            EXPsum[i] = G_pos[i]+G_neg[i];

        // temp2 acts as a guard: adds 1 to the denominator only where EXPsum is 0.
        for(size_t i=0; i<EXPsum.size(); i++)
            temp2[i] = (EXPsum[i] == 0) ? 1.0 : 0.0;

        for(size_t i=0; i<G_pos.size(); i++)
            EXPterm[i] = (G_pos[i] - G_neg[i])/(EXPsum[i] + temp2[i]);

        // Weight update: wei1 = Mt * EXPterm.
        for(int i=0; i<int(polyGrad.size()); i++)
        {
            double val1 = 0.0;
            for(int j=0; j<int(polyGrad[0].size()); j++)
            {
                val1 = val1 + (Mt.at<float>(i,j) * EXPterm[j]);
            }
            wei1[i] = val1;
        }
        for(size_t i=0; i<wei.size(); i++)
            wei[i] = wei1[i];

        E = obj.energyCalcu(Cg, polyGrad, wei);
        if(iterCount > maxIter)
            break;
    }

    // Reconstruct the gray image from the converged weights.
    Mat Gray = Mat::zeros(img.size(),CV_32FC1);
    obj.grayImContruct(wei, img, Gray);
    Gray.convertTo(dst,CV_8UC1,255);

    /////////////////////////////////// Contrast Boosting /////////////////////////////////
    // Replace the L channel of the Lab image with the computed gray image.
    Mat lab;
    cvtColor(I,lab,COLOR_BGR2Lab);
    vector <Mat> lab_channel;
    split(lab,lab_channel);
    dst.copyTo(lab_channel[0]);
    merge(lab_channel,lab);
    cvtColor(lab,color_boost,COLOR_Lab2BGR);
}
/**
 * Segments a motion history image (MHI) into connected motion components.
 *
 * @param _mhi          CV_32F motion history image (modified flood-fill seed source;
 *                      FLOODFILL_MASK_ONLY keeps the image itself untouched).
 * @param _segmask      output CV_32F mask; each component is labeled 1,2,3,...
 * @param boundingRects appended with one bounding rectangle per component.
 * @param timestamp     current timestamp; only pixels equal to it seed a fill.
 * @param segThresh     flood-fill tolerance between neighboring MHI values.
 */
void segmentMotion(InputArray _mhi, OutputArray _segmask, vector<Rect>& boundingRects, double timestamp, double segThresh)
{
    Mat mhi = _mhi.getMat();
    _segmask.create(mhi.size(), CV_32F);
    Mat segmask = _segmask.getMat();
    segmask = Scalar::all(0);

    CV_Assert( mhi.type() == CV_32F );
    CV_Assert( segThresh >= 0 );

    // Flood-fill mask needs a 1-pixel border around the image.
    Mat mask = Mat::zeros( mhi.rows + 2, mhi.cols + 2, CV_8UC1 );

    int x, y;

    // protect zero mhi pixels from floodfill.
    // (mask value 1 marks pixels the fill must not enter or relabel)
    for( y = 0; y < mhi.rows; y++ )
    {
        const float* mhiptr = mhi.ptr<float>(y);
        uchar* maskptr = mask.ptr<uchar>(y+1) + 1;
        for( x = 0; x < mhi.cols; x++ )
        {
            if( mhiptr[x] == 0 )
                maskptr[x] = 1;
        }
    }

    float ts = (float)timestamp;
    float comp_idx = 1.f;   // next component label to assign

    for( y = 0; y < mhi.rows; y++ )
    {
        float* mhiptr = mhi.ptr<float>(y);
        uchar* maskptr = mask.ptr<uchar>(y+1) + 1;
        for( x = 0; x < mhi.cols; x++ )
        {
            // Seed a new component at every still-unlabeled pixel that was
            // updated at the current timestamp.
            if( mhiptr[x] == ts && maskptr[x] == 0 )
            {
                Rect cc;
                // FLOODFILL_MASK_ONLY + fill value 2 (2*256) + 4-connectivity:
                // filled pixels are written into `mask` as value 2, `mhi` is untouched.
                floodFill( mhi, mask, Point(x,y), Scalar::all(0), &cc, Scalar::all(segThresh), Scalar::all(segThresh), FLOODFILL_MASK_ONLY + 2*256 + 4 );

                // Transfer the fill (mask==2) into segmask as the component label,
                // and reset those mask pixels to 1 so later fills skip them.
                for( int y1 = 0; y1 < cc.height; y1++ )
                {
                    float* segmaskptr = segmask.ptr<float>(cc.y + y1) + cc.x;
                    uchar* maskptr1 = mask.ptr<uchar>(cc.y + y1 + 1) + cc.x + 1;
                    for( int x1 = 0; x1 < cc.width; x1++ )
                    {
                        if( maskptr1[x1] > 1 )
                        {
                            maskptr1[x1] = 1;
                            segmaskptr[x1] = comp_idx;
                        }
                    }
                }
                comp_idx += 1.f;
                boundingRects.push_back(cc);
            }
        }
    }
}
/**
 * Interactive demo: weighted-mode filtering followed by upsampling, with
 * trackbar-tunable parameters. Runs until 'q' is pressed.
 *
 * @param srcimage     low-resolution input image.
 * @param dest         upsampled result (also displayed each frame).
 * @param resizeFactor upscale factor passed to hqx().
 * @param ref          reference image alpha-blended over the result for comparison.
 */
void guiWeightedModeUpsample(InputArray srcimage, OutputArray dest, int resizeFactor, InputArray ref)
{
    string windowName = "weighted mode";
    namedWindow(windowName);
    Mat src = srcimage.getMat();

    // GUI-controlled parameters.
    int alpha = 0; createTrackbar("a",windowName, &alpha, 100);            // blend with ref [%]
    int sw = 0; createTrackbar("sw",windowName, &sw, 1);                   // upsampler: 0=hqx, 1=SAI
    int sw2 = 0; createTrackbar("sw2",windowName, &sw2, 1);                // enable deblur post-process
    int r = 3; createTrackbar("r",windowName, &r, 30);                     // filter radius
    int sc = 40; createTrackbar("sigma_color",windowName, &sc, 255);
    int ss = 30; createTrackbar("sigma_space",windowName, &ss, 255);
    int sb = 10; createTrackbar("sigma_bin",windowName, &sb, 255);
    int iter = 2; createTrackbar("iteration",windowName, &iter, 10);       // mode-filter passes
    int iter2 = 2; createTrackbar("iteration2",windowName, &iter2, 10);    // deblur iterations

    int key = 0;
    while(key!='q')
    {
        Mat srctemp;
        {
            Mat med;
            // NOTE(review): ksize==1 makes medianBlur an identity copy — the
            // joint image `med` equals the input; confirm whether a real
            // kernel size was intended here.
            medianBlur(srcimage, med,1);
            src.copyTo(srctemp);
            CalcTime t;   // scoped timer (RAII-style, presumably prints on destruction — confirm)

            // Iterated weighted-mode filtering guided by `med`.
            for(int i=0;i<iter;i++)
            {
                Mat tmp = srctemp.clone();
                weightedModeFilter(srctemp, med, tmp, r, ss, sc, 2, sb);
                tmp.copyTo(srctemp);
            }

            //upsampling function
            if(sw==0) hqx(srctemp, dest, resizeFactor);
            else SAI(srctemp, dest);
            //else resize(srctemp, dest, Size(src.cols*resizeFactor, src.rows*resizeFactor), 0,0, INTER_LANCZOS4);
        }

        //shock filter
        if(sw2!=0)
        {
            Mat a = dest.getMat();
            //blurRemoveMinMax(a,a,2);
            iterativeBackProjectionDeblurGaussian(a, a, Size(9,9), 3, 0.2, iter2);
            a.copyTo(dest);
        }

        //blending referece image for debug
        alphaBlend(ref, dest, alpha/100.0, dest);
        imshow(windowName, dest);
        key = waitKey(30);

        // 'f' toggles the blend between fully-result and fully-reference.
        if(key=='f')
        {
            alpha = (alpha != 0) ? 0:100;
            setTrackbarPos("a", windowName, alpha);
        }
    }
    destroyWindow(windowName);
}
/**
 * Triangulates 3D points from their projections in two or more views.
 *
 * @param _points2d            vector of 2xN point matrices, one per view
 *                             (converted to CV_64F if necessary).
 * @param _projection_matrices vector of 3x4 projection matrices, one per view.
 * @param _points3d            output 3xN CV_64F matrix of triangulated points.
 *
 * Two views use the DLT pair solver; more views use the n-view solver.
 */
void triangulatePoints(InputArrayOfArrays _points2d, InputArrayOfArrays _projection_matrices, OutputArray _points3d)
{
    // check
    const size_t nviews = (unsigned) _points2d.total();
    CV_Assert(nviews >= 2 && nviews == _projection_matrices.total());

    // inputs
    size_t n_points;
    vector<Mat_<double> > points2d(nviews);
    vector<Matx34d> projection_matrices(nviews);
    {
        vector<Mat> raw_points;
        _points2d.getMatVector(raw_points);
        n_points = raw_points[0].cols;

        vector<Mat> raw_projections;
        _projection_matrices.getMatVector(raw_projections);

        // Validate dimensions and normalize everything to double precision.
        for (size_t v = 0; v < nviews; ++v)
        {
            CV_Assert(raw_points[v].rows == 2 && raw_points[v].cols == n_points);
            if (raw_points[v].type() == CV_64F)
                points2d[v] = raw_points[v];
            else
                raw_points[v].convertTo(points2d[v], CV_64F);

            CV_Assert(raw_projections[v].rows == 3 && raw_projections[v].cols == 4);
            if (raw_projections[v].type() == CV_64F)
                projection_matrices[v] = raw_projections[v];
            else
                raw_projections[v].convertTo(projection_matrices[v], CV_64F);
        }
    }

    // output
    _points3d.create(3, n_points, CV_64F);
    cv::Mat points3d = _points3d.getMat();

    if (nviews == 2)
    {
        // Two-view case: direct linear transform on each correspondence pair.
        const Mat_<double>& xl = points2d[0];
        const Mat_<double>& xr = points2d[1];
        const Matx34d& Pl = projection_matrices[0];   // left matrix projection
        const Matx34d& Pr = projection_matrices[1];   // right matrix projection

        for (unsigned i = 0; i < n_points; ++i)
        {
            Vec3d X;
            triangulateDLT(Vec2d(xl(0,i), xl(1,i)), Vec2d(xr(0,i), xr(1,i)), Pl, Pr, X);
            for (int axis = 0; axis < 3; ++axis)
                points3d.at<double>(axis, i) = X[axis];
        }
    }
    else if (nviews > 2)
    {
        // General case: gather one 2D observation per view and solve jointly.
        for (unsigned i = 0; i < n_points; ++i)
        {
            Mat_<double> x(2, nviews);
            for (unsigned v = 0; v < nviews; ++v)
                points2d.at(v).col(i).copyTo(x.col(v));

            Vec3d X;
            nViewTriangulate(x, projection_matrices, X);
            for (int axis = 0; axis < 3; ++axis)
                points3d.at<double>(axis, i) = X[axis];
        }
    }
}
void compute(InputArray leftarr, InputArray rightarr, OutputArray disparr) { int dtype = disparr.fixedType() ? disparr.type() : params.dispType; Size leftsize = leftarr.size(); if (leftarr.size() != rightarr.size()) CV_Error(Error::StsUnmatchedSizes, "All the images must have the same size"); if (leftarr.type() != CV_8UC1 || rightarr.type() != CV_8UC1) CV_Error(Error::StsUnsupportedFormat, "Both input images must have CV_8UC1"); if (dtype != CV_16SC1 && dtype != CV_32FC1) CV_Error(Error::StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format"); if (params.preFilterType != PREFILTER_NORMALIZED_RESPONSE && params.preFilterType != PREFILTER_XSOBEL) CV_Error(Error::StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE"); if (params.preFilterSize < 5 || params.preFilterSize > 255 || params.preFilterSize % 2 == 0) CV_Error(Error::StsOutOfRange, "preFilterSize must be odd and be within 5..255"); if (params.preFilterCap < 1 || params.preFilterCap > 63) CV_Error(Error::StsOutOfRange, "preFilterCap must be within 1..63"); if (params.kernelSize < 5 || params.kernelSize > 255 || params.kernelSize % 2 == 0 || params.kernelSize >= std::min(leftsize.width, leftsize.height)) CV_Error(Error::StsOutOfRange, "kernelSize must be odd, be within 5..255 and be not larger than image width or height"); if (params.numDisparities <= 0 || params.numDisparities % 16 != 0) CV_Error(Error::StsOutOfRange, "numDisparities must be positive and divisble by 16"); if (params.textureThreshold < 0) CV_Error(Error::StsOutOfRange, "texture threshold must be non-negative"); if (params.uniquenessRatio < 0) CV_Error(Error::StsOutOfRange, "uniqueness ratio must be non-negative"); int FILTERED = (params.minDisparity - 1) << DISPARITY_SHIFT; Mat left0 = leftarr.getMat(), right0 = rightarr.getMat(); Mat disp0 = disparr.getMat(); int width = left0.cols; int height = left0.rows; if(previous_size != width * height) { previous_size = width * height; 
speckleX.create(height,width,CV_32SC4); speckleY.create(height,width,CV_32SC4); puss.create(height,width,CV_32SC4); censusImage[0].create(left0.rows,left0.cols,CV_32SC4); censusImage[1].create(left0.rows,left0.cols,CV_32SC4); partialSumsLR.create(left0.rows + 1,(left0.cols + 1) * (params.numDisparities + 1),CV_16S); agregatedHammingLRCost.create(left0.rows + 1,(left0.cols + 1) * (params.numDisparities + 1),CV_16S); hammingDistance.create(left0.rows, left0.cols * (params.numDisparities + 1),CV_16S); preFilteredImg0.create(left0.size(), CV_8U); preFilteredImg1.create(left0.size(), CV_8U); aux.create(height,width,CV_8UC1); } Mat left = preFilteredImg0, right = preFilteredImg1; int ndisp = params.numDisparities; int wsz = params.kernelSize; int bufSize0 = (int)((ndisp + 2)*sizeof(int)); bufSize0 += (int)((height + wsz + 2)*ndisp*sizeof(int)); bufSize0 += (int)((height + wsz + 2)*sizeof(int)); bufSize0 += (int)((height + wsz + 2)*ndisp*(wsz + 2)*sizeof(uchar) + 256); int bufSize1 = (int)((width + params.preFilterSize + 2) * sizeof(int) + 256); if(params.usePrefilter == true) { uchar *_buf = slidingSumBuf.ptr(); parallel_for_(Range(0, 2), PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, ¶ms), 1); } else if(params.usePrefilter == false) { left = left0; right = right0; } if(params.kernelType == CV_SPARSE_CENSUS) { censusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_SPARSE_CENSUS); } else if(params.kernelType == CV_DENSE_CENSUS) { censusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_SPARSE_CENSUS); } else if(params.kernelType == CV_CS_CENSUS) { symetricCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_CS_CENSUS); } else if(params.kernelType == CV_MODIFIED_CS_CENSUS) { symetricCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_MODIFIED_CS_CENSUS); } else if(params.kernelType == CV_MODIFIED_CENSUS_TRANSFORM) { 
modifiedCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_MODIFIED_CENSUS_TRANSFORM,0); } else if(params.kernelType == CV_MEAN_VARIATION) { parSumsIntensityImage[0].create(left0.rows, left0.cols,CV_32SC4); parSumsIntensityImage[1].create(left0.rows, left0.cols,CV_32SC4); Integral[0].create(left0.rows,left0.cols,CV_32SC4); Integral[1].create(left0.rows,left0.cols,CV_32SC4); integral(left, parSumsIntensityImage[0],CV_32S); integral(right, parSumsIntensityImage[1],CV_32S); imageMeanKernelSize(parSumsIntensityImage[0], params.kernelSize,Integral[0]); imageMeanKernelSize(parSumsIntensityImage[1], params.kernelSize, Integral[1]); modifiedCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_MEAN_VARIATION,0,Integral[0], Integral[1]); } else if(params.kernelType == CV_STAR_KERNEL) { starCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1]); } hammingDistanceBlockMatching(censusImage[0], censusImage[1], hammingDistance); costGathering(hammingDistance, partialSumsLR); blockAgregation(partialSumsLR, params.agregationWindowSize, agregatedHammingLRCost); dispartyMapFormation(agregatedHammingLRCost, disp0, 3); Median1x9Filter<uint8_t>(disp0, aux); Median9x1Filter<uint8_t>(aux,disp0); if(params.regionRemoval == CV_SPECKLE_REMOVAL_AVG_ALGORITHM) { smallRegionRemoval<uint8_t>(disp0,params.speckleWindowSize,disp0); } else if(params.regionRemoval == CV_SPECKLE_REMOVAL_ALGORITHM) { if (params.speckleRange >= 0 && params.speckleWindowSize > 0) filterSpeckles(disp0, FILTERED, params.speckleWindowSize, params.speckleRange, slidingSumBuf); } }
/******************************************************************************* * Function: subtractBGOpenDiagonal * Description: BG subtraction via opening with diagonal structuring elements * Arguments: inImg - input image bgsImg - BG subtracted image threshVal - threshold value for converting to binary image seLength - length of structuring elements * Returns: void * Comments: * Revision: *******************************************************************************/ int FGExtraction::subtractBGOpenDiagonal(InputArray src, OutputArray dst, int threshVal, int seLength) { // generate binary image by thresholding Mat bin; double thresh = threshold(src, bin, threshVal, 255, THRESH_BINARY); // opening by horizontal structuring element //Mat structElemHorizontal = Mat::ones(1, seLength, CV_8U); //morphologyEx(bin, dst, MORPH_OPEN, structElemHorizontal); // opening by vertical structuring element //Mat structElemVertical = Mat::ones(seLength, 1, CV_8U); //morphologyEx(dst, dst, MORPH_OPEN, structElemVertical); //imshow("src", src); //imshow("bin", bin); //waitKey(0); // opening by first diagonal structuring element Mat structElemBackSlash = Mat::eye(seLength, seLength, CV_8U); morphologyEx(bin, dst, MORPH_OPEN, structElemBackSlash); //imshow("dst1", dst); //waitKey(0); // opening by second diagonal structuring element Mat structElemSlash; flip(structElemBackSlash, structElemSlash, 0); morphologyEx(dst, dst, MORPH_OPEN, structElemSlash); //imshow("dst2", dst); //waitKey(0); // eliminate small noise Mat structElemEllip = getStructuringElement(MORPH_ELLIPSE, Size(seLength, seLength)); morphologyEx(dst, dst, MORPH_OPEN, structElemEllip); //imshow("dst3", dst); //waitKey(0); // get object size Mat dstImg = dst.getMat(); vector<vector<Point>> contours = extractContours(dstImg); if (contours.size()==0) return 1; Mat mask = Mat::zeros(_bgsImg.size(), CV_8U); vector<int> areas(contours.size()); int cnt = 0; int argMax = 0; int max_area = 0; for(vector<vector<Point> 
>::const_iterator it = contours.begin(); it != contours.end(); ++it){ Rect uprightBox = boundingRect(*it); areas[cnt] = uprightBox.height*uprightBox.width; if (areas[cnt]>max_area) { max_area = areas[cnt]; argMax = cnt; } cnt++; } vector<Point> largestContour = contours[argMax]; //***** only use the largest contour RotatedRect orientedBox = orientedBoundingBox(largestContour); int updateSeL = int(min(orientedBox.size.width, orientedBox.size.height)/5.0+0.5); // opening by first diagonal structuring element structElemBackSlash = Mat::eye(updateSeL, updateSeL, CV_8U); morphologyEx(bin, dst, MORPH_OPEN, structElemBackSlash); //imshow("dst1", dst); //waitKey(0); // opening by second diagonal structuring element flip(structElemBackSlash, structElemSlash, 0); morphologyEx(dst, dst, MORPH_OPEN, structElemSlash); //imshow("dst2", dst); //waitKey(0); // eliminate small noise structElemEllip = getStructuringElement(MORPH_ELLIPSE, Size(updateSeL, updateSeL)); morphologyEx(dst, dst, MORPH_OPEN, structElemEllip); //imshow("dst3", dst); //waitKey(0); return 0; }
/**
 * Compares a template against overlapping image regions.
 *
 * @param _img    image to search in (CV_8U or CV_32F; 1-4 channels).
 * @param _templ  template, same type as the image; if larger than the image,
 *                the two are swapped.
 * @param _result single-channel CV_32F map of comparison results, of size
 *                (img - templ + 1).
 * @param method  one of the CV_TM_* comparison modes.
 *
 * The raw cross-correlation is computed first; SQDIFF/CCOEFF variants and all
 * normalized variants are then derived from it using integral images of the
 * search image (sums and squared sums), without re-scanning each window.
 */
void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method )
{
    CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
    // numType: 0 = CCORR family, 1 = CCOEFF family, 2 = SQDIFF family.
    int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                  method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    bool isNormed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    Mat img = _img.getMat(), templ = _templ.getMat();
    // Allow the arguments in either order: the smaller one is the template.
    if( img.rows < templ.rows || img.cols < templ.cols )
        std::swap(img, templ);

    CV_Assert( (img.depth() == CV_8U || img.depth() == CV_32F) &&
               img.type() == templ.type() );

    Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
    _result.create(corrSize, CV_32F);
    Mat result = _result.getMat();

    int cn = img.channels();
    // Base cross-correlation; everything else is a correction on top of it.
    crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);

    if( method == CV_TM_CCORR )
        return;

    double invArea = 1./((double)templ.rows * templ.cols);

    Mat sum, sqsum;
    Scalar templMean, templSdv;
    double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;   // sqsum window corner pointers
    double templNorm = 0, templSum2 = 0;

    if( method == CV_TM_CCOEFF )
    {
        // Only the plain sum integral and the template mean are needed.
        integral(img, sum, CV_64F);
        templMean = mean(templ);
    }
    else
    {
        integral(img, sum, sqsum, CV_64F);
        meanStdDev( templ, templMean, templSdv );

        templNorm = CV_SQR(templSdv[0]) + CV_SQR(templSdv[1]) +
                    CV_SQR(templSdv[2]) + CV_SQR(templSdv[3]);

        // Degenerate template (zero variance): CCOEFF_NORMED is identically 1.
        if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            result = Scalar::all(1);
            return;
        }

        templSum2 = templNorm +
                    CV_SQR(templMean[0]) + CV_SQR(templMean[1]) +
                    CV_SQR(templMean[2]) + CV_SQR(templMean[3]);

        if( numType != 1 )
        {
            // CCORR/SQDIFF: no mean subtraction; normalize by raw energy.
            templMean = Scalar::all(0);
            templNorm = templSum2;
        }

        templSum2 /= invArea;                // scale back to a window sum
        templNorm = sqrt(templNorm);
        templNorm /= sqrt(invArea); // care of accuracy here

        q0 = (double*)sqsum.data;
        q1 = q0 + templ.cols*cn;
        q2 = (double*)(sqsum.data + templ.rows*sqsum.step);
        q3 = q2 + templ.cols*cn;
    }

    // Window-corner pointers into the sum integral image.
    double* p0 = (double*)sum.data;
    double* p1 = p0 + templ.cols*cn;
    double* p2 = (double*)(sum.data + templ.rows*sum.step);
    double* p3 = p2 + templ.cols*cn;

    int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0;
    int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0;

    int i, j, k;

    for( i = 0; i < result.rows; i++ )
    {
        float* rrow = (float*)(result.data + i*result.step);
        int idx = i * sumstep;
        int idx2 = i * sqstep;

        for( j = 0; j < result.cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wndMean2 = 0, wndSum2 = 0;

            if( numType == 1 )
            {
                // CCOEFF: subtract window-sum * template-mean per channel.
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wndMean2 += CV_SQR(t);
                    num -= t*templMean[k];
                }
                wndMean2 *= invArea;
            }

            if( isNormed || numType == 2 )
            {
                // Window energy from the squared-sum integral image.
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wndSum2 += t;
                }

                if( numType == 2 )
                    // SQDIFF = |window|^2 - 2*ccorr + |template|^2.
                    num = wndSum2 - 2*num + templSum2;
            }

            if( isNormed )
            {
                t = sqrt(MAX(wndSum2 - wndMean2,0))*templNorm;
                if( fabs(num) < t )
                    num /= t;
                else if( fabs(num) < t*1.125 )
                    // Slight overshoot from rounding: clamp to +/-1.
                    num = num > 0 ? 1 : -1;
                else
                    // Far outside the valid range: degenerate window.
                    num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }
}
/**
 * Computes a uniform Local Binary Pattern (ULBP) image.
 *
 * @tparam _Tp       pixel type of the source image.
 * @param _src       input single-channel image.
 * @param _dst       output CV_32SC1 image, (rows-2*radius) x (cols-2*radius);
 *                   each pixel holds 1+index of its pattern in m_uniform,
 *                   or 0 for non-uniform patterns.
 * @param radius     sampling circle radius.
 * @param neighbors  number of samples on the circle; must be 8.
 * @param m_uniform  lookup table of the 58 uniform 8-bit LBP codes.
 */
template <typename _Tp> static inline void ulbp_(InputArray _src, OutputArray _dst, int radius, int neighbors, vector<int> m_uniform)
{
    // NOTE(review): console message + system("pause") + exit() is a hard abort;
    // consider CV_Error / an exception so callers can recover. Left unchanged
    // here to preserve behavior.
    if (neighbors != 8 || m_uniform.size() != 58) {
        cout << "neighbor must be 8! and uniform size be 58!\n";
        system("pause");
        exit(-1);
    }
    //get matrices
    Mat src = _src.getMat();
    // allocate memory for result
    _dst.create(src.rows-2*radius, src.cols-2*radius, CV_32SC1);
    Mat dst = _dst.getMat();
    // zero
    dst.setTo(0);

    // First pass: accumulate the raw LBP code, one bit per neighbor.
    for(int n=0; n<neighbors; n++) {
        // sample points (position of neighbor n on the circle)
        float x = static_cast<float>(-radius) * sin(2.0*CV_PI*n/static_cast<float>(neighbors));
        float y = static_cast<float>(radius) * cos(2.0*CV_PI*n/static_cast<float>(neighbors));
        // relative indices (integer corners around the sample point)
        int fx = static_cast<int>(floor(x));
        int fy = static_cast<int>(floor(y));
        int cx = static_cast<int>(ceil(x));
        int cy = static_cast<int>(ceil(y));
        // fractional part
        float ty = y - fy;
        float tx = x - fx;
        // set interpolation weights (bilinear)
        float w1 = (1 - tx) * (1 - ty);
        float w2 = tx * (1 - ty);
        float w3 = (1 - tx) * ty;
        float w4 = tx * ty;
        // iterate through your data
        for(int i=radius; i < src.rows-radius; i++) {
            for(int j=radius; j < src.cols-radius; j++) {
                // calculate interpolated value of neighbor n at (i,j)
                float t = w1*src.at<_Tp>(i+fy,j+fx) + w2*src.at<_Tp>(i+fy,j+cx) + w3*src.at<_Tp>(i+cy,j+fx) + w4*src.at<_Tp>(i+cy,j+cx);
                // Set bit n if the neighbor is >= the center pixel.
                // floating point precision, so check some machine-dependent epsilon
                dst.at<int>(i-radius,j-radius) += ((t > src.at<_Tp>(i,j)) || (std::abs(t-src.at<_Tp>(i,j)) < std::numeric_limits<float>::epsilon())) << n;
            }
        }
    }

    // Second pass: remap raw codes to the uniform-pattern index space:
    // uniform codes -> (index in m_uniform)+1, everything else -> 0.
    for (int i = 0; i < dst.rows; ++i) {
        for (int j = 0; j < dst.cols; ++j) {
            int data = dst.at<int>(i, j);
            vector<int>::iterator iter = find(m_uniform.begin(), m_uniform.end(), data);
            if (iter == m_uniform.end()) {
                dst.at<int>(i, j) = 0;
            } else {
                int new_data = iter - m_uniform.begin() ;
                dst.at<int>(i, j) = new_data + 1;
            }
        }
    }
}
/**
 * Computes the raw Shi-Tomasi corner response for every pixel.
 *
 * Builds the structure tensor (Mx2, My2, Mxy) from Gaussian-weighted image
 * derivatives, writes the smaller eigenvalue of the tensor into _corner,
 * thresholds weak responses, and (in debug mode) visualizes the results.
 *
 * @param _image  input 8-bit image.
 * @param _corner output response matrix; must be pre-allocated CV_32SC1 of
 *                the same size as the image (see TODO below).
 */
void FeatureShiCorner::computeRawCornerMat( InputArray _image, OutputArray _corner )
{
    // TODO check: _corner must be CV_32SC1
    const Mat image = _image.getMat();
    const int height = image.rows;
    const int width  = image.cols;
    const int radius = 1;

    Mat derX( height, width, CV_32SC1, Scalar( 0 ) );
    Mat derY( height, width, CV_32SC1, Scalar( 0 ) );
    Mat Mx2( height, width, CV_32SC1, Scalar( 0 ) );
    Mat My2( height, width, CV_32SC1, Scalar( 0 ) );
    Mat Mxy( height, width, CV_32SC1, Scalar( 0 ) );

    // Image derivatives via separable filters.
    applyFilter< uchar, int32_t >( _image, derX, &filter_derX[0][0], 3, 1, 0, true );
    applyFilter< uchar, int32_t >( _image, derY, &filter_derY[0][0], 1, 3, 0, true );

    // Normalization constant of the Gaussian window.
    int normDivisor = 0;
    const int * pGauss  = &FeatureShiCorner::filter_gauss[0][0];
    int const * pGaussE = pGauss + 9;
    for(; pGauss != pGaussE; pGauss++ )
    {
        normDivisor += abs( *pGauss );
    }

    // Gaussian-weighted structure tensor per pixel (borders clamped by skipping).
    int32_t maxVal = 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
        {
            for( int dy = -radius; dy <= radius; dy++ )
            {
                for( int dx = -radius; dx <= radius; dx++ )
                {
                    int fx = x + dx;
                    if( (fx < 0) || (fx >= width) ) { continue; }
                    int fy = y + dy;
                    if( (fy < 0) || (fy >= height) ) { continue; }

                    int f = FeatureShiCorner::filter_gauss[(radius + dx)][(radius + dy)];
                    // Fix: use plain multiplication instead of pow() for squaring.
                    const int32_t gx = derX.at< int32_t >( fy, fx );
                    const int32_t gy = derY.at< int32_t >( fy, fx );
                    Mx2.at< int32_t >( y, x ) += f * gx * gx;
                    My2.at< int32_t >( y, x ) += f * gy * gy;
                    Mxy.at< int32_t >( y, x ) += f * gx * gy;
                }
            }

            Mx2.at< int32_t >( y, x ) /= normDivisor;
            My2.at< int32_t >( y, x ) /= normDivisor;
            Mxy.at< int32_t >( y, x ) /= normDivisor;

            maxVal = max( Mx2.at< int32_t >( y, x ), maxVal );
            maxVal = max( My2.at< int32_t >( y, x ), maxVal );
            maxVal = max( Mxy.at< int32_t >( y, x ), maxVal );
        }
    }

    Mat corners = _corner.getMat();
    const auto it_cE = corners.end< int32_t >();
    auto it_cS   = corners.begin< int32_t >();
    auto it_Mx2S = Mx2.begin< int32_t >();
    auto it_My2S = My2.begin< int32_t >();
    auto it_MxyS = Mxy.begin< int32_t >();

    // reduce to high values if necessary
    // maxval: 0..1 * 255^2, maxval^2 should not overflow for the next step
    // reduce to sqrt( 2^31-1 (signed int) ) -> 46340
    const int maxValC = 46340;
    if( maxVal > maxValC )
    {
        cout << "maxVal > maxValC | maxVal: " << maxVal << endl;
        const double scaleFac = maxValC / (double) maxVal; // scaleFac = 0.xxxx
        while( it_cS != it_cE )
        {
            *it_cS   *= int32_t( scaleFac );
            *it_Mx2S *= int32_t( scaleFac );
            *it_My2S *= int32_t( scaleFac );
            *it_MxyS *= int32_t( scaleFac );
            it_cS++; it_Mx2S++; it_My2S++; it_MxyS++;
        }
        // reset iterators
        it_cS   = corners.begin< int32_t >();
        it_Mx2S = Mx2.begin< int32_t >();
        it_My2S = My2.begin< int32_t >();
        it_MxyS = Mxy.begin< int32_t >();
    }

    maxVal = 0;

    // Eigenvalues of the 2x2 structure tensor:
    //   eig = trc/2 +/- sqrt(trc^2/4 - det)
    // Fix: trace/determinant are now computed in double. The old code used
    // int32 arithmetic, so (trc * trc) could overflow (trc may reach ~2*46340)
    // and the /4 truncated before sqrt.
    double trc, det, ev_sqrt, trc_halve, eigVal1, eigVal2;
    while( it_cS != it_cE )
    {
        trc = double( *it_Mx2S ) + double( *it_My2S );
        det = double( *it_Mx2S ) * double( *it_My2S ) - double( *it_MxyS ) * double( *it_MxyS );
        ev_sqrt = sqrt( (trc * trc) / 4.0 - det );
        trc_halve = trc / 2.0;
        eigVal1 = trc_halve + ev_sqrt;
        eigVal2 = trc_halve - ev_sqrt;

        // Negative eigenvalues carry no corner information: clamp to zero.
        if( (eigVal1 < 0) || (eigVal2 < 0) )
        {
            eigVal1 = 0;
            eigVal2 = 0;
        }

        // Shi-Tomasi response: the smaller eigenvalue.
        *it_cS = (int32_t) min( eigVal1, eigVal2 );
        maxVal = max( (int32_t) min( eigVal1, eigVal2 ), maxVal );
        it_cS++; it_Mx2S++; it_My2S++; it_MxyS++;
    }

    // Suppress responses below 20% of the maximum.
    // (loop iterators renamed to avoid shadowing the outer it_cS/it_cE)
    if( maxVal != 0 )
    {
        const double threshold = maxVal * 0.2;
        for( auto itEnd = corners.end< int32_t >(), it = corners.begin< int32_t >(); it != itEnd; ++it )
        {
            if( *it < threshold ) { *it = 0; }
        }
    }

    // Non-maximum suppression over a 5x5 window.
    // NOTE(review): cornersFiltered is local and only used for the debug
    // display below — confirm whether it was meant to be written back.
    Mat cornersFiltered( height, width, CV_32SC1 );
    maxFilter< int32_t >( corners, cornersFiltered, 5, 5 );

    if( isDebugMode )
    {
        Mat derXd, derYd, cornersd;
        cornersFiltered.convertTo( cornersd, CV_8UC1 );
        derX.convertTo( derXd, CV_8UC1 );
        derY.convertTo( derYd, CV_8UC1 );

        // Display corners over the image (cross)
        Mat cornersdc = image.clone();
        auto cornerPoints = genPoints( cornersFiltered );
        for( auto p : cornerPoints )
        {
            for( int dx = -2; dx <= 2; dx++ )
            {
                int x = p.first + dx;
                int y = p.second;
                if( ( x < 0) || ( x >= width) ) { continue; }
                cornersdc.at< uchar >( y, x ) = 0;
            }
            for( int dy = -2; dy <= 2; dy++ )
            {
                int x = p.first;
                int y = p.second + dy;
                if( ( y < 0) || ( y >= height) ) { continue; }
                cornersdc.at< uchar >( y, x ) = 0;
            }
        }

        imshow( "image", image );
        imshow( "derX", derXd );
        imshow( "derY", derYd );
        imshow( "Shi Corner", cornersd );
        imshow( "Shi Corner Image", cornersdc );
        waitKey( 0 );
        destroyAllWindows();
        waitKey( 1 );
    }
}
/**
 * Contrast-preserving color-to-gray conversion (decolorization), plus a
 * contrast-boosted color image (gray substituted into the L channel of Lab).
 *
 * @param _src         3-channel input image; prints a message and returns
 *                     early on empty or non-3-channel input.
 * @param _dst         CV_8UC1 grayscale output, allocated here.
 * @param _color_boost CV_8UC3 boosted color output, allocated here.
 */
void cv::decolor(InputArray _src, OutputArray _dst, OutputArray _color_boost)
{
    Mat I = _src.getMat();
    _dst.create(I.size(), CV_8UC1);
    Mat dst = _dst.getMat();

    _color_boost.create(I.size(), CV_8UC3);
    Mat color_boost = _color_boost.getMat();

    if(!I.data )
    {
        cout << "Could not open or find the image" << endl ;
        return;
    }
    if(I.channels() !=3)
    {
        cout << "Input Color Image" << endl;
        return;
    }

    // Parameter Setting
    int maxIter = 15;
    int iterCount = 0;
    double tol = .0001;                 // convergence tolerance on the energy
    double E = 0;
    double pre_E = std::numeric_limits<double>::infinity();  // forces first iteration

    Decolor obj;

    // Work in normalized [0,1] float.
    Mat img = Mat(I.size(),CV_32FC3);
    I.convertTo(img,CV_32FC3,1.0/255.0);

    // Initialization
    obj.init();

    vector <double> Cg;                       // gradient magnitudes of the color image
    vector < vector <double> > polyGrad;      // gradients of the polynomial basis terms
    vector < vector < int > > comb;           // exponent combinations per basis term
    vector <double> alf;                      // weak-order sign hints

    obj.grad_system(img,polyGrad,Cg,comb);
    obj.weak_order(img,alf);

    // Solver (explicit int casts avoid size_t -> int narrowing warnings)
    Mat Mt = Mat(int(polyGrad.size()),int(polyGrad[0].size()), CV_32FC1);
    obj.wei_update_matrix(polyGrad,Cg,Mt);

    vector <double> wei;
    obj.wei_inti(comb,wei);

    //////////////////////////////// main loop starting ////////////////////////////////////////

    while(sqrt(pow(E-pre_E,2)) > tol)   // |E - pre_E| > tol
    {
        iterCount +=1;
        pre_E = E;

        // All scratch vectors are loop-scoped, so the explicit .clear() calls
        // the original had at the end of each iteration were dead code.
        vector <double> G_pos;
        vector <double> G_neg;

        // Current weighted gradient vs. target gradient (+/- Cg).
        vector <double> temp;
        vector <double> temp1;
        double val = 0.0;
        for(unsigned int i=0;i< polyGrad[0].size();i++)
        {
            val = 0.0;
            for(unsigned int j =0;j<polyGrad.size();j++)
                val = val + (polyGrad[j][i] * wei[j]);
            temp.push_back(val - Cg[i]);
            temp1.push_back(val + Cg[i]);
        }

        // Gaussian likelihood of matching the positive / negative sign target.
        // (pow(x,2) replaced with x*x in the hot loop)
        double pos = 0.0;
        double neg = 0.0;
        for(unsigned int i =0;i<alf.size();i++)
        {
            pos = ((1 + alf[i])/2) * exp((-1.0 * 0.5 * (temp[i]*temp[i]))/(obj.sigma*obj.sigma));
            neg = ((1 - alf[i])/2) * exp((-1.0 * 0.5 * (temp1[i]*temp1[i]))/(obj.sigma*obj.sigma));
            G_pos.push_back(pos);
            G_neg.push_back(neg);
        }

        vector <double> EXPsum;
        vector <double> EXPterm;

        for(unsigned int i = 0;i<G_pos.size();i++)
            EXPsum.push_back(G_pos[i]+G_neg[i]);

        // temp2 guards the division: adds 1 to the denominator only where EXPsum is 0.
        vector <double> temp2;
        for(unsigned int i=0;i<EXPsum.size();i++)
        {
            if(EXPsum[i] == 0)
                temp2.push_back(1.0);
            else
                temp2.push_back(0.0);
        }

        for(unsigned int i =0; i < G_pos.size();i++)
            EXPterm.push_back((G_pos[i] - G_neg[i])/(EXPsum[i] + temp2[i]));

        // Weight update: wei1 = Mt * EXPterm.
        double val1 = 0.0;
        vector <double> wei1;
        for(unsigned int i=0;i< polyGrad.size();i++)
        {
            val1 = 0.0;
            for(unsigned int j =0;j<polyGrad[0].size();j++)
            {
                val1 = val1 + (Mt.at<float>(i,j) * EXPterm[j]);
            }
            wei1.push_back(val1);
        }

        for(unsigned int i =0;i<wei.size();i++)
            wei[i] = wei1[i];

        E = obj.energyCalcu(Cg,polyGrad,wei);

        if(iterCount > maxIter)
            break;
    }

    // Reconstruct the gray image from the converged weights.
    Mat Gray = Mat::zeros(img.size(),CV_32FC1);
    obj.grayImContruct(wei, img, Gray);
    Gray.convertTo(dst,CV_8UC1,255);

    /////////////////////////////////// Contrast Boosting /////////////////////////////////
    // (unused local `Mat color` removed; `lab` is filled by cvtColor, so no
    // preallocation is needed)
    Mat lab;
    cvtColor(I,lab,COLOR_BGR2Lab);

    vector <Mat> lab_channel;
    split(lab,lab_channel);

    dst.copyTo(lab_channel[0]);
    merge(lab_channel,lab);

    cvtColor(lab,color_boost,COLOR_Lab2BGR);
}
/* Computes first-order spatial derivatives dx, dy of an 8-bit single-channel
 * image using a fixed 3x3 Sobel kernel, into CV_16SC1 outputs.
 * Supports BORDER_DEFAULT (reflect-101) and BORDER_REPLICATE only.
 * A SIMD fast path (when compiled with CV_SIMD128 && CV_SSE2) processes two
 * rows and 16 columns per iteration; the scalar tail below finishes whatever
 * the vector loops did not cover (tracked via i_start/j_start).
 */
void spatialGradient( InputArray _src, OutputArray _dx, OutputArray _dy,
                      int ksize, int borderType )
{
    CV_INSTRUMENT_REGION()

    // Prepare InputArray src
    Mat src = _src.getMat();
    CV_Assert( !src.empty() );
    CV_Assert( src.type() == CV_8UC1 );
    CV_Assert( borderType == BORDER_DEFAULT || borderType == BORDER_REPLICATE );

    // Prepare OutputArrays dx, dy
    _dx.create( src.size(), CV_16SC1 );
    _dy.create( src.size(), CV_16SC1 );
    Mat dx = _dx.getMat(), dy = _dy.getMat();

    // TODO: Allow for other kernel sizes
    CV_Assert(ksize == 3);

    // Get dimensions
    const int H = src.rows, W = src.cols;

    // Row, column indices
    int i = 0, j = 0;

    // Handle border types
    int i_top = 0,         // Case for H == 1 && W == 1 && BORDER_REPLICATE
        i_bottom = H - 1,
        j_offl = 0,        // j offset from 0th pixel to reach -1st pixel
        j_offr = 0;        // j offset from W-1th pixel to reach Wth pixel

    if ( borderType == BORDER_DEFAULT ) // Equiv. to BORDER_REFLECT_101
    {
        if ( H > 1 )
        {
            i_top    = 1;
            i_bottom = H - 2;
        }
        if ( W > 1 )
        {
            j_offl = 1;
            j_offr = -1;
        }
    }

    // Pointer to row vectors
    uchar *p_src, *c_src, *n_src; // previous, current, next row
    short *c_dx, *c_dy;

    int i_start = 0;
    int j_start = 0;
#if CV_SIMD128 && CV_SSE2
    if(hasSIMD128())
    {
        uchar *m_src;           // row at offset +2 (next pair's "next" row)
        short *n_dx, *n_dy;

        // Characters in variable names have the following meanings:
        // u: unsigned char
        // s: signed int
        //
        // [row][column]
        // m: offset -1
        // n: offset 0
        // p: offset 1
        // Example: umn is offset -1 in row and offset 0 in column
        for ( i = 0; i < H - 1; i += 2 )
        {
            if ( i == 0 ) p_src = src.ptr<uchar>(i_top);
            else          p_src = src.ptr<uchar>(i-1);

            c_src = src.ptr<uchar>(i);
            n_src = src.ptr<uchar>(i+1);

            if ( i == H - 2 ) m_src = src.ptr<uchar>(i_bottom);
            else              m_src = src.ptr<uchar>(i+2);

            c_dx = dx.ptr<short>(i);
            c_dy = dy.ptr<short>(i);
            n_dx = dx.ptr<short>(i+1);
            n_dy = dy.ptr<short>(i+1);

            // Mask selecting the last lane when stitching the center column
            // vector out of the shifted -1/+1 column loads.
            v_uint8x16 v_select_m = v_uint8x16(0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0, 0xFF);

            // Process rest of columns 16-column chunks at a time
            for ( j = 1; j < W - 16; j += 16 )
            {
                // Load top row for 3x3 Sobel filter
                v_uint8x16 v_um = v_load(&p_src[j-1]);
                v_uint8x16 v_up = v_load(&p_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_uint8x16 v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                                       v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_uint16x8 v_um1, v_um2, v_un1, v_un2, v_up1, v_up2;
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s1m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s1m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s1n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s1n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s1p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s1p2 = v_reinterpret_as_s16(v_up2);

                // Load second row for 3x3 Sobel filter
                v_um = v_load(&c_src[j-1]);
                v_up = v_load(&c_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s2m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s2m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s2n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s2n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s2p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s2p2 = v_reinterpret_as_s16(v_up2);

                // Load third row for 3x3 Sobel filter
                v_um = v_load(&n_src[j-1]);
                v_up = v_load(&n_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s3m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s3m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s3n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s3n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s3p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s3p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 1, 2, 3
                v_int16x8 v_sdx1, v_sdy1;
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s1m1, v_s1n1, v_s1p1,
                                                  v_s2m1,         v_s2p1,
                                                  v_s3m1, v_s3n1, v_s3p1 );

                v_int16x8 v_sdx2, v_sdy2;
                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s1m2, v_s1n2, v_s1p2,
                                                  v_s2m2,         v_s2p2,
                                                  v_s3m2, v_s3n2, v_s3p2 );

                // Store
                v_store(&c_dx[j],   v_sdx1);
                v_store(&c_dx[j+8], v_sdx2);
                v_store(&c_dy[j],   v_sdy1);
                v_store(&c_dy[j+8], v_sdy2);

                // Load fourth row for 3x3 Sobel filter
                v_um = v_load(&m_src[j-1]);
                v_up = v_load(&m_src[j+1]);
                // TODO: Replace _mm_slli_si128 with hal method
                v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)),
                                            v_uint8x16(_mm_srli_si128(v_um.val, 1)));
                v_expand(v_um, v_um1, v_um2);
                v_expand(v_un, v_un1, v_un2);
                v_expand(v_up, v_up1, v_up2);
                v_int16x8 v_s4m1 = v_reinterpret_as_s16(v_um1);
                v_int16x8 v_s4m2 = v_reinterpret_as_s16(v_um2);
                v_int16x8 v_s4n1 = v_reinterpret_as_s16(v_un1);
                v_int16x8 v_s4n2 = v_reinterpret_as_s16(v_un2);
                v_int16x8 v_s4p1 = v_reinterpret_as_s16(v_up1);
                v_int16x8 v_s4p2 = v_reinterpret_as_s16(v_up2);

                // dx & dy for rows 2, 3, 4 (second output row of the pair;
                // rows 2-3 vectors are reused from above)
                spatialGradientKernel<v_int16x8>( v_sdx1, v_sdy1,
                                                  v_s2m1, v_s2n1, v_s2p1,
                                                  v_s3m1,         v_s3p1,
                                                  v_s4m1, v_s4n1, v_s4p1 );

                spatialGradientKernel<v_int16x8>( v_sdx2, v_sdy2,
                                                  v_s2m2, v_s2n2, v_s2p2,
                                                  v_s3m2,         v_s3p2,
                                                  v_s4m2, v_s4n2, v_s4p2 );

                // Store
                v_store(&n_dx[j],   v_sdx1);
                v_store(&n_dx[j+8], v_sdx2);
                v_store(&n_dy[j],   v_sdy1);
                v_store(&n_dy[j+8], v_sdy2);
            }
        }
    }
    // Record where the vector loops stopped so the scalar pass below only
    // fills in the remainder.
    i_start = i;
    j_start = j;
#endif
    int j_p, j_n;
    uchar v00, v01, v02, v10, v11, v12, v20, v21, v22;
    for ( i = 0; i < H; i++ )
    {
        if ( i == 0 ) p_src = src.ptr<uchar>(i_top);
        else          p_src = src.ptr<uchar>(i-1);

        c_src = src.ptr<uchar>(i);

        if ( i == H - 1 ) n_src = src.ptr<uchar>(i_bottom);
        else              n_src = src.ptr<uchar>(i+1);

        c_dx = dx.ptr<short>(i);
        c_dy = dy.ptr<short>(i);

        // Process left-most column
        j = 0;
        j_p = j + j_offl;
        j_n = 1;
        if ( j_n >= W ) j_n = j + j_offr;   // degenerate W == 1 case
        v00 = p_src[j_p]; v01 = p_src[j]; v02 = p_src[j_n];
        v10 = c_src[j_p]; v11 = c_src[j]; v12 = c_src[j_n];
        v20 = n_src[j_p]; v21 = n_src[j]; v22 = n_src[j_n];
        spatialGradientKernel<short>( c_dx[0], c_dy[0], v00, v01, v02, v10,
                                      v12, v20, v21, v22 );
        v00 = v01; v10 = v11; v20 = v21;
        v01 = v02; v11 = v12; v21 = v22;

        // Process middle columns: rows already covered by SIMD resume at
        // j_start; rows below i_start restart from column 1.
        j = i >= i_start ? 1 : j_start;
        j_p = j - 1;
        v00 = p_src[j_p]; v01 = p_src[j];
        v10 = c_src[j_p]; v11 = c_src[j];
        v20 = n_src[j_p]; v21 = n_src[j];

        for ( ; j < W - 1; j++ )
        {
            // Get values for next column
            j_n = j + 1;
            v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );

            // Move values back one column for next iteration
            v00 = v01; v10 = v11; v20 = v21;
            v01 = v02; v11 = v12; v21 = v22;
        }

        // Process right-most column
        if ( j < W )
        {
            j_n = j + j_offr;
            v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n];
            spatialGradientKernel<short>( c_dx[j], c_dy[j], v00, v01, v02, v10,
                                          v12, v20, v21, v22 );
        }
    }
}
/* RANSAC-based Perspective-n-Point pose estimation.
 * Estimates rvec/tvec from 3D-2D correspondences, robust to outliers:
 * RANSAC iterations run in parallel, then (except for P3P) the pose is
 * refined on the inlier set with a final solvePnP call.
 * On failure (too few inliers) returns identity rotation and zero
 * translation, and releases _inliers if requested.
 */
void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, int minInliersCount,
                        OutputArray _inliers, int flags)
{
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    // Object points: contiguous float, 3 coordinates per point.
    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    // Image points: contiguous float, 2 coordinates per point.
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);
    Mat rvec = _rvec.getMat();
    Mat tvec = _tvec.getMat();

    // Normalize both sets to single-row multi-channel layout.
    Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);

    if (minInliersCount <= 0)
        minInliersCount = objectPoints.cols;   // default: demand all points inline
    cv::pnpransac::Parameters params;
    params.iterationsCount = iterationsCount;
    params.minInliersCount = minInliersCount;
    params.reprojectionError = reprojectionError;
    params.useExtrinsicGuess = useExtrinsicGuess;
    params.camera.init(cameraMatrix, distCoeffs);
    params.flags = flags;

    vector<int> localInliers;
    Mat localRvec, localTvec;
    // Copies so an extrinsic guess in rvec/tvec can seed the solver.
    rvec.copyTo(localRvec);
    tvec.copyTo(localTvec);

    if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
    {
        parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
                                                                               localRvec, localTvec, localInliers));
    }

    if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
    {
        // Refine the pose on inliers only (P3P cannot take the guess-based
        // refinement path, so it keeps the raw RANSAC estimate).
        if (flags != CV_P3P)
        {
            int i, pointsCount = (int)localInliers.size();
            Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
            for (i = 0; i < pointsCount; i++)
            {
                int index = localInliers[i];
                Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
                imagePoints.col(index).copyTo(colInlierImagePoints);
                Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
                objectPoints.col(index).copyTo(colInlierObjectPoints);
            }
            solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
        }
        localRvec.copyTo(rvec);
        localTvec.copyTo(tvec);
        if (_inliers.needed())
            Mat(localInliers).copyTo(_inliers);
    }
    else
    {
        // Not enough inliers: report the identity pose.
        tvec.setTo(Scalar(0));
        Mat R = Mat::eye(3, 3, CV_64F);
        Rodrigues(R, rvec);
        if( _inliers.needed() )
            _inliers.release();
    }
    return;
}
/* IPP-accelerated Sobel/Scharr derivative filter.
 * Returns true if IPP handled the whole operation, false to tell the caller
 * to fall back to the generic implementation. ksize <= 0 selects the Scharr
 * 3x3 kernel. Only single-channel images with matching src/dst channel
 * counts are accepted.
 */
static bool ipp_Deriv(InputArray _src, OutputArray _dst, int dx, int dy, int ksize, double scale, double delta, int borderType)
{
#ifdef HAVE_IPP_IW
    CV_INSTRUMENT_REGION_IPP()

    ::ipp::IwiSize size(_src.size().width, _src.size().height);
    IppDataType   srcType   = ippiGetDataType(_src.depth());
    IppDataType   dstType   = ippiGetDataType(_dst.depth());
    int           channels  = _src.channels();
    bool          useScale  = false;
    bool          useScharr = false;

    if(channels != _dst.channels() || channels > 1)
        return false;

    // A non-trivial scale/delta requires an extra iwiScale pass afterwards.
    if(fabs(delta) > FLT_EPSILON || fabs(scale-1) > FLT_EPSILON)
        useScale = true;

    if(ksize <= 0)
    {
        ksize     = 3;
        useScharr = true;
    }

    IppiMaskSize maskSize = ippiGetMaskSize(ksize, ksize);
    if((int)maskSize < 0)
        return false;

#if IPP_VERSION_X100 <= 201703
    // Bug with mirror wrap
    if(borderType == BORDER_REFLECT_101 && (ksize/2+1 > size.width || ksize/2+1 > size.height))
        return false;
#endif

    IwiDerivativeType derivType = ippiGetDerivType(dx, dy, (useScharr)?false:true);
    if((int)derivType < 0)
        return false;

    // Acquire data and begin processing
    try
    {
        Mat src = _src.getMat();
        Mat dst = _dst.getMat();
        ::ipp::IwiImage iwSrc = ippiGetImage(src);
        ::ipp::IwiImage iwDst = ippiGetImage(dst);
        ::ipp::IwiImage iwSrcProc = iwSrc;
        ::ipp::IwiImage iwDstProc = iwDst;
        ::ipp::IwiBorderSize  borderSize(maskSize);
        ::ipp::IwiBorderType  ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
        if(!ippBorder)
            return false;

        if(srcType == ipp8u && dstType == ipp8u)
        {
            // 8u->8u: filter into a 16s intermediate, scale back afterwards.
            iwDstProc.Alloc(iwDst.m_size, ipp16s, channels);
            useScale = true;
        }
        else if(srcType == ipp8u && dstType == ipp32f)
        {
            // 8u->32f: pre-convert the source (including its border margin)
            // to float before filtering.
            iwSrc -= borderSize;
            iwSrcProc.Alloc(iwSrc.m_size, ipp32f, channels);
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwSrc, iwSrcProc, 1, 0, ::ipp::IwiScaleParams(ippAlgHintFast));
            iwSrcProc += borderSize;
        }

        if(useScharr)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterScharr, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);
        else
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterSobel, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);

        if(useScale)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwDstProc, iwDst, scale, delta, ::ipp::IwiScaleParams(ippAlgHintFast));
    }
    catch (::ipp::IwException)   // NOTE(review): catches by value; IPP-IW sample code does the same
    {
        return false;
    }

    return true;
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(dx); CV_UNUSED(dy); CV_UNUSED(ksize); CV_UNUSED(scale); CV_UNUSED(delta); CV_UNUSED(borderType);
    return false;
#endif
}
/* Laplacian operator.
 * For ksize 1/3 applies the classic 3x3 Laplacian (or its ksize==3
 * "double" variant) via filter2D. For larger apertures it computes
 * d2/dx2 + d2/dy2 with two separable Sobel second-derivative filters,
 * processed stripe by stripe to bound the working-buffer size.
 */
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra fast paths only cover the unscaled, unshifted case.
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 1 && tegra::laplace1(src, dst, borderType))
            return;
        if (ksize == 3 && tegra::laplace3(src, dst, borderType))
            return;
        if (ksize == 5 && tegra::laplace5(src, dst, borderType))
            return;
    }
#endif

    if( ksize == 1 || ksize == 3 )
    {
        // K[0]: standard 4-neighbor Laplacian; K[1]: ksize==3 variant.
        float K[2][9] =
        {
            { 0, 1, 0, 1, -4, 1, 0, 1, 0 },
            { 2, 0, 2, 0, -8, 0, 2, 0, 2 }
        };
        Mat kernel(3, 3, CV_32F, K[ksize == 3]);
        if( scale != 1 )
            kernel *= scale;
        filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );
    }
    else
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        const size_t STRIPE_SIZE = 1 << 14;   // bytes per processing stripe

        int depth = src.depth();
        int ktype = std::max(CV_32F, std::max(ddepth, depth));
        // Intermediate depth: 16s is enough for small kernels on 8u input.
        int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, src.channels());
        Mat kd, ks;
        // kd: second-derivative kernel, ks: smoothing kernel.
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );

        if( ddepth < 0 )
            ddepth = src.depth();
        int dtype = CV_MAKETYPE(ddepth, src.channels());

        // Rows per stripe, clamped to [1, src.rows].
        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows);
        // fx computes d2x (deriv along x, smooth along y); fy the transpose.
        Ptr<FilterEngine> fx = createSeparableLinearFilter(src.type(),
            wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
        Ptr<FilterEngine> fy = createSeparableLinearFilter(src.type(),
            wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
        {
            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
            if( dy > 0 )
            {
                Mat dstripe = dst.rowRange(dsty, dsty + dy);
                d2x.rows = d2y.rows = dy; // modify the headers, which should work
                d2x += d2y;
                d2x.convertTo( dstripe, dtype, scale, delta );
            }
        }
    }
}
static void getSobelKernels( OutputArray _kx, OutputArray _ky, int dx, int dy, int _ksize, bool normalize, int ktype ) { int i, j, ksizeX = _ksize, ksizeY = _ksize; if( ksizeX == 1 && dx > 0 ) ksizeX = 3; if( ksizeY == 1 && dy > 0 ) ksizeY = 3; CV_Assert( ktype == CV_32F || ktype == CV_64F ); _kx.create(ksizeX, 1, ktype, -1, true); _ky.create(ksizeY, 1, ktype, -1, true); Mat kx = _kx.getMat(); Mat ky = _ky.getMat(); if( _ksize % 2 == 0 || _ksize > 31 ) CV_Error( CV_StsOutOfRange, "The kernel size must be odd and not larger than 31" ); std::vector<int> kerI(std::max(ksizeX, ksizeY) + 1); CV_Assert( dx >= 0 && dy >= 0 && dx+dy > 0 ); for( int k = 0; k < 2; k++ ) { Mat* kernel = k == 0 ? &kx : &ky; int order = k == 0 ? dx : dy; int ksize = k == 0 ? ksizeX : ksizeY; CV_Assert( ksize > order ); if( ksize == 1 ) kerI[0] = 1; else if( ksize == 3 ) { if( order == 0 ) kerI[0] = 1, kerI[1] = 2, kerI[2] = 1; else if( order == 1 ) kerI[0] = -1, kerI[1] = 0, kerI[2] = 1; else kerI[0] = 1, kerI[1] = -2, kerI[2] = 1; } else { int oldval, newval; kerI[0] = 1; for( i = 0; i < ksize; i++ ) kerI[i+1] = 0; for( i = 0; i < ksize - order - 1; i++ ) { oldval = kerI[0]; for( j = 1; j <= ksize; j++ ) { newval = kerI[j]+kerI[j-1]; kerI[j-1] = oldval; oldval = newval; } } for( i = 0; i < order; i++ ) { oldval = -kerI[0]; for( j = 1; j <= ksize; j++ ) { newval = kerI[j-1] - kerI[j]; kerI[j-1] = oldval; oldval = newval; } } } Mat temp(kernel->rows, kernel->cols, CV_32S, &kerI[0]); double scale = !normalize ? 1. : 1./(1 << (ksize-order-1)); temp.convertTo(*kernel, ktype, scale); } }
/* SIFT keypoint detection and/or descriptor extraction.
 * With useProvidedKeypoints the octave range is derived from the given
 * keypoints instead of detecting new ones; otherwise keypoints are found
 * as DoG scale-space extrema, deduplicated, optionally capped at
 * nfeatures, rescaled when a -1 first octave (2x upsampled base) was
 * used, and filtered by the pixel mask. Descriptors are computed only if
 * _descriptors is needed.
 */
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray _descriptors,
                      bool useProvidedKeypoints)
{
    // firstOctave == -1 means the pyramid starts from a 2x upscaled image.
    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
    Mat image = _image.getMat(), mask = _mask.getMat();

    if( image.empty() || image.depth() != CV_8U )
        CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );

    if( !mask.empty() && mask.type() != CV_8UC1 )
        CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

    if( useProvidedKeypoints )
    {
        // Derive the octave range covered by the caller's keypoints.
        firstOctave = 0;
        int maxOctave = INT_MIN;
        for( size_t i = 0; i < keypoints.size(); i++ )
        {
            int octave, layer;
            float scale;
            unpackOctave(keypoints[i], octave, layer, scale);
            firstOctave = std::min(firstOctave, octave);
            maxOctave = std::max(maxOctave, octave);
            actualNLayers = std::max(actualNLayers, layer-2);
        }

        firstOctave = std::min(firstOctave, 0);
        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
        actualNOctaves = maxOctave - firstOctave + 1;
    }

    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
    std::vector<Mat> gpyr, dogpyr;
    // Default octave count: log2(min dimension) - 2, shifted by firstOctave.
    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;

    //double t, tf = getTickFrequency();
    //t = (double)getTickCount();
    buildGaussianPyramid(base, gpyr, nOctaves);
    buildDoGPyramid(gpyr, dogpyr);

    //t = (double)getTickCount() - t;
    //printf("pyramid construction time: %g\n", t*1000./tf);

    if( !useProvidedKeypoints )
    {
        //t = (double)getTickCount();
        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
        KeyPointsFilter::removeDuplicated( keypoints );

        if( nfeatures > 0 )
            KeyPointsFilter::retainBest(keypoints, nfeatures);
        //t = (double)getTickCount() - t;
        //printf("keypoint detection time: %g\n", t*1000./tf);

        if( firstOctave < 0 )
            // Detected on the upsampled base: map coordinates/sizes back to
            // the original image resolution and re-pack the octave byte.
            for( size_t i = 0; i < keypoints.size(); i++ )
            {
                KeyPoint& kpt = keypoints[i];
                float scale = 1.f/(float)(1 << -firstOctave);
                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
                kpt.pt *= scale;
                kpt.size *= scale;
            }

        if( !mask.empty() )
            KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }
    else
    {
        // filter keypoints by mask
        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }

    if( _descriptors.needed() )
    {
        //t = (double)getTickCount();
        int dsize = descriptorSize();
        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
        Mat descriptors = _descriptors.getMat();

        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
        //t = (double)getTickCount() - t;
        //printf("descriptor extraction time: %g\n", t*1000./tf);
    }
}
/* Convex hull of a 2D point set via Sklansky's scan.
 * Points are sorted by x, then four monotone chains (top-left/top-right/
 * bottom-left/bottom-right around the min/max-y extremes) are built with
 * Sklansky_ and stitched together in the requested orientation.
 * Output is either point coordinates or CV_32S indices into the input,
 * depending on returnPoints / the fixed type of _hull.
 */
void convexHull( InputArray _points, OutputArray _hull, bool clockwise, bool returnPoints )
{
    CV_INSTRUMENT_REGION()

    // In-place operation is not supported.
    CV_Assert(_points.getObj() != _hull.getObj());
    Mat points = _points.getMat();
    int i, total = points.checkVector(2), depth = points.depth(), nout = 0;
    int miny_ind = 0, maxy_ind = 0;
    CV_Assert(total >= 0 && (depth == CV_32F || depth == CV_32S));

    if( total == 0 )
    {
        _hull.release();
        return;
    }

    // A fixed-type output overrides returnPoints: CV_32S means indices.
    returnPoints = !_hull.fixedType() ? returnPoints : _hull.type() != CV_32S;

    bool is_float = depth == CV_32F;
    AutoBuffer<Point*> _pointer(total);
    AutoBuffer<int> _stack(total + 2), _hullbuf(total);
    Point** pointer = _pointer;
    // Integer and float points share the same layout, so one pointer array
    // serves both element types.
    Point2f** pointerf = (Point2f**)pointer;
    Point* data0 = points.ptr<Point>();
    int* stack = _stack;
    int* hullbuf = _hullbuf;

    CV_Assert(points.isContinuous());
    for( i = 0; i < total; i++ )
        pointer[i] = &data0[i];

    // sort the point set by x-coordinate, find min and max y
    if( !is_float )
    {
        std::sort(pointer, pointer + total, CHullCmpPoints<int>());
        for( i = 1; i < total; i++ )
        {
            int y = pointer[i]->y;
            if( pointer[miny_ind]->y > y )
                miny_ind = i;
            if( pointer[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }
    else
    {
        std::sort(pointerf, pointerf + total, CHullCmpPoints<float>());
        for( i = 1; i < total; i++ )
        {
            float y = pointerf[i]->y;
            if( pointerf[miny_ind]->y > y )
                miny_ind = i;
            if( pointerf[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }

    // All points coincide (first == last after sorting): hull is one point.
    if( pointer[0]->x == pointer[total-1]->x &&
        pointer[0]->y == pointer[total-1]->y )
    {
        hullbuf[nout++] = 0;
    }
    else
    {
        // upper half
        int *tl_stack = stack;
        int tl_count = !is_float ?
            Sklansky_( pointer, 0, maxy_ind, tl_stack, -1, 1) :
            Sklansky_( pointerf, 0, maxy_ind, tl_stack, -1, 1);
        int *tr_stack = stack + tl_count;
        int tr_count = !is_float ?
            Sklansky_( pointer, total-1, maxy_ind, tr_stack, -1, -1) :
            Sklansky_( pointerf, total-1, maxy_ind, tr_stack, -1, -1);

        // gather upper part of convex hull to output
        if( !clockwise )
        {
            std::swap( tl_stack, tr_stack );
            std::swap( tl_count, tr_count );
        }

        for( i = 0; i < tl_count-1; i++ )
            hullbuf[nout++] = int(pointer[tl_stack[i]] - data0);
        for( i = tr_count - 1; i > 0; i-- )
            hullbuf[nout++] = int(pointer[tr_stack[i]] - data0);
        // Remember where the upper chain ends to detect the degenerate
        // all-collinear case below.
        int stop_idx = tr_count > 2 ? tr_stack[1] : tl_count > 2 ? tl_stack[tl_count - 2] : -1;

        // lower half
        int *bl_stack = stack;
        int bl_count = !is_float ?
            Sklansky_( pointer, 0, miny_ind, bl_stack, 1, -1) :
            Sklansky_( pointerf, 0, miny_ind, bl_stack, 1, -1);
        int *br_stack = stack + bl_count;
        int br_count = !is_float ?
            Sklansky_( pointer, total-1, miny_ind, br_stack, 1, 1) :
            Sklansky_( pointerf, total-1, miny_ind, br_stack, 1, 1);

        if( clockwise )
        {
            std::swap( bl_stack, br_stack );
            std::swap( bl_count, br_count );
        }

        if( stop_idx >= 0 )
        {
            int check_idx = bl_count > 2 ? bl_stack[1] :
            bl_count + br_count > 2 ? br_stack[2-bl_count] : -1;
            if( check_idx == stop_idx || (check_idx >= 0 &&
                                          pointer[check_idx]->x == pointer[stop_idx]->x &&
                                          pointer[check_idx]->y == pointer[stop_idx]->y) )
            {
                // if all the points lie on the same line, then
                // the bottom part of the convex hull is the mirrored top part
                // (except the extreme points).
                bl_count = MIN( bl_count, 2 );
                br_count = MIN( br_count, 2 );
            }
        }

        for( i = 0; i < bl_count-1; i++ )
            hullbuf[nout++] = int(pointer[bl_stack[i]] - data0);
        for( i = br_count-1; i > 0; i-- )
            hullbuf[nout++] = int(pointer[br_stack[i]] - data0);
    }

    if( !returnPoints )
        Mat(nout, 1, CV_32S, hullbuf).copyTo(_hull);
    else
    {
        _hull.create(nout, 1, CV_MAKETYPE(depth, 2));
        Mat hull = _hull.getMat();
        size_t step = !hull.isContinuous() ? hull.step[0] : sizeof(Point);
        for( i = 0; i < nout; i++ )
            *(Point*)(hull.ptr() + i*step) = data0[hullbuf[i]];
    }
}
/* Laplacian operator, IPP-accelerated variant.
 * Tries the IPP FilterLaplacianBorder path for the 3x3/5x5 8u->16s and
 * 32f->32f single-channel cases (with optional integer scale/delta applied
 * as separate MulC/AddC passes); on IPP success it returns early.
 *
 * NOTE(review): this block appears TRUNCATED in this chunk -- the closing
 * of the outer `if`, the `#endif` for HAVE_IPP, and the generic (non-IPP)
 * fallback implementation are not visible here. Code kept byte-identical.
 */
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
    if ((ksize == 3 || ksize == 5) &&
        ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) &&
        ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)))
    {
        // IPP's MulC/AddC take integer factors for the 16s path, so check
        // whether scale/delta are representable and actually needed.
        int iscale = saturate_cast<int>(scale), idelta = saturate_cast<int>(delta);
        bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1;
        bool floatDelta = std::fabs(delta - idelta) > DBL_EPSILON, needDelta = delta != 0;
        int borderTypeNI = borderType & ~BORDER_ISOLATED;
        Mat src = _src.getMat(), dst = _dst.getMat();

        if (src.data != dst.data)   // IPP path requires distinct buffers
        {
            Ipp32s bufsize;
            IppStatus status = (IppStatus)-1;
            IppiSize roisize = { src.cols, src.rows };
            IppiMaskSize masksize = ksize == 3 ? ippMskSize3x3 : ippMskSize5x5;
            IppiBorderType borderTypeIpp = ippiGetBorderType(borderTypeNI);

// Runs the IPP Laplacian for the given pixel types, allocating and freeing
// the scratch buffer IPP needs; leaves the result code in `status`.
#define IPP_FILTER_LAPLACIAN(ippsrctype, ippdsttype, ippfavor) \
    do \
    { \
        if (borderTypeIpp >= 0 && ippiFilterLaplacianGetBufferSize_##ippfavor##_C1R(roisize, masksize, &bufsize) >= 0) \
        { \
            Ipp8u * buffer = ippsMalloc_8u(bufsize); \
            status = ippiFilterLaplacianBorder_##ippfavor##_C1R((const ippsrctype *)src.data, (int)src.step, (ippdsttype *)dst.data, \
                (int)dst.step, roisize, masksize, borderTypeIpp, 0, buffer); \
            ippsFree(buffer); \
        } \
    } while ((void)0, 0)

            CV_SUPPRESS_DEPRECATED_START
            if (sdepth == CV_8U && ddepth == CV_16S && !floatScale && !floatDelta)
            {
                IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s);
                if (needScale && status >= 0)
                    status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
                if (needDelta && status >= 0)
                    status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
            }
            else if (sdepth == CV_32F && ddepth == CV_32F)
            {
                IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f);
                if (needScale && status >= 0)
                    status = ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, roisize);
                if (needDelta && status >= 0)
                    status = ippiAddC_32f_C1IR((Ipp32f)delta, (Ipp32f *)dst.data, (int)dst.step, roisize);
            }
            CV_SUPPRESS_DEPRECATED_END

            if (status >= 0)
                return;
        }
/* Computes the gradient orientation of a motion history image (MHI).
 * orient receives the per-pixel gradient direction in degrees; mask is set
 * to 1 where the gradient is valid, i.e. the gradient is not vanishingly
 * small AND the local min/max MHI difference (via erode/dilate) lies within
 * [delta1, delta2].
 */
void calcMotionGradient( InputArray _mhi, OutputArray _mask,
                         OutputArray _orientation,
                         double delta1, double delta2,
                         int aperture_size )
{
    // NOTE(review): `runcase` is written but never read here -- looks like
    // leftover debugging state.
    static int runcase = 0; runcase++;

    Mat mhi = _mhi.getMat();
    Size size = mhi.size();

    _mask.create(size, CV_8U);
    _orientation.create(size, CV_32F);

    Mat mask = _mask.getMat();
    Mat orient = _orientation.getMat();

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( Error::StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( Error::StsOutOfRange, "both delta's must be positive" );

    if( mhi.type() != CV_32FC1 )
        CV_Error( Error::StsUnsupportedFormat, "MHI must be single-channel floating-point images" );

    // The orientation output cannot alias the input; re-create it if it does.
    if( orient.data == mhi.data )
    {
        _orientation.release();
        _orientation.create(size, CV_32F);
        orient = _orientation.getMat();
    }

    if( delta1 > delta2 )
        std::swap(delta1, delta2);

    // Threshold below which a Sobel response is considered "no gradient";
    // scales with the aperture since kernel weights grow with its size.
    float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float min_delta = (float)delta1;
    float max_delta = (float)delta2;

    Mat dX_min, dY_max;

    // calc Dx and Dy
    Sobel( mhi, dX_min, CV_32F, 1, 0, aperture_size, 1, 0, BORDER_REPLICATE );
    Sobel( mhi, dY_max, CV_32F, 0, 1, aperture_size, 1, 0, BORDER_REPLICATE );

    int x, y;
    // Flatten to a single row when all buffers are continuous.
    if( mhi.isContinuous() && orient.isContinuous() && mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        // Orientation in degrees for the whole row at once.
        hal::fastAtan2(dY_max_row, dX_min_row, orient_row, size.width, true);

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row[x];
            float dX = dX_min_row[x];

            if( std::abs(dX) < gradient_epsilon && std::abs(dY) < gradient_epsilon )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
            else
                mask_row[x] = (uchar)1;
        }
    }

    // Reuse the derivative buffers: dX_min becomes the local minimum of the
    // MHI, dY_max the local maximum.
    erode( mhi, dX_min, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );
    dilate( mhi, dY_max, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        for( x = 0; x < size.width; x++ )
        {
            float d0 = dY_max_row[x] - dX_min_row[x];

            if( mask_row[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
        }
    }
}
/* dst = src */ void Mat::copyTo( OutputArray _dst ) const { int dtype = _dst.type(); if( _dst.fixedType() && dtype != type() ) { CV_Assert( channels() == CV_MAT_CN(dtype) ); convertTo( _dst, dtype ); return; } if( empty() ) { _dst.release(); return; } if( _dst.isUMat() ) { _dst.create( dims, size.p, type() ); UMat dst = _dst.getUMat(); size_t i, sz[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize(); for( i = 0; i < (size_t)dims; i++ ) sz[i] = size.p[i]; sz[dims-1] *= esz; dst.ndoffset(dstofs); dstofs[dims-1] *= esz; dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p); return; } if( dims <= 2 ) { _dst.create( rows, cols, type() ); Mat dst = _dst.getMat(); if( data == dst.data ) return; if( rows > 0 && cols > 0 ) { const uchar* sptr = data; uchar* dptr = dst.data; Size sz = getContinuousSize(*this, dst); size_t len = sz.width*elemSize(); for( ; sz.height--; sptr += step, dptr += dst.step ) memcpy( dptr, sptr, len ); } return; } _dst.create( dims, size, type() ); Mat dst = _dst.getMat(); if( data == dst.data ) return; if( total() != 0 ) { const Mat* arrays[] = { this, &dst }; uchar* ptrs[2]; NAryMatIterator it(arrays, ptrs, 2); size_t sz = it.size*elemSize(); for( size_t i = 0; i < it.nplanes; i++, ++it ) memcpy(ptrs[1], ptrs[0], sz); } }
/* GPU mean-shift segmentation.
 * Runs cuda::meanShiftProc to get per-pixel region colors and spatial
 * shifts, builds a 4/8-connected region adjacency graph, merges vertices
 * whose color and spatial distances are within (sr, sp) with a union-find,
 * then absorbs components smaller than minsize into their nearest neighbor
 * and paints each segment with its average color (alpha forced to 255).
 */
void cv::cuda::meanShiftSegmentation(InputArray _src, OutputArray _dst, int sp, int sr, int minsize, TermCriteria criteria)
{
    GpuMat src = _src.getGpuMat();

    CV_Assert( src.type() == CV_8UC4 );

    const int nrows = src.rows;
    const int ncols = src.cols;
    const int hr = sr;    // color-range merge threshold
    const int hsp = sp;   // spatial merge threshold

    // Perform mean shift procedure and obtain region and spatial maps
    GpuMat d_rmap, d_spmap;
    cuda::meanShiftProc(src, d_rmap, d_spmap, sp, sr, criteria);
    Mat rmap(d_rmap);    // downloads to host
    Mat spmap(d_spmap);

    // Edge capacity: 4 edges per interior pixel pair plus the last row/column.
    Graph<SegmLinkVal> g(nrows * ncols, 4 * (nrows - 1) * (ncols - 1) + (nrows - 1) + (ncols - 1));

    // Make region adjacency graph from image
    Vec4b r1;
    Vec4b r2[4];
    Vec2s sp1;
    Vec2s sp2[4];
    int dr[4];
    int dsp[4];
    for (int y = 0; y < nrows - 1; ++y)
    {
        Vec4b* ry = rmap.ptr<Vec4b>(y);
        Vec4b* ryp = rmap.ptr<Vec4b>(y + 1);
        Vec2s* spy = spmap.ptr<Vec2s>(y);
        Vec2s* spyp = spmap.ptr<Vec2s>(y + 1);
        for (int x = 0; x < ncols - 1; ++x)
        {
            r1 = ry[x];
            sp1 = spy[x];

            // Neighbors: right, down, down-right, and (for the anti-diagonal
            // edge computed below) down-left of the right neighbor.
            r2[0] = ry[x + 1];
            r2[1] = ryp[x];
            r2[2] = ryp[x + 1];
            r2[3] = ryp[x];

            sp2[0] = spy[x + 1];
            sp2[1] = spyp[x];
            sp2[2] = spyp[x + 1];
            sp2[3] = spyp[x];

            dr[0] = dist2(r1, r2[0]);
            dr[1] = dist2(r1, r2[1]);
            dr[2] = dist2(r1, r2[2]);
            dsp[0] = dist2(sp1, sp2[0]);
            dsp[1] = dist2(sp1, sp2[1]);
            dsp[2] = dist2(sp1, sp2[2]);

            // Anti-diagonal edge starts from the right neighbor.
            r1 = ry[x + 1];
            sp1 = spy[x + 1];

            dr[3] = dist2(r1, r2[3]);
            dsp[3] = dist2(sp1, sp2[3]);

            g.addEdge(pix(y, x, ncols), pix(y, x + 1, ncols), SegmLinkVal(dr[0], dsp[0]));
            g.addEdge(pix(y, x, ncols), pix(y + 1, x, ncols), SegmLinkVal(dr[1], dsp[1]));
            g.addEdge(pix(y, x, ncols), pix(y + 1, x + 1, ncols), SegmLinkVal(dr[2], dsp[2]));
            g.addEdge(pix(y, x + 1, ncols), pix(y + 1, x, ncols), SegmLinkVal(dr[3], dsp[3]));
        }
    }
    // Vertical edges of the last column.
    for (int y = 0; y < nrows - 1; ++y)
    {
        r1 = rmap.at<Vec4b>(y, ncols - 1);
        r2[0] = rmap.at<Vec4b>(y + 1, ncols - 1);
        sp1 = spmap.at<Vec2s>(y, ncols - 1);
        sp2[0] = spmap.at<Vec2s>(y + 1, ncols - 1);
        dr[0] = dist2(r1, r2[0]);
        dsp[0] = dist2(sp1, sp2[0]);
        g.addEdge(pix(y, ncols - 1, ncols), pix(y + 1, ncols - 1, ncols), SegmLinkVal(dr[0], dsp[0]));
    }
    // Horizontal edges of the last row.
    for (int x = 0; x < ncols - 1; ++x)
    {
        r1 = rmap.at<Vec4b>(nrows - 1, x);
        r2[0] = rmap.at<Vec4b>(nrows - 1, x + 1);
        sp1 = spmap.at<Vec2s>(nrows - 1, x);
        sp2[0] = spmap.at<Vec2s>(nrows - 1, x + 1);
        dr[0] = dist2(r1, r2[0]);
        dsp[0] = dist2(sp1, sp2[0]);
        g.addEdge(pix(nrows - 1, x, ncols), pix(nrows - 1, x + 1, ncols), SegmLinkVal(dr[0], dsp[0]));
    }

    DjSets comps(g.numv);   // union-find over pixels

    // Find adjacent components
    for (int v = 0; v < g.numv; ++v)
    {
        for (int e_it = g.start[v]; e_it != -1; e_it = g.edges[e_it].next)
        {
            int c1 = comps.find(v);
            int c2 = comps.find(g.edges[e_it].to);
            if (c1 != c2 && g.edges[e_it].val.dr < hr && g.edges[e_it].val.dsp < hsp)
                comps.merge(c1, c2);
        }
    }

    std::vector<SegmLink> edges;
    edges.reserve(g.numv);

    // Prepare edges connecting different components
    for (int v = 0; v < g.numv; ++v)
    {
        int c1 = comps.find(v);
        for (int e_it = g.start[v]; e_it != -1; e_it = g.edges[e_it].next)
        {
            int c2 = comps.find(g.edges[e_it].to);
            if (c1 != c2)
                edges.push_back(SegmLink(c1, c2, g.edges[e_it].val));
        }
    }

    // Sort all graph's edges connecting different components (in ascending order)
    std::sort(edges.begin(), edges.end());

    // Exclude small components (starting from the nearest couple)
    for (size_t i = 0; i < edges.size(); ++i)
    {
        int c1 = comps.find(edges[i].from);
        int c2 = comps.find(edges[i].to);
        if (c1 != c2 && (comps.size[c1] < minsize || comps.size[c2] < minsize))
            comps.merge(c1, c2);
    }

    // Compute sum of the pixel's colors which are in the same segment
    Mat h_src(src);
    std::vector<Vec4i> sumcols(nrows * ncols, Vec4i(0, 0, 0, 0));
    for (int y = 0; y < nrows; ++y)
    {
        Vec4b* h_srcy = h_src.ptr<Vec4b>(y);
        for (int x = 0; x < ncols; ++x)
        {
            int parent = comps.find(pix(y, x, ncols));
            Vec4b col = h_srcy[x];
            Vec4i& sumcol = sumcols[parent];
            sumcol[0] += col[0];
            sumcol[1] += col[1];
            sumcol[2] += col[2];
        }
    }

    // Create final image, color of each segment is the average color of its pixels
    _dst.create(src.size(), src.type());
    Mat dst = _dst.getMat();

    for (int y = 0; y < nrows; ++y)
    {
        Vec4b* dsty = dst.ptr<Vec4b>(y);
        for (int x = 0; x < ncols; ++x)
        {
            int parent = comps.find(pix(y, x, ncols));
            const Vec4i& sumcol = sumcols[parent];
            Vec4b& dstcol = dsty[x];
            dstcol[0] = static_cast<uchar>(sumcol[0] / comps.size[parent]);
            dstcol[1] = static_cast<uchar>(sumcol[1] / comps.size[parent]);
            dstcol[2] = static_cast<uchar>(sumcol[2] / comps.size[parent]);
            dstcol[3] = 255;   // alpha is not averaged, just forced opaque
        }
    }
}
void process(InputArrayOfArrays src, OutputArray dst)
{
    // Exposure fusion (Mertens, Kautz, Van Reeth 2007): blend a stack of
    // differently-exposed 8-bit images of the same scene into one CV_32F
    // image by weighting every pixel with three quality measures (contrast,
    // saturation, well-exposedness) and blending in a Laplacian pyramid.
    //
    // src: vector of same-sized CV_8U images (1 or 3 channels).
    // dst: fused CV_32F image, same size/channels, values roughly in [0, 1].
    std::vector<Mat> images;
    src.getMatVector(images);
    checkImageDimensions(images);

    int channels = images[0].channels();
    CV_Assert(channels == 1 || channels == 3);
    Size size = images[0].size();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    std::vector<Mat> weights(images.size());
    Mat weight_sum = Mat::zeros(size, CV_32F);

    for(size_t i = 0; i < images.size(); i++) {
        Mat img, gray, contrast, saturation, wellexp;
        std::vector<Mat> splitted(channels);

        images[i].convertTo(img, CV_32F, 1.0f/255.0f);
        if(channels == 3) {
            cvtColor(img, gray, COLOR_RGB2GRAY);
        } else {
            img.copyTo(gray);
        }
        split(img, splitted);

        // Contrast measure: absolute Laplacian response of the grayscale image.
        Laplacian(gray, contrast, CV_32F);
        contrast = abs(contrast);

        // Saturation measure: per-pixel standard deviation across channels.
        Mat mean = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++) {
            mean += splitted[c];
        }
        mean /= channels;

        saturation = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++) {
            Mat deviation = splitted[c] - mean;
            pow(deviation, 2.0f, deviation);
            saturation += deviation;
        }
        sqrt(saturation, saturation);

        // Well-exposedness measure: Gauss curve exp(-(I - 0.5)^2 / (2*sigma^2))
        // with sigma = 0.2 (so 2*sigma^2 == 0.08), accumulated over channels.
        // BUGFIX: the intermediate Mat used to be named `exp`, which shadowed
        // cv::exp, and the exponential was never applied — the "weights" were
        // the negative quantity -(I-0.5)^2/0.08 instead of the Gauss curve.
        wellexp = Mat::ones(size, CV_32F);
        for(int c = 0; c < channels; c++) {
            Mat expo = splitted[c] - 0.5f;
            pow(expo, 2.0f, expo);
            expo = -expo / 0.08f;
            exp(expo, expo);   // cv::exp — previously missing
            wellexp = wellexp.mul(expo);
        }

        // Raise each measure to its user-configurable exponent and combine.
        pow(contrast, wcon, contrast);
        pow(saturation, wsat, saturation);
        pow(wellexp, wexp, wellexp);

        weights[i] = contrast;
        if(channels == 3) {
            weights[i] = weights[i].mul(saturation);
        }
        // Tiny bias keeps weight_sum strictly positive so the normalization
        // below cannot divide by zero on flat, mid-gray regions.
        weights[i] = weights[i].mul(wellexp) + 1e-12f;
        weight_sum += weights[i];
    }

    // Multiresolution blending: avoids visible seams between exposures.
    int maxlevel = static_cast<int>(logf(static_cast<float>(min(size.width, size.height))) / logf(2.0f));
    std::vector<Mat> res_pyr(maxlevel + 1);

    for(size_t i = 0; i < images.size(); i++) {
        weights[i] /= weight_sum;   // per-pixel normalized blending weight
        Mat img;
        images[i].convertTo(img, CV_32F, 1.0f/255.0f);

        std::vector<Mat> img_pyr, weight_pyr;
        buildPyramid(img, img_pyr, maxlevel);
        buildPyramid(weights[i], weight_pyr, maxlevel);

        // Convert the Gaussian image pyramid into a Laplacian pyramid.
        for(int lvl = 0; lvl < maxlevel; lvl++) {
            Mat up;
            pyrUp(img_pyr[lvl + 1], up, img_pyr[lvl].size());
            img_pyr[lvl] -= up;
        }
        // Accumulate the weighted Laplacian levels into the result pyramid.
        for(int lvl = 0; lvl <= maxlevel; lvl++) {
            std::vector<Mat> splitted(channels);
            split(img_pyr[lvl], splitted);
            for(int c = 0; c < channels; c++) {
                splitted[c] = splitted[c].mul(weight_pyr[lvl]);
            }
            merge(splitted, img_pyr[lvl]);
            if(res_pyr[lvl].empty()) {
                res_pyr[lvl] = img_pyr[lvl];
            } else {
                res_pyr[lvl] += img_pyr[lvl];
            }
        }
    }
    // Collapse the result pyramid back into a full-resolution image.
    for(int lvl = maxlevel; lvl > 0; lvl--) {
        Mat up;
        pyrUp(res_pyr[lvl], up, res_pyr[lvl - 1].size());
        res_pyr[lvl - 1] += up;
    }
    dst.create(size, CV_32FCC);
    res_pyr[0].copyTo(dst.getMat());
}
void BackgroundSubtractorGMG::operator()(InputArray _image, OutputArray _fgmask, double newLearningRate)
{
    // Processes one frame of the GMG background-subtraction algorithm
    // (Godbehere, Matsukawa, Goldberg 2012): per pixel, quantize the color
    // into histogram bins, query the pixel's histogram model for the
    // feature's likelihood, convert it to a foreground posterior via Bayes
    // rule, then threshold + median-filter the posterior into the 8-bit mask.
    //
    // _image:   input frame (any depth handled by the switch below).
    // _fgmask:  output CV_8U mask, 255 for foreground, 0 for background.
    // newLearningRate: optional override of the model's learning rate in
    //                  [0, 1]; pass -1.0 to keep the current value.
    if (!isDataInitialized)
    {
        CV_Error(CV_StsError,"BackgroundSubstractorGMG has not been initialized. Call initialize() first.\n");
    }

    /*
     * Update learning rate parameter, if desired
     */
    if (newLearningRate != -1.0)
    {
        if (newLearningRate < 0.0 || newLearningRate > 1.0)
        {
            CV_Error(CV_StsOutOfRange,"Learning rate for Operator () must be between 0.0 and 1.0.\n");
        }
        this->learningRate = newLearningRate;
    }

    Mat image = _image.getMat();

    _fgmask.create(imHeight,imWidth,CV_8U);
    fgMaskImage = _fgmask.getMat();  // 8-bit unsigned mask. 255 for FG, 0 for BG

    /*
     * Iterate over pixels in image
     */
    // grab data at each pixel (1,2,3 channels, int, float, etc.)
    // grab data as an array of bytes. Then, send that array to a function that reads data into vector of appropriate types... and quantizing... before saving as a feature, which is a vector of flexitypes, so code can be portable.
    // multiple channels do have sequential storage, use mat::elemSize() and mat::elemSize1()
    vector<PixelModelGMG>::iterator pixel;
    vector<PixelModelGMG>::iterator pixel_end = pixels.end();
    size_t i;
    //#pragma omp parallel
    for (i = 0, pixel=pixels.begin(); pixel != pixel_end; ++i,++pixel)
    {
        HistogramFeatureGMG newFeature;
        newFeature.color.clear();
        // Linear pixel index i -> (row, col) in the image.
        int irow = int(i / imWidth);
        int icol = i % imWidth;
        for (size_t c = 0; c < numChannels; ++c)
        {
            /*
             * Perform quantization. in each channel. (color-min)*(levels)/(max-min).
             * Shifts min to 0 and scales, finally casting to an int.
             */
            // Read the raw channel value for whichever depth the frame has.
            double color;
            switch(image.depth())
            {
                case CV_8U: color = image.ptr<uchar>(irow)[icol * numChannels + c]; break;
                case CV_8S: color = image.ptr<schar>(irow)[icol * numChannels + c]; break;
                case CV_16U: color = image.ptr<ushort>(irow)[icol * numChannels + c]; break;
                case CV_16S: color = image.ptr<short>(irow)[icol * numChannels + c]; break;
                case CV_32S: color = image.ptr<int>(irow)[icol * numChannels + c]; break;
                case CV_32F: color = image.ptr<float>(irow)[icol * numChannels + c]; break;
                case CV_64F: color = image.ptr<double>(irow)[icol * numChannels + c]; break;
                default: color = 0; break;
            }
            // NOTE(review): assumes minVal < maxVal and color within
            // [minVal, maxVal]; out-of-range input would yield an
            // out-of-range quantized bin — confirm against initialize().
            size_t quantizedColor = (size_t)((color-minVal)*quantizationLevels/(maxVal-minVal));
            newFeature.color.push_back(quantizedColor);
        }
        // now that the feature is ready for use, put it in the histogram
        if (frameNum > numInitializationFrames)  // typical operation
        {
            newFeature.likelihood = float(learningRate);
            /*
             * (1) Query histogram to find posterior probability of feature under model.
             */
            float likelihood = (float)pixel->getLikelihood(newFeature);

            // see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule
            float posterior = float((likelihood*backgroundPrior)/(likelihood*backgroundPrior+(1-likelihood)*(1-backgroundPrior)));

            /*
             * (2) feed posterior probability into the posterior image
             */
            int row,col;
            col = i%imWidth;
            row = int(i-col)/imWidth;
            // Store P(foreground) = 1 - P(background).
            posteriorImage.at<float>(row,col) = (1.0f-posterior);
        }
        // Record the observed feature so the model can be updated later.
        pixel->setLastObservedFeature(newFeature);
    }
    /*
     * (3) Perform filtering and threshold operations to yield final mask image.
     *
     * 2 options. First is morphological open/close as before. Second is "median filtering" which Jon Barron says is good to remove noise
     */
    Mat thresholdedPosterior;
    threshold(posteriorImage,thresholdedPosterior,decisionThreshold,1.0,THRESH_BINARY);
    thresholdedPosterior.convertTo(fgMaskImage,CV_8U,255);  // convert image to integer space for further filtering and mask creation
    medianBlur(fgMaskImage,fgMaskImage,smoothingRadius);

    fgMaskImage.copyTo(_fgmask);

    ++frameNum;  // keep track of how many frames we have processed
}
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
{
    // Recovers a radiance map from a bracketed stack of 8-bit exposures and
    // their exposure times (Debevec-style merging): the result is a weighted
    // average, in the log domain, of response(pixel) - log(exposure_time)
    // over all input images, exponentiated at the end.
    //
    // src:            vector of same-sized CV_8U images.
    // dst:            output CV_32F radiance map, same channel count.
    // _times:         exposure time per image (one entry per input image).
    // input_response: optional camera response LUT (LDR_SIZE x 1); when
    //                 empty, a linear response is used.
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    Size size = images[0].size();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(images[0].size(), CV_32FCC);
    Mat result = dst.getMat();

    Mat response = input_response.getMat();
    if(response.empty()) {
        response = linearResponse(channels);
        // Entry 0 would become log(0) below; clone entry 1 into it instead.
        // NOTE(review): Vec3f access assumes a 3-channel response here —
        // confirm this is safe for the 1-channel case.
        response.at<Vec3f>(0) = response.at<Vec3f>(1);
    }
    log(response, response);
    CV_Assert(response.rows == LDR_SIZE && response.cols == 1 && response.channels() == channels);

    // NOTE(review): exp_values shares its data with the caller's `times`
    // matrix (Mat copy constructor shares storage), so this in-place log()
    // also mutates the caller's exposure times — verify callers tolerate it.
    Mat exp_values(times);
    log(exp_values, exp_values);

    // NOTE(review): assigning Mat::zeros to the already-allocated `result`
    // of matching size/type is evaluated in place by the MatExpr machinery,
    // so `result` presumably keeps sharing dst's buffer — confirm.
    result = Mat::zeros(size, CV_32FCC);
    std::vector<Mat> result_split;
    split(result, result_split);
    Mat weight_sum = Mat::zeros(size, CV_32F);

    for(size_t i = 0; i < images.size(); i++) {
        std::vector<Mat> splitted;
        split(images[i], splitted);

        // Per-pixel confidence: LUT each channel through the `weights`
        // table and average over channels.
        Mat w = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++) {
            LUT(splitted[c], weights, splitted[c]);
            w += splitted[c];
        }
        w /= channels;

        // Per-image log-radiance estimate: log(response(Z)) - log(t_i),
        // accumulated with weight w.
        Mat response_img;
        LUT(images[i], response, response_img);
        split(response_img, splitted);
        for(int c = 0; c < channels; c++) {
            result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
        }
        weight_sum += w;
    }
    // Normalize the weighted sum, then leave the log domain.
    // NOTE(review): weight_sum can be zero where every image received zero
    // weight, which would produce inf/NaN here.
    weight_sum = 1.0f / weight_sum;
    for(int c = 0; c < channels; c++) {
        result_split[c] = result_split[c].mul(weight_sum);
    }
    merge(result_split, result);
    exp(result, result);
}
void divSpectrums( InputArray _srcA, InputArray _srcB, OutputArray _dst, int flags, bool conjB)
{
    // Per-element division of two DFT spectra: dst = srcA / srcB, or
    // dst = srcA / conj(srcB) when conjB is true. The complex quotients are
    // computed as A*conj(B)/|B|^2 (resp. A*B/|B|^2), and a small epsilon is
    // added to every denominator to avoid division by zero.
    // Mirrors cv::mulSpectrums' supported layouts: 1-channel CCS-packed real
    // spectra and 2-channel complex spectra, CV_32F or CV_64F, optionally
    // treating each row as an independent spectrum (DFT_ROWS).
    Mat srcA = _srcA.getMat(), srcB = _srcB.getMat();
    int depth = srcA.depth(), cn = srcA.channels(), type = srcA.type();
    int rows = srcA.rows, cols = srcA.cols;
    int j, k;

    CV_Assert( type == srcB.type() && srcA.size() == srcB.size() );
    CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 );

    _dst.create( srcA.rows, srcA.cols, type );
    Mat dst = _dst.getMat();

    // Treat the data as one long row when it is effectively one-dimensional.
    bool is_1d = (flags & DFT_ROWS) || (rows == 1 || (cols == 1 &&
             srcA.isContinuous() && srcB.isContinuous() && dst.isContinuous()));

    if( is_1d && !(flags & DFT_ROWS) )
        cols = cols + rows - 1, rows = 1;

    int ncols = cols*cn;
    // For packed real (cn == 1) spectra, the first element — and the last one
    // when cols is even — hold pure real values; they are handled separately.
    int j0 = cn == 1;
    int j1 = ncols - (cols % 2 == 0 && cn == 1);

    if( depth == CV_32F )
    {
        const float* dataA = srcA.ptr<float>();
        const float* dataB = srcB.ptr<float>();
        float* dataC = dst.ptr<float>();
        float eps = FLT_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        if( !is_1d && cn == 1 )
        {
            // 2D CCS-packed real spectrum: the first column (k == 0) and, for
            // even cols, the last column (k == 1) are themselves packed 1D
            // spectra stored down the column — a real top element, (re, im)
            // pairs in consecutive rows, and a real bottom element when the
            // row count is even.
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                // Pure real DC (and Nyquist, for even rows) entries.
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    // A / B: numerator is A*conj(B).
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] + (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;
                        double re = (double)dataA[j*stepA]*dataB[j*stepB] + (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] - (double)dataA[j*stepA]*dataB[(j+1)*stepB];
                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                else
                    // A / conj(B): numerator is A*B.
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] + (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;
                        double re = (double)dataA[j*stepA]*dataB[j*stepB] - (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] + (double)dataA[j*stepA]*dataB[(j+1)*stepB];
                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        // Remaining elements: walk the rows, dividing (re, im) pairs.
        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                // Packed real endpoints of the 1D spectrum.
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }
            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
        }
    }
    else
    {
        // CV_64F branch — identical structure to the CV_32F branch above,
        // but all arithmetic stays in double.
        const double* dataA = srcA.ptr<double>();
        const double* dataB = srcB.ptr<double>();
        double* dataC = dst.ptr<double>();
        double eps = DBL_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        if( !is_1d && cn == 1 )
        {
            // First/last packed-real columns of the 2D CCS layout.
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] + dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;
                        double re = dataA[j*stepA]*dataB[j*stepB] + dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] - dataA[j*stepA]*dataB[(j+1)*stepB];
                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                else
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] + dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;
                        double re = dataA[j*stepA]*dataB[j*stepB] - dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] + dataA[j*stepA]*dataB[(j+1)*stepB];
                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }
            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
        }
    }
}
static bool openvx_sobel(InputArray _src, OutputArray _dst,
                         int dx, int dy, int ksize,
                         double scale, double delta, int borderType)
{
    // Attempts the 3x3 Sobel derivative through OpenVX (vxuSobel3x3).
    // Returns false whenever the request falls outside what this path
    // supports, so the caller can fall back to the default implementation.
    // Supported: CV_8UC1 -> CV_16SC1, ksize == 3, scale == 1, delta == 0,
    // exactly one of dx/dy equal to 1, constant border, image >= 3x3 and
    // large enough to be worth offloading.
    if (_src.type() != CV_8UC1 || _dst.type() != CV_16SC1 ||
        ksize != 3 || scale != 1.0 || delta != 0.0 ||
        (dx | dy) != 1 || (dx + dy) != 1 ||   // exactly one of dx, dy is 1, the other 0
        _src.cols() < ksize || _src.rows() < ksize ||
        ovx::skipSmallImages<VX_KERNEL_SOBEL_3x3>(_src.cols(), _src.rows())
        )
        return false;

    Mat src = _src.getMat();
    Mat dst = _dst.getMat();

    if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
        return false; //Process isolated borders only

    // Map the OpenCV border mode onto an OpenVX one; anything but constant
    // border is unsupported here.
    vx_enum border;
    switch (borderType & ~BORDER_ISOLATED)
    {
    case BORDER_CONSTANT:
        border = VX_BORDER_CONSTANT;
        break;
    case BORDER_REPLICATE:
//        border = VX_BORDER_REPLICATE;
//        break;
    default:
        return false;
    }

    try
    {
        ivx::Context ctx = ovx::getOpenVXContext();

        //if ((vx_size)ksize > ctx.convolutionMaxDimension())
        //    return false;

        // In-place call (dst aliases src): run the filter on a copy of the
        // input so the output writes do not corrupt the source pixels.
        Mat a;
        if (dst.data != src.data)
            a = src;
        else
            src.copyTo(a);

        // Wrap the OpenCV buffers as OpenVX images without copying.
        ivx::Image
            ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
                ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
            ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_S16,
                ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);

        //ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
        //since OpenVX standard says nothing about thread-safety for now
        ivx::border_t prevBorder = ctx.immediateBorder();
        ctx.setImmediateBorder(border, (vx_uint8)(0));
        // vxuSobel3x3 writes the x-derivative to its 3rd argument and the
        // y-derivative to its 4th; pass NULL for the one not requested.
        if(dx)
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, ib, NULL));
        else
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, NULL, ib));
        // Restore the previous border mode on the shared context.
        ctx.setImmediateBorder(prevBorder);
    }
    catch (ivx::RuntimeError & e)
    {
        VX_DbgThrow(e.what());
    }
    catch (ivx::WrapperError & e)
    {
        VX_DbgThrow(e.what());
    }

    return true;
}
void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
    // Interleaves n single- or multi-channel matrices of identical size and
    // depth into one matrix whose channel count is the sum of the inputs'.
    //
    // mv:   array of n source matrices (same size and depth).
    // _dst: output matrix of CV_MAKETYPE(depth, total channel count).
    CV_Assert( mv && n > 0 );

    int depth = mv[0].depth();
    bool allch1 = true;      // true when every source is single-channel
    int k, cn = 0;           // cn accumulates the total output channel count
    size_t i;

    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    }

    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();

    // Trivial case: a single source is just a copy.
    if( n == 1 )
    {
        mv[0].copyTo(dst);
        return;
    }

    // Mixed channel counts: express the merge as a channel-copy map
    // (source channel j -> destination channel j) and let mixChannels do it.
    if( !allch1 )
    {
        AutoBuffer<int> pairs(cn*2);
        int j, ni=0;

        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }
        mixChannels( mv, n, &dst, 1, &pairs[0], cn );
        return;
    }

    // All sources are single-channel: use the per-depth merge kernel,
    // iterating plane-by-plane (NAryMatIterator handles non-continuous and
    // multi-dimensional data) and block-by-block within each plane.
    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
    // One buffer holds both the Mat* array and the aligned data-pointer array.
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &dst;   // slot 0 is the destination; slots 1..cn the sources
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];

    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    MergeFunc func = mergeTab[depth];

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );

            // Advance the data pointers to the next block within this plane.
            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( int k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}
/** * Function that computes the Harris responses in a * 2*r x 2*r patch at given points in the image */ static void HarrisResponses(InputArray _img, InputArray _diff_x, InputArray _diff_y, std::vector<KeyPoint>& pts, OutputArray _response, int r, float harris_k) { size_t ptidx, ptsize = pts.size(); // Get mats Mat img = _img.getMat(), diff_x = _diff_x.getMat(), diff_y = _diff_y.getMat(), response; CV_Assert( img.type() == CV_8UC1 ); bool compute_response = _response.needed(); if (compute_response) response = _response.getMat(); const int* dx00 = diff_x.ptr<int>(); const int* dy00 = diff_y.ptr<int>(); float* r00 = response.ptr<float>(); int step = diff_x.step1(); int r_step = response.step1(); for( ptidx = 0; ptidx < ptsize; ptidx++ ) { float kp_x = pts[ptidx].pt.x; float kp_y = pts[ptidx].pt.y; int x0 = (int)kp_x; int y0 = (int)kp_y; float xd = 2; float yd = 2; //float xd = 0.5; //float yd = 0.5; const int* dx0 = dx00 + (y0)*step + x0; const int* dy0 = dy00 + (y0)*step + x0; int a = 0, b = 0, c = 0, d = 0; float* r0 = r00 + ptidx*r_step; for( int i = -r; i < r; i++ ) { for( int j = -r; j < r; j++ ) { const int ofs = i*step + j; const int* dx = dx0 + ofs; const int* dy = dy0 + ofs; const int Ix = (float)dx[-1]*(xd) + (float)dx[0] + (float)dx[1]*xd + (float)dx[-step]*(yd) + (float)dx[step]*yd; const int Iy = (float)dy[-1]*(xd) + (float)dy[0] + (float)dy[1]*xd + (float)dy[-step]*(yd) + (float)dy[step]*yd; a += (Ix*Ix); b += (Iy*Iy); c += (Ix*Iy); d += Ix; } } if (compute_response) { r0[0] = (float)a; r0[1] = (float)b; r0[2] = (float)c; r0[3] = (float)d; } else pts[ptidx].response = ((float)a * b - (float)c * c - harris_k * ((float)a + b) * ((float)a + b)); } }