static bool transform(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _lut,
                      const int tilesX, const int tilesY, const cv::Size & tileSize)
{
    cv::ocl::Kernel k("transform", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    cv::UMat src = _src.getUMat();
    _dst.create(src.size(), src.type());
    cv::UMat dst = _dst.getUMat();
    cv::UMat lut = _lut.getUMat();

    size_t localThreads[3]  = { 32, 8, 1 };
    // explicit casts avoid a narrowing-conversion error in C++11 list-initialization
    size_t globalThreads[3] = { (size_t)src.cols, (size_t)src.rows, 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(lut));
    idx = k.set(idx, src.cols);
    idx = k.set(idx, src.rows);
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    k.set(idx, tilesY);

    return k.run(2, globalThreads, localThreads, false);
}
inline void bs::pattern::shiftPattern(cv::InputArray src, cv::OutputArray dst, const int direction, const int shift)
{
    cv::Mat d;
    cv::copyMakeBorder(src, d, shift, shift, shift, shift, cv::BORDER_WRAP);

    const auto direc = static_cast<Direction>(direction);
    switch (direc)
    {
    default:
        break;
    case Top:
        d = d(cv::Rect(cv::Point(shift, 2 * shift), src.size()));
        break;
    case Bottom:
        d = d(cv::Rect(cv::Point(shift, 0), src.size()));
        break;
    case Left:
        d = d(cv::Rect(cv::Point(2 * shift, shift), src.size()));
        break;
    case Right:
        d = d(cv::Rect(cv::Point(0, shift), src.size()));
        break;
    }
    d.copyTo(dst);
}
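// A minimal usage sketch for shiftPattern, assuming the Direction enum
// (Top/Bottom/Left/Right) lives in the bs::pattern namespace and that a
// "pattern.png" test image exists; both are assumptions, not part of the
// snippet above.
#include <opencv2/opencv.hpp>

void shiftPatternDemo()
{
    cv::Mat pattern = cv::imread("pattern.png", cv::IMREAD_GRAYSCALE);
    cv::Mat shifted;
    // Shift the periodic pattern 8 pixels toward the top, wrapping at the border.
    bs::pattern::shiftPattern(pattern, shifted, bs::pattern::Top, 8);
    cv::imwrite("pattern_shifted.png", shifted);
}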
FilterCall::FilterCall(cv::InputArray in, cv::InputArray out, impl::CallMetaData data,
                       QString type, QString description, QString requestedView)
    : Call{ data, std::move(type), std::move(description), std::move(requestedView) },
      input_{ in.getMat().clone() },
      output_{ out.getMat().clone() }
{
}
void sadTemplate(cv::InputArray tar, cv::InputArray tmp, cv::OutputArray res, int *minx, int *miny)
{
    // Receive the input arguments as Mat.
    cv::Mat tarM = tar.getMat();
    cv::Mat tmpM = tmp.getMat();
    cv::Mat resM = res.getMat();

    // The location with the minimum SAD is the best match.
    int minsad = std::numeric_limits<int>::max();
    int sad = 0;            // SAD of the current window
    int diff;               // working variable before accumulating into sad
    int tarx = 0, tary = 0; // coordinates of the best match (initialized so they are never read uninitialized)

    for (int y = 0; y < tarM.rows - tmpM.rows; y++) {
        for (int x = 0; x < tarM.cols - tmpM.cols; x++) {
            sad = 0; // reset before evaluating the next window
            // Scan the template window.
            for (int yt = 0; yt < tmpM.rows; yt++) {
                for (int xt = 0; xt < tmpM.cols; xt++) {
                    diff = (int)(tarM.at<uchar>(y + yt, x + xt) - tmpM.at<uchar>(yt, xt));
                    if (diff < 0) // take the absolute value
                        diff = -diff;
                    sad += diff;
                    // Sequential similarity detection (SSDA): abort once the
                    // partial SAD already exceeds the best score so far.
                    if (sad > minsad) {
                        yt = tmpM.rows;
                        break;
                    }
                }
            }
            // If this window has the smallest SAD so far, remember it.
            if (sad < minsad) {
                minsad = sad;
                tarx = x;
                tary = y;
            }
        }
    }

    // Write the result: mark the best-match pixel black, everything else white.
    for (int y = 0; y < resM.rows; y++) {
        for (int x = 0; x < resM.cols; x++) {
            resM.at<uchar>(y, x) = (x == tarx && y == tary) ? (uchar)0 : (uchar)255;
        }
    }
    std::cout << "minimum SAD = " << minsad << std::endl;
    std::cout << "best match = [" << tarx << ", " << tary << "]" << std::endl;
    *minx = tarx;
    *miny = tary;
}
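// A minimal usage sketch for sadTemplate, assuming 8-bit grayscale inputs and a
// caller-allocated result image (the function writes through res.getMat(), so
// the output must already have the target's size and CV_8UC1 type). File names
// are illustrative.
#include <opencv2/opencv.hpp>

void sadTemplateDemo()
{
    cv::Mat target = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
    cv::Mat templ  = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
    cv::Mat result(target.size(), CV_8UC1);
    int bestX = 0, bestY = 0;
    sadTemplate(target, templ, result, &bestX, &bestY);
    // Outline the matched region for inspection.
    cv::rectangle(result, cv::Rect(bestX, bestY, templ.cols, templ.rows), cv::Scalar(128));
}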
void warmify(cv::InputArray src, cv::OutputArray dst, uchar delta)
{
    CV_Assert(src.type() == CV_8UC3);
    Mat imgSrc = src.getMat();
    CV_Assert(imgSrc.data);

    dst.create(src.size(), CV_8UC3);
    Mat imgDst = dst.getMat();

    // Add delta to the G and R channels (BGR order) to warm up the colors;
    // the MatExpr assignment writes in place into the dst buffer.
    imgDst = imgSrc + Scalar(0, delta, delta);
}
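// A minimal usage sketch for warmify; the file names are illustrative.
#include <opencv2/opencv.hpp>

void warmifyDemo()
{
    cv::Mat photo = cv::imread("photo.jpg"); // loaded as 8-bit BGR
    cv::Mat warmed;
    warmify(photo, warmed, 30); // push green/red up by 30 for a warmer tone
    cv::imwrite("photo_warm.jpg", warmed);
}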
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recImage2, cv::OutputArray _disparityMap,
                            int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2)
{
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recImage2.getMat();

    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    // OpenCV 2.x functor-style SGBM matcher.
    StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    matcher(img1, img2, dis);

    // SGBM returns fixed-point disparities scaled by 16.
    dis = dis / 16.0;
}
void testFunction(cv::InputArray ip, cv::InputArray op)
{
    cv::Mat img = ip.getMat();
    cv::Mat obj = op.getMat();
    printMatrix(img);
    printMatrix(obj);
    // std::cerr << img.checkVector() << std::endl;
}
void stereo::stereoRectify(cv::InputArray _K1, cv::InputArray _K2, cv::InputArray _R, cv::InputArray _T,
                           cv::OutputArray _R1, cv::OutputArray _R2, cv::OutputArray _P1, cv::OutputArray _P2)
{
    Mat K1 = _K1.getMat(), K2 = _K2.getMat(), R = _R.getMat(), T = _T.getMat();

    _R1.create(3, 3, CV_32F);
    _R2.create(3, 3, CV_32F);
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();
    _P1.create(3, 4, CV_32F);
    _P2.create(3, 4, CV_32F);
    Mat P1 = _P1.getMat();
    Mat P2 = _P2.getMat();

    if (K1.type() != CV_32F) K1.convertTo(K1, CV_32F);
    if (K2.type() != CV_32F) K2.convertTo(K2, CV_32F);
    if (R.type() != CV_32F) R.convertTo(R, CV_32F);
    if (T.type() != CV_32F) T.convertTo(T, CV_32F);
    if (T.rows != 3) T = T.t();

    // R and T are the transformation from the first to the second camera.
    // Get the transformation from the second to the first camera.
    Mat R_inv = R.t();
    Mat T_inv = -R.t() * T;

    Mat e1, e2, e3;
    e1 = T_inv.t() / norm(T_inv);
    /*Mat z = (Mat_<float>(1, 3) << 0.0,0.0,-1.0);
    e2 = e1.cross(z);
    e2 = e2 / norm(e2);*/
    e2 = (Mat_<float>(1, 3) << -T_inv.at<float>(1), T_inv.at<float>(0), 0.0f);
    e2 = e2 / std::sqrt(e2.at<float>(0) * e2.at<float>(0) + e2.at<float>(1) * e2.at<float>(1));
    e3 = e1.cross(e2);
    e3 = e3 / norm(e3);

    e1.copyTo(R1.row(0));
    e2.copyTo(R1.row(1));
    e3.copyTo(R1.row(2));
    R2 = R_inv * R1;

    P1.setTo(Scalar(0));
    R1.copyTo(P1.colRange(0, 3));
    P1 = K1 * P1;
    P2.setTo(Scalar(0));
    R2.copyTo(P2.colRange(0, 3));
    P2 = K2 * P2;
}
template <typename S> // the template header was evidently lost in extraction; S is used throughout
void SLICSegment2Vector3D_(cv::InputArray segment, cv::InputArray signal, double invalidValue,
                           std::vector<std::vector<cv::Point3_<S>>>& segmentPoint)
{
    double minv, maxv;
    minMaxLoc(segment, &minv, &maxv);
    segmentPoint.clear();
    segmentPoint.resize((int)maxv + 1);

    // Dispatch on the signal depth, forwarding invalidValue in the native type.
    if (signal.depth() == CV_8U)
        _SLICSegment2Vector3D_<uchar, S>(segment, signal, (uchar)invalidValue, segmentPoint);
    else if (signal.depth() == CV_16S)
        _SLICSegment2Vector3D_<short, S>(segment, signal, (short)invalidValue, segmentPoint);
    else if (signal.depth() == CV_16U)
        _SLICSegment2Vector3D_<ushort, S>(segment, signal, (ushort)invalidValue, segmentPoint);
    else if (signal.depth() == CV_32S)
        _SLICSegment2Vector3D_<int, S>(segment, signal, (int)invalidValue, segmentPoint);
    else if (signal.depth() == CV_32F)
        _SLICSegment2Vector3D_<float, S>(segment, signal, (float)invalidValue, segmentPoint);
    else if (signal.depth() == CV_64F)
        _SLICSegment2Vector3D_<double, S>(segment, signal, (double)invalidValue, segmentPoint);
}
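// A minimal usage sketch, assuming "seg" is a CV_32S SLIC label image and
// "depth" is a single-channel signal of a supported depth; -1 marks invalid
// pixels. (The worker _SLICSegment2Vector3D_ appears at the end of this section.)
#include <vector>
#include <opencv2/opencv.hpp>

void slicSegmentsDemo(const cv::Mat& seg, const cv::Mat& depth)
{
    std::vector<std::vector<cv::Point3f>> segmentPoints;
    SLICSegment2Vector3D_<float>(seg, depth, -1.0, segmentPoints);
    // segmentPoints[k] now holds (x, y, value) triples for superpixel k.
}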
cv::Mat flutter::estimate_rigid_transform(cv::InputArray src1, cv::InputArray src2,
                                          double ransac_good_ratio, double ransac_threshold)
{
    Mat M(2, 3, CV_64F), A = src1.getMat(), B = src2.getMat();
    CvMat matA = A, matB = B, matM = M; // legacy C-API headers sharing the same data
    int err = estimate_rigid_transform_detail(&matA, &matB, &matM, ransac_good_ratio, ransac_threshold);
    return (err == 1) ? M : Mat();
}
void StereoMatch::StereoMatching(cv::InputArray rec_image1, cv::InputArray rec_image2, cv::OutputArray disparity_map,
                                 int min_disparity, int num_disparities, int SAD_window_size, int P1, int P2)
{
    cv::Mat img1 = rec_image1.getMat();
    cv::Mat img2 = rec_image2.getMat();

    disparity_map.create(img1.size(), CV_16S);
    cv::Mat dis = disparity_map.getMat();

    // OpenCV 2.x functor-style SGBM matcher.
    cv::StereoSGBM matcher(min_disparity, num_disparities, SAD_window_size, P1, P2);
    matcher(img1, img2, dis);

    // SGBM returns fixed-point disparities scaled by 16.
    dis = dis / 16.0;
}
void cv::SCascade::detect(cv::InputArray _image, cv::InputArray _rois, std::vector<Detection>& objects) const
{
    // only color images are supported
    cv::Mat image = _image.getMat();
    CV_Assert(image.type() == CV_8UC3);

    Fields& fld = *fields;
    fld.calcLevels(image.size(), (float)minScale, (float)maxScale, scales);

    objects.clear();

    if (_rois.empty())
        return detectNoRoi(image, objects);

    int shr = fld.shrinkage;

    cv::Mat roi = _rois.getMat();
    cv::Mat mask(image.rows / shr, image.cols / shr, CV_8UC1);
    mask.setTo(cv::Scalar::all(0));

    cv::Rect* r = roi.ptr<cv::Rect>(0);
    for (int i = 0; i < (int)roi.cols; ++i)
        cv::Mat(mask, cv::Rect(r[i].x / shr, r[i].y / shr, r[i].width / shr, r[i].height / shr)).setTo(cv::Scalar::all(1));

    // create integrals
    ChannelStorage storage(image, shr);

    typedef std::vector<Level>::const_iterator lIt;
    for (lIt it = fld.levels.begin(); it != fld.levels.end(); ++it)
    {
        const Level& level = *it;

        // we train only 3 scales.
        if (level.origScale > 2.5) break;

        for (int dy = 0; dy < level.workRect.height; ++dy)
        {
            uchar* m = mask.ptr<uchar>(dy);
            for (int dx = 0; dx < level.workRect.width; ++dx)
            {
                if (m[dx])
                {
                    storage.offset = dy * storage.step + dx;
                    fld.detectAt(dx, dy, level, storage, objects);
                }
            }
        }
    }

    if (rejCriteria != NO_REJECT) suppress(rejCriteria, objects);
}
void CV_HomographyTest::check_transform_quality(cv::InputArray src_points, cv::InputArray dst_points,
                                                const cv::Mat& H, const int norm_type)
{
    Mat src, dst_original;
    cv::transpose(src_points.getMat(), src);
    cv::transpose(dst_points.getMat(), dst_original);

    // Build homogeneous coordinates: copy the 2xN points into the top rows and
    // append a row of ones (the original Rect dimensions were swapped and the
    // plain assignments only rebound temporary headers without copying data).
    cv::Mat src_3d(src.rows + 1, src.cols, CV_32FC1);
    src.copyTo(src_3d(Rect(0, 0, src.cols, src.rows)));
    src_3d(Rect(0, src.rows, src.cols, 1)).setTo(Scalar(1.0f));

    // Project with the homography: this is a matrix product, not the
    // element-wise cv::multiply, and H must match src_3d's type.
    cv::Mat H32;
    H.convertTo(H32, CV_32FC1);
    cv::Mat dst_found_3d = H32 * src_3d;

    // Dehomogenize by the last row (tiled to the full size for element-wise division).
    cv::Mat w = cv::repeat(dst_found_3d.row(dst_found_3d.rows - 1), dst_found_3d.rows, 1);
    cv::Mat dst_found = dst_found_3d / w;

    // The reprojection error must stay within the tolerance for the transform to pass.
    double reprojection_error = cv::norm(dst_original, dst_found, norm_type);
    CV_Assert(reprojection_error <= max_diff);
}
void Regression::write(cv::InputArray array)
{
    write() << "kind" << array.kind();
    write() << "type" << array.type();
    if (isVector(array))
    {
        int total = (int)array.total();
        int idx = regRNG.uniform(0, total);
        write() << "len" << total;
        write() << "idx" << idx;

        cv::Mat m = array.getMat(idx);

        if (m.total() * m.channels() < 26) // 5x5 or smaller
            write() << "val" << m;
        else
            write(m);
    }
    else
    {
        if (array.total() * array.channels() < 26) // 5x5 or smaller
            write() << "val" << array.getMat();
        else
            write(array.getMat());
    }
}
FeatureValue FeatureShiCorner::calculate(cv::InputArray image)
{
    Mat image_gray;
    cvtColor(image, image_gray, CV_BGR2GRAY);

    Mat corner(image.rows(), image.cols(), CV_32SC1, Scalar(0));
    computeRawCornerMat(image_gray, corner);
    auto points = genPoints(corner);

    // return genDescriptor;
    return FeatureValue();
}
void printSmallMatrixCVChar(const char *title, cv::InputArray inArr)
{
    printf("%s\n", title);
    printf("----------------------------------\n");

    cv::Mat m = inArr.getMat(); // fetch the header once instead of per element
    for (int row = 0; row < m.rows; row++)
    {
        printf("ROW: %02d ---> ", row);
        // assumes the matrix is at least IMG_WIDTH columns wide
        for (int col = 0; col < IMG_WIDTH; col++)
        {
            printf(" %04d ", m.at<unsigned char>(row, col));
        }
        printf("\n");
    }
    printf("\n");
}
void Tools::ReduceRowByMost(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask)
{
    cv::Mat src = _src.getMat();
    _dst.create(1, src.cols, CV_8UC1);
    cv::Mat dst = _dst.getMat();
    cv::Mat mask = _mask.getMat();

    if (src.depth() != CV_8U || mask.depth() != CV_8U)
    {
        throw "unsupported type: src and mask must be CV_8U";
    }

    cv::Size size = src.size();
    uchar *srcd = src.data;
    uchar *dstd = dst.data;
    uchar *maskd = mask.data;

    std::map<uchar, int> m;
    std::map<uchar, int>::iterator p;
    uchar mostVal;
    uchar rVal;

    // For each column, vote for the most frequent (hue-like, modulo-180) value
    // among unmasked pixels, smearing each vote across a +/-2 neighborhood.
    for (int i = 0; i < size.width; ++i)
    {
        m.clear();
        mostVal = 0;
        for (int j = 0; j < size.height; ++j)
        {
            int addr = j * size.width + i;
            if (!maskd[addr]) continue;
            for (int r = -2; r <= 2; ++r)
            {
                // +180 keeps the modulo result non-negative for small values.
                rVal = (uchar)(((int)srcd[addr] + r + 180) % 180);
                m[rVal]++; // operator[] default-initializes the count to 0
            }
        }
        if (m.empty())
        {
            dstd[i] = 0;
            continue;
        }
        mostVal = m.begin()->first;
        for (p = m.begin(); p != m.end(); ++p)
        {
            if (p->second > m[mostVal])
            {
                mostVal = p->first;
            }
        }
        dstd[i] = mostVal;
    }
}
// Computes an Lidfaces model with images in src and corresponding labels
// in labels.
void Lidfaces::train(cv::InputArrayOfArrays src, cv::InputArray labels)
{
    std::vector<std::vector<cv::KeyPoint> > allKeyPoints;
    cv::Mat descriptors;

    // Get SIFT keypoints and LID descriptors
    detectKeypointsAndDescriptors(src, allKeyPoints, descriptors);

    // kmeans function requires points to be CV_32F
    descriptors.convertTo(descriptors, CV_32FC1);

    // Do k-means clustering
    const int CLUSTER_COUNT = params::lidFace::clustersAsPercentageOfKeypoints * descriptors.rows;
    cv::Mat histogramLabels;

    // This function populates histogram bin labels.
    // The nth element of histogramLabels is an integer which represents the cluster that the
    // nth element of allKeyPoints is a member of.
    kmeans(
        descriptors,     // The points we are clustering are the descriptors
        CLUSTER_COUNT,   // The number of clusters (K)
        histogramLabels, // The label of the corresponding keypoint
        params::kmeans::termCriteria,
        params::kmeans::attempts,
        params::kmeans::flags,
        mCenters);

    // Convert to single channel 32 bit float as the matrix needs to be in a form supported
    // by calcHist
    histogramLabels.convertTo(histogramLabels, CV_32FC1);

    // We end up with a histogram for each image
    const size_t NUM_IMAGES = getSize(src);
    std::vector<cv::Mat> hists(NUM_IMAGES);
    // mCodebook.resize(NUM_IMAGES);

    // The histogramLabels vector contains ALL the points from EVERY image. We need to split
    // it up into groups of points for each image.
    // Because there are the same number of points in each image, and the points were put
    // into the labels vector in order, we can simply divide the labels vector evenly to get
    // the individual image's points.
    std::vector<cv::Mat> separatedLabels;
    for (unsigned int i = 0, startRow = 0; i < NUM_IMAGES; ++i)
    {
        separatedLabels.push_back(
            histogramLabels.rowRange(startRow, startRow + allKeyPoints[i].size()));
        startRow += allKeyPoints[i].size();
    }

    // Populate the hists vector
    generateHistograms(hists, separatedLabels, CLUSTER_COUNT);

    // Make the magnitude of each histogram equal to 1
    normalizeHistograms(hists);

    mCodebook = hists;
    mLabels = labels.getMat();
}
void RadiometricResponse::directMap(cv::InputArray _E, cv::OutputArray _I) const
{
    if (_E.empty())
    {
        _I.clear();
        return;
    }
    auto E = _E.getMat();
    _I.create(_E.size(), CV_8UC3);
    auto I = _I.getMat();
#if CV_MAJOR_VERSION > 2
    E.forEach<cv::Vec3f>(
        [&I, this](cv::Vec3f& v, const int* p) { I.at<cv::Vec3b>(p[0], p[1]) = inverseLUT(response_channels_, v); });
#else
    for (int i = 0; i < E.rows; i++)
        for (int j = 0; j < E.cols; j++)
            I.at<cv::Vec3b>(i, j) = inverseLUT(response_channels_, E.at<cv::Vec3f>(i, j));
#endif
}
static bool calcLut(cv::InputArray _src, cv::OutputArray _dst,
                    const int tilesX, const int tilesY, const cv::Size tileSize,
                    const int clipLimit, const float lutScale)
{
    cv::ocl::Kernel k("calcLut", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    cv::UMat src = _src.getUMat();
    _dst.create(tilesX * tilesY, 256, CV_8UC1);
    cv::UMat dst = _dst.getUMat();

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { tilesX * localThreads[0], tilesY * localThreads[1], 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    idx = k.set(idx, clipLimit);
    k.set(idx, lutScale);

    return k.run(2, globalThreads, localThreads, false);
}
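// A hedged sketch of how the two OpenCL helpers in this section (calcLut above
// and transform at the top) are typically chained inside a CLAHE apply step:
// calcLut builds the per-tile histogram LUTs, then transform interpolates them
// over the image. The wrapper name is illustrative, and this assumes the image
// size divides evenly into the tile grid (real CLAHE pads/extrapolates).
static bool applyCLAHE_ocl(cv::InputArray src, cv::OutputArray dst,
                           int tilesX, int tilesY, int clipLimit, float lutScale)
{
    cv::Size tileSize(src.cols() / tilesX, src.rows() / tilesY);
    cv::UMat lut;
    if (!calcLut(src, lut, tilesX, tilesY, tileSize, clipLimit, lutScale))
        return false;
    return transform(src, dst, lut, tilesX, tilesY, tileSize);
}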
int boostColor(cv::InputArray src, cv::OutputArray dst, float intensity)
{
    const int MAX_INTENSITY = 255;

    Mat srcImg = src.getMat();
    CV_Assert(srcImg.channels() == 3);
    CV_Assert(intensity >= 0.0f && intensity <= 1.0f);

    if (srcImg.type() != CV_8UC3)
    {
        srcImg.convertTo(srcImg, CV_8UC3);
    }

    Mat srcHls;
    cvtColor(srcImg, srcHls, CV_BGR2HLS);

    // Boost the saturation channel (HLS order: hue, lightness, saturation).
    int intensityInt = (int)(intensity * MAX_INTENSITY);
    srcHls += Scalar(0, 0, intensityInt);

    cvtColor(srcHls, dst, CV_HLS2BGR);
    dst.getMat().convertTo(dst, srcImg.type());

    return 0;
}
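// A minimal usage sketch for boostColor; the file names are illustrative.
#include <opencv2/opencv.hpp>

void boostColorDemo()
{
    cv::Mat img = cv::imread("photo.jpg");
    cv::Mat vivid;
    boostColor(img, vivid, 0.25f); // raise saturation by 25% of full scale
    cv::imwrite("photo_vivid.jpg", vivid);
}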
void StairDetection::CannyThreshold(cv::InputArray image, cv::Mat &edges)
{
    if (image.channels() > 1)
    {
        /// Convert the image to grayscale
        cvtColor(image, edges, CV_RGB2GRAY);

        /// Reduce noise with a 3x3 kernel
        blur(edges, edges, cv::Size(3, 3));
    }
    else
    {
        /// src is already a grayscale image.
        edges = image.getMat().clone();
    }

    /// Canny detector
    Canny(edges, edges, cannyLowThreshold, cannyLowThreshold * cannyRatio, cannyKernelSize);
}
void EdgeDetector_<ParallelUtils::eGLSL>::apply_async_glimpl(cv::InputArray _oNextImage, bool bRebindAll, double dThreshold)
{
    m_dCurrThreshold = dThreshold;
    cv::Mat oNextInputImg = _oNextImage.getMat();
    CV_Assert(oNextInputImg.size() == m_oFrameSize);
    CV_Assert(oNextInputImg.isContinuous());
    GLImageProcAlgo::apply_async(oNextInputImg, bRebindAll);
}
void BackgroundSubtractorMedian::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate)
{
    framecount++;

    cv::Mat image = _image.getMat();
    if (image.channels() > 1)
    {
        cvtColor(image, image, CV_BGR2GRAY);
    }
    if (image.cols == 0 || image.rows == 0)
    {
        return;
    }

    _fgmask.create(image.size(), CV_8U);
    cv::Mat fgmask = _fgmask.getMat();

    if (!init)
    {
        init = true;
        bgmodel = cv::Mat(image.size(), CV_8U);
    }
    //printf("(%d,%d)(%d) ",image.cols,image.rows,image.type());
    //printf("(%d,%d)(%d)\n",bgmodel.cols,bgmodel.rows,bgmodel.type());

    // Approximate median update: move the model one gray level toward the frame.
    cv::Mat cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_GT);
    cv::bitwise_and(cmpArr, 1, cmpArr); // 255 -> 1 where image > model
    cv::add(bgmodel, cmpArr, bgmodel);

    cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_LT);
    cv::bitwise_and(cmpArr, 1, cmpArr); // 255 -> 1 where image < model
    cv::subtract(bgmodel, cmpArr, bgmodel);

    // Foreground = thresholded distance from the running median.
    cv::absdiff(image, bgmodel, fgmask);
    cv::threshold(fgmask, fgmask, fg_threshold, 255, CV_THRESH_TOZERO);
    cv::medianBlur(fgmask, fgmask, median_filter_level);
}
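// A minimal usage sketch, assuming BackgroundSubtractorMedian is
// default-constructible and a camera is available; both are assumptions.
#include <opencv2/opencv.hpp>

void backgroundSubtractorDemo()
{
    cv::VideoCapture cap(0);
    BackgroundSubtractorMedian bgs;
    cv::Mat frame, fgmask;
    while (cap.read(frame))
    {
        bgs(frame, fgmask, 0 /* learningRate is unused by this implementation */);
        cv::imshow("foreground", fgmask);
        if (cv::waitKey(30) >= 0) break;
    }
}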
void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth)
{
    int stype = _src.type();
    int dcn = _src.channels();
    int depth = CV_MAT_DEPTH(stype);

    if (0 <= ddepth)
        depth = ddepth;

    Mat src, dst;
    src = _src.getMat();
    Size sz = src.size();

    _dst.create(sz, CV_MAKETYPE(depth, dcn));
    dst = _dst.getMat();

    // Width follows cols and height follows rows (the original had them swapped,
    // which breaks on non-square images).
    int imageWidth = src.cols;
    int imageHeight = src.rows;

    Mat srcChannels[3];
    split(src, srcChannels);

    int margineWidth = kernel.cols / 2;
    int margineHeight = kernel.rows / 2;
    double kernelElemCount = (double)(kernel.cols * kernel.rows);

    for (int ch = 0; ch < dcn; ++ch)
    {
        for (int y = 0; y < imageHeight; ++y)
        {
            Vec3d *ptr = dst.ptr<Vec3d>(y); // assumes a 3-channel double destination
            for (int x = 0; x < imageWidth; ++x)
            {
                // Pass margineHeight as the height argument (the original passed margineWidth twice).
                if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineHeight))
                {
                    ptr[x][ch] = calcKernelOutputAtEdge(srcChannels[ch], kernel, x, y,
                                                        imageWidth, imageHeight, margineWidth, margineHeight);
                }
                else
                {
                    ptr[x][ch] = calcKernelOutput(srcChannels[ch], kernel, x, y,
                                                  margineWidth, margineHeight, kernelElemCount);
                }
            }
        }
    }
}
void IEdgeDetector_GLSL::apply_gl(cv::InputArray _oNextImage, bool bRebindAll, double dThreshold)
{
    lvAssert_(GLImageProcAlgo::m_bGLInitialized, "algo must be initialized first");
    m_dCurrThreshold = dThreshold;
    cv::Mat oNextInputImg = _oNextImage.getMat();
    lvAssert_(oNextInputImg.size() == GLImageProcAlgo::m_oFrameSize, "input image size must match initialization size");
    lvAssert_(oNextInputImg.isContinuous(), "input image must be continuous");
    GLImageProcAlgo::apply_gl(oNextInputImg, bRebindAll);
}
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recImage2, cv::OutputArray _disparityMap,
                            int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2)
{
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recImage2.getMat();

    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    // OpenCV 3.x API: StereoSGBM::create(minDisparity, numDisparities, blockSize)
    Ptr<StereoSGBM> matcher = StereoSGBM::create(minDisparity, numDisparities, SADWindowSize);
    matcher->setP1(P1);
    matcher->setP2(P2);
    matcher->compute(img1, img2, dis);
    //StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    //matcher(img1, img2, dis);

    // SGBM returns fixed-point disparities scaled by 16.
    dis = dis / 16.0;
}
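// A minimal usage sketch for the OpenCV 3.x variant above; the file names and
// parameter values are illustrative (P1/P2 follow the common 8*blockSize^2 and
// 32*blockSize^2 heuristic).
#include <opencv2/opencv.hpp>

void stereoMatchingDemo()
{
    cv::Mat left  = cv::imread("left.png", cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
    cv::Mat disparity; // filled as CV_16S, already divided down by 16 on return
    stereo::stereoMatching(left, right, disparity, 0, 64, 9, 8 * 9 * 9, 32 * 9 * 9);
}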
inline void normAssert(cv::InputArray ref, cv::InputArray test, const char *comment = "",
                       double l1 = 0.00001, double lInf = 0.0001)
{
    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
    EXPECT_LE(normL1, l1) << comment;

    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
    EXPECT_LE(normInf, lInf) << comment;
}
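// A minimal usage sketch inside a Google Test case, assuming the usual
// gtest + cvtest harness that normAssert is written for; the test name and
// matrices are illustrative.
TEST(Layers, OutputMatchesReference)
{
    cv::Mat reference = cv::Mat::ones(4, 4, CV_32F);
    cv::Mat produced  = reference.clone();
    normAssert(reference, produced, "layer output"); // mean L1 and max errors must stay tiny
}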
void FaceDetectorModule::ProcessFrame(cv::InputArray in, cv::OutputArray out)
{
    if (IsEnabled())
    {
        float scale = imageScale;
        float unscale = 1.0f / scale;

        cv::UMat latestStep;
        in.copyTo(latestStep);
        cv::resize(latestStep, latestStep, cv::Size(), scale, scale, INTER_NEAREST);
        cvtColor(latestStep, latestStep, CV_BGR2GRAY);
        equalizeHist(latestStep, latestStep);

        cv::UMat outCopy;
        //outCopy = cv::Mat(in.cols(), in.rows(), in.type());
        out.copyTo(outCopy);

        std::vector<cv::Rect> faces;

        //-- Detect faces
        face_cascade.detectMultiScale(latestStep, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

        //http://docs.opencv.org/3.0.0/db/d28/tutorial_cascade_classifier.html
        for (size_t i = 0; i < faces.size(); i++)
        {
            cv::Point center((faces[i].x + faces[i].width * 0.5) * unscale,
                             (faces[i].y + faces[i].height * 0.5) * unscale);
            ellipse(outCopy, center,
                    cv::Size((faces[i].width * 0.5) * unscale, (faces[i].height * 0.5) * unscale),
                    0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);

            /*
            cv::UMat faceROI = latestStep( faces[i] );
            std::vector<cv::Rect> eyes;

            //-- In each face, detect eyes
            eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, cv::Size(30, 30) );

            for( size_t j = 0; j < eyes.size(); j++ )
            {
                cv::Point eye_center( (faces[i].x + eyes[j].x + eyes[j].width/2) * unscale,
                                      (faces[i].y + eyes[j].y + eyes[j].height/2) * unscale );
                int radius = cvRound( (eyes[j].width + eyes[j].height) * 0.25 * unscale );
                circle( outCopy, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
            }
            */
        }

        outCopy.copyTo(out);
    }
    else
    {
        //in.copyTo(out);
    }
}
template <typename T, typename S> // the template header was evidently lost in extraction; T and S are used throughout
void _SLICSegment2Vector3D_(cv::InputArray segment, cv::InputArray signal, T invalidValue,
                            std::vector<std::vector<cv::Point3_<S>>>& segmentPoint)
{
    Mat sig = signal.getMat();
    Mat seg = segment.getMat();

    // Append every valid pixel as an (x, y, value) point of its segment.
    for (int j = 0; j < seg.rows; ++j)
    {
        const int* s = seg.ptr<int>(j);
        const T* value = sig.ptr<T>(j);
        for (int i = 0; i < seg.cols; ++i)
        {
            if (value[i] != invalidValue)
            {
                segmentPoint[s[i]].push_back(Point3_<S>((S)i, (S)j, (S)value[i]));
            }
        }
    }
}