bool rotatedRectangleContainPoint(cv::RotatedRect rectangle, cv::Point2f point) {
    // Collect the four corners of the rotated rectangle into a contour.
    cv::Point2f corners[4];
    rectangle.points(corners);
    cv::Point2f *lastItemPointer = (corners + sizeof corners / sizeof corners[0]);
    std::vector<cv::Point2f> contour(corners, lastItemPointer);

    // measureDist = false: pointPolygonTest returns +1 (inside), 0 (on the edge) or -1 (outside).
    double indicator = pointPolygonTest(contour, point, false);
    return indicator >= 0;
}
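A minimal usage sketch (the rectangle and test points are hypothetical, and the helper above is assumed to be in scope). It also illustrates the `pointPolygonTest` convention: with `measureDist = false` the return value is +1, 0 or -1 for inside, on the edge and outside respectively.

```cpp
#include <opencv2/imgproc.hpp>
#include <iostream>

int main() {
    // Hypothetical rectangle: centred at (100, 100), 80x40 px, rotated by 30 degrees.
    cv::RotatedRect rect(cv::Point2f(100.f, 100.f), cv::Size2f(80.f, 40.f), 30.f);

    std::cout << std::boolalpha
              << rotatedRectangleContainPoint(rect, cv::Point2f(100.f, 100.f)) << '\n'   // true: centre is inside
              << rotatedRectangleContainPoint(rect, cv::Point2f(300.f, 300.f)) << '\n';  // false: far outside
    return 0;
}
```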
void ForegroundProcessor::distanceFilter(Frame &frame, double minDist) {
    Mat temp = Mat::zeros(frame.foreground.size(), frame.foreground.type());
    vector<vector<Point>> contours;
    findContours(frame.foreground.clone(), contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    Rect objRect;
    double dist = 0;
    double minSize = 20;
    for (unsigned int i = 0; i < contours.size(); i++) {
        objRect = boundingRect(contours[i]);
        vector<Point> contour = contours[i];

        // Measure the distance to the contour for all pixels within the bounding box.
        for (int j = objRect.x; j < objRect.x + objRect.width; j++) {
            for (int k = objRect.y; k < objRect.y + objRect.height; k++) {
                if (pointPolygonTest(contour, Point(j, k), false) == 1) { // Pixel is inside the contour
                    dist = max(dist, pointPolygonTest(contour, Point(j, k), true)); // Track the deepest interior point
                }
            }
        }
        if (dist > minDist) { // Draw the contour only if the distance is great enough.
            drawContours(temp, contours, i, Scalar(255), CV_FILLED);
        }
        dist = 0;
    }
    frame.foreground = temp;
}
bool Delaunay::iswithinTri(const Point2f &pt, int tri_id) {
    triangle tri = triangulation[tri_id];
    vector<Point2f> contour;
    convert2Contour(tri, contour);
    // Strictly inside only: pointPolygonTest returns 0 for points lying on the triangle edge.
    return pointPolygonTest(contour, pt, false) > 0;
}
void BorderMattingHandler::rejectOutlier() {
    vector< vector<Point> > contours; // Vector for storing contours
    vector<Vec4i> hierarchy;
    int largest_contour_index = 0;
    int largest_area = 0;

    Mat _alpha = alphamap.clone();
    findContours(_alpha, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return; // Nothing to reject if no contour was found.

    // Get the largest contour.
    for (int i = 0; i < contours.size(); i++) {
        double a = contourArea(contours[i], false);
        if (a > largest_area) {
            largest_area = a;
            largest_contour_index = i;
        }
    }

    // Zero every alpha pixel that falls outside the largest contour.
    Point p;
    for (p.y = 0; p.y < alphamap.rows; p.y++) {
        for (p.x = 0; p.x < alphamap.cols; p.x++) {
            if (pointPolygonTest(contours[largest_contour_index], p, false) < 0) {
                alphamap.at<uchar>(p) = 0;
            }
        }
    }
}
//--------------------------------------------------
vector<KeyPoint> MatchableImage::findFeatures(string alg_name, Mat &bounds) {
    vector<KeyPoint> tempFeatures = findFeatures(alg_name);
    features.clear();
    for (int i = 0; i < tempFeatures.size(); i++) {
        // Signed distance from the candidate feature to the bounds polygon (positive = inside).
        // The test has to use tempFeatures, since `features` was just cleared.
        double inside = pointPolygonTest(bounds, tempFeatures[i].pt, true);
        if (inside >= 0) {
            features.push_back(tempFeatures[i]);
        } else {
            // Outside the bounds: keep the feature if its support region still overlaps them.
            double dist = abs(inside);
            if (dist < (tempFeatures[i].size / 2.)) {
                features.push_back(tempFeatures[i]);
            }
        }
    }
    log(LOG_LEVEL_DEBUG, "in findFeatures(), after filtering by bounds, there are %d features", features.size());

#ifdef DEBUG
    for (int i = 0; i < 4; i++) {
        float *p1 = bounds.ptr<float>(i % 4);
        float *p2 = bounds.ptr<float>((i + 1) % 4);
        //line(cvImage, Point(p1[0], p1[1]), Point(p2[0],p2[1]), CV_RGB(255,255,255), 3);
    }
#endif

    featuresCurrent = true;
    return features;
}
bool Jive::findPalmCenter(vector<cv::Point> &contour, cv::Point &center, float &radius) {
    bool rc = false;
    float m10, m01, m00;
    m10 = m01 = m00 = 0;

    // Intensity-weighted moments over all pixels inside (or on) the contour.
    for (int x = 0; x < _binaryMat.cols; x++) {
        for (int y = 0; y < _binaryMat.rows; y++) {
            cv::Point p = cv::Point(x, y);
            if (pointPolygonTest(contour, p, false) >= 0) {
                float val = _binaryMat.at<float>(y, x);
                m00 += val;
                m10 += x * val;
                m01 += y * val;
            }
        }
    }

    if (m00 > 0.0) {
        // Centroid of the palm region.
        center = cv::Point((int)(m10 / m00), (int)(m01 / m00));

        // Radius = distance from the centroid to the closest contour point.
        float r2 = 2 * getDim().width * getDim().width;
        for (int i = 0; i < contour.size(); i++) {
            float x2 = (float)contour[i].x - center.x;
            x2 *= x2;
            float y2 = (float)contour[i].y - center.y;
            y2 *= y2;
            if (r2 > x2 + y2)
                r2 = x2 + y2;
        }
        radius = sqrt(r2);
        rc = true;
    }
    return rc;
}
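When the per-pixel intensity weighting is not needed, the same centroid can be obtained directly from the contour with `cv::moments`, avoiding the full-image `pointPolygonTest` scan. A minimal sketch (the helper name is illustrative, and a non-degenerate contour is assumed):

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: contour centroid from spatial moments (no intensity weighting).
static cv::Point contourCentroid(const std::vector<cv::Point> &contour) {
    cv::Moments m = cv::moments(contour);
    // For a contour, m00 is its area; guard against degenerate (zero-area) contours.
    if (m.m00 <= 0.0)
        return contour.empty() ? cv::Point() : contour.front();
    return cv::Point(cvRound(m.m10 / m.m00), cvRound(m.m01 / m.m00));
}
```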
void StairDetection::GetIntersectHull(std::vector<cv::Point> &stairsConvexHull_normal,
                                      std::vector<cv::Point> &stairsConvexHull_inverse,
                                      std::vector<cv::Point> &intersectConvexHull) {
    if (stairsConvexHull_normal.empty() || stairsConvexHull_inverse.empty())
        return;

    for (cv::Point p : stairsConvexHull_normal) {
        if (pointPolygonTest(stairsConvexHull_inverse, p, false) >= 0) {
            intersectConvexHull.push_back(p);
        }
    }
    for (cv::Point p : stairsConvexHull_inverse) {
        if (pointPolygonTest(stairsConvexHull_normal, p, false) >= 0) {
            intersectConvexHull.push_back(p);
        }
    }
}
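The mutual point test above only keeps vertices of one hull that fall inside the other; when the actual intersection polygon is wanted, OpenCV's `cv::intersectConvexConvex` (whose implementation appears further down in this collection) can be used instead. A minimal sketch with hypothetical hulls:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: exact intersection of two convex polygons.
std::vector<cv::Point2f> hullIntersection(const std::vector<cv::Point2f> &hullA,
                                          const std::vector<cv::Point2f> &hullB) {
    std::vector<cv::Point2f> intersection;
    // Returns the intersection area and fills `intersection` with the clipped polygon.
    float area = cv::intersectConvexConvex(hullA, hullB, intersection, true);
    if (area <= 0.f)
        intersection.clear();   // Disjoint polygons.
    return intersection;
}
```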
////////////////// Shadow Suppression ////////////////////////
void ForegroundProcessor::suppressShadows(Frame & frame, double minArea, double minDist) {
    //Create "most probable background"
    if (shadowModel.empty())
        shadowModel = Mat::zeros(frame.image.size(), CV_8UC3);
    frameCounter++;
    if (frameCounter < 10) {
        shadowModel += (frame.image / 10);
        return;
    }

    vector<vector<Point>> contours;
    findContours(frame.foreground.clone(), contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    Mat lastImage = frame.image.clone();
    cvtColor(lastImage, lastImage, CV_BGR2HSV_FULL);

    double objArea;
    Rect objRect;
    double dist = 0;
    for (unsigned int i = 0; i < contours.size(); i++) {
        objArea = contourArea(contours[i]);
        objRect = boundingRect(contours[i]);
        vector<Point> contour = contours[i];
        for (int j = objRect.x; j < objRect.x + objRect.width; j++) {
            for (int k = objRect.y; k < objRect.y + objRect.height; k++) {
                Point matPos(j, k);
                //If object is not outside the contour
                if (pointPolygonTest(contour, matPos, false) >= 0) {
                    // Parameters for shadow detection
                    if ( ((abs((double)lastImage.at<Vec3b>(matPos)[0] - (double)shadowModel.at<Vec3b>(matPos)[0]) / 255 < tau_H))                 // HUE
                      && (((double)lastImage.at<Vec3b>(matPos)[1] - (double)shadowModel.at<Vec3b>(matPos)[1]) / 255 < tau_S)                      // SATURATION
                      && ((double)lastImage.at<Vec3b>(matPos)[2] / ((double)shadowModel.at<Vec3b>(matPos)[2] + 0.0001) > alpha)                   // VALUE (ALPHA)
                      && ((double)lastImage.at<Vec3b>(matPos)[2] / ((double)shadowModel.at<Vec3b>(matPos)[2] + 0.0001) < beta) )                  // VALUE (BETA)
                    {
                        //Color gray for visualisation
                        frame.foreground.at<uchar>(matPos) = 128;
                    }
                }
            }
        }
    }
}
bool MinEnclosingTriangleFinderTest::ArePointsEnclosed() {
    double distance = 0;
    for (const cv::Point2f &point : points) {
        // Signed distance: negative values mean the point lies outside the triangle.
        distance = pointPolygonTest(triangle, point, true);
        if (distance < -(POINT_IN_TRIANGLE_THRESH)) {
            return false;
        }
    }
    return true;
}
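For context, this test exercises a minimum-enclosing-triangle finder; OpenCV ships `cv::minEnclosingTriangle`, and a hedged sketch of computing the triangle and validating it the same way might look as follows (the helper name and tolerance are illustrative):

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: compute a minimum enclosing triangle and verify all points lie inside it
// (within a small tolerance), mirroring the check in the test above.
bool enclosesAllPoints(const std::vector<cv::Point2f> &points, double tolerance = 1e-4) {
    std::vector<cv::Point2f> triangle;
    cv::minEnclosingTriangle(points, triangle);   // return value (triangle area) ignored here

    for (const cv::Point2f &p : points) {
        // Signed distance: negative values mean the point is outside the triangle.
        if (cv::pointPolygonTest(triangle, p, true) < -tolerance)
            return false;
    }
    return true;
}
```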
bool BlobDetection::isQualifyingContour(vector<Point> contour, vector<Point> cutOffRegion,
                                        vector<vector<Point>> *blobsInCutoff) {
    bool ratioQualified = false;
    int minimum_width = 30;    //30;
    int maximum_width = 180;   //100;
    int minimum_height = 30;   // 30;
    double minimum_htow_ratio = 1.3;

    Rect roi = boundingRect(contour);
    double heightToWidthRatio = static_cast<double>(roi.height) / static_cast<double>(roi.width);
    if (roi.width > minimum_width && roi.width < maximum_width &&
        roi.height > minimum_height && heightToWidthRatio > minimum_htow_ratio) {
        ratioQualified = true;
    }

    if (ratioQualified) {
        bool withinCutOff = true;
        // Corners of the blob's bounding rectangle.
        vector<Point> boundingBox;
        //int constantval = 300;
        boundingBox.push_back(Point(roi.x, roi.y));
        boundingBox.push_back(Point(roi.x + roi.width, roi.y));
        boundingBox.push_back(Point(roi.x + roi.width, roi.y + roi.height));
        boundingBox.push_back(Point(roi.x, roi.y + roi.height));

        for (int pointId = 0; pointId < boundingBox.size(); pointId++) {
            Point pt = boundingBox[pointId];
            int withinCutOffD = pointPolygonTest(cutOffRegion, pt, true);
            if (withinCutOffD < 0) {
                // If at least one corner is not within the cutoff region this becomes a qualifying blob.
                return true;
            }
        }
        // If this point is reached the whole contour is within the cutoff.
        blobsInCutoff->push_back(contour);
        return false;
    }

    // This is reached if ratioQualified is false.
    return ratioQualified;
}
Mat AAM::getTextureInsideHull(Mat image, Mat convexHull) {
    //cout<<"type: "<<image.channels()<<" "<<image.type()<<endl;
    image.convertTo(image, 0);   // 0 == CV_8U
    imshow("image to get texture", image);

    // Collect the grey values of all pixels inside (or on) the convex hull.
    Mat texture(0, 1, CV_64F);
    for (int i = 0; i < image.rows; i++) {
        for (int j = 0; j < image.cols; j++) {
            double distance = pointPolygonTest(convexHull, cvPoint2D32f(i, j), 1);
            if (distance >= 0) {
                texture.push_back(image.at<uchar>(j, i));
            }
        }
    }
    return texture.t();
}
// Class function for BorderMattingHandler.
BorderMattingHandler::BorderMattingHandler(const Mat& image, vector<Point>& contour) {
    if (image.empty()) {
        return;
    }
    image.copyTo(_img);

    // Init trimap: background / foreground / unknown, depending on which side of the contour a pixel lies.
    trimap.create(_img.size(), CV_8UC1);
    Point p;
    for (p.y = 0; p.y < image.rows; p.y++) {
        for (p.x = 0; p.x < image.cols; p.x++) {
            double dist = pointPolygonTest(contour, Point2f(p.x, p.y), false);
            if (dist < 0) {
                trimap.at<uchar>(p) = BM_B;
            } else if (dist > 0) {
                trimap.at<uchar>(p) = BM_F;
            } else {
                trimap.at<uchar>(p) = BM_U;
            }
        }
    }

    // Subsample the contour into line segments whose endpoints are at least MIN_DIST apart.
    const int MIN_DIST = 4;
    Point cupt = contour[0];
    for (int i = 1; i < contour.size(); i++) {
        Line ln;
        ln.src = cupt;
        if (ptdist(cupt, contour[i]) > MIN_DIST) {
            ln.dst = contour[i];
            contours.push_back(ln);
            cupt = contour[i];
        }
    }

    /*for (int i = 0; i < contours.size(); i++) {
        line(image, contours[i].src, contours[i].dst, Scalar(0,0,255));
        imshow("contour", image);
        waitKey(1);
    }*/

    constructTrimap();
}
std::pair<Point, double> Detect::findMaxInscribedCircle(const vector<vector<Point>>& polyCurves,
                                                        const Mat& frame) {
    std::pair<Point, double> c;
    double dist = -1;
    double maxdist = -1;

    if (polyCurves.size() > 0) {
        // Sample the frame on a coarse grid; the largest positive signed distance to the
        // contour gives the centre and radius of the maximum inscribed circle.
        for (int i = 0; i < frame.cols; i += 10) {
            for (int j = 0; j < frame.rows; j += 10) {
                dist = pointPolygonTest(polyCurves[0], Point(i, j), true);
                if (dist > maxdist) {
                    maxdist = dist;
                    c.first = Point(i, j);
                }
            }
        }
        c.second = maxdist;
    }
    return c;
}
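A minimal usage sketch, assuming the `Detect` class above and a single polygonal curve, that overlays the circle found by the routine (function name and colour are illustrative):

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: draw the maximum inscribed circle on a copy of the frame.
cv::Mat drawMaxInscribedCircle(const cv::Mat &frame,
                               const std::vector<std::vector<cv::Point>> &polyCurves,
                               Detect &detector) {
    cv::Mat out = frame.clone();
    std::pair<cv::Point, double> c = detector.findMaxInscribedCircle(polyCurves, frame);
    if (c.second > 0)   // A positive radius means the centre lies inside the contour.
        cv::circle(out, c.first, cvRound(c.second), cv::Scalar(0, 255, 0), 2);
    return out;
}
```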
void CMouthComponent::renderComponentInColor(std::vector<cv::Point> templatePoints, cv::Mat templateMat,
                                             int width, int height) {
    warpedTemplate = cvCreateMat(height, width, CV_8UC3);
    warpedTemplate.setTo(255);

    // Draw the mouth outline by connecting consecutive located points.
    std::vector<cv::Point> mouthPoints = getLocatedPoints();
    for (int i = 0; i < mouthPoints.size(); i++) {
        int j;
        if (i == 11 || i == 19) {
            j = 0;
        } else {
            j = i + 1;
        }
        cv::Point kp1 = mouthPoints[i];
        cv::Point kp2 = mouthPoints[j];
        cv::line(warpedTemplate, kp1, kp2, cv::Scalar(70, 75, 170), 1);
    }

    // Fill every pixel strictly inside the mouth polygon.
    cv::Mat_<cv::Vec3b> _I = warpedTemplate;
#pragma omp parallel for
    for (int row = 0; row < warpedTemplate.rows; ++row) {
        for (int col = 0; col < warpedTemplate.cols; ++col) {
            double isMouthPoint = pointPolygonTest(mouthPoints, cv::Point2f(col, row), false);
            if (isMouthPoint > 0) {
                _I(row, col)[0] = 100;
                _I(row, col)[1] = 100;
                _I(row, col)[2] = 200;
            }
        }
    }
    warpedTemplate = _I;
}
std::vector<cv::Mat> ObjectSelector::getObjects(cv::Mat *src, cv::Point2f p) {
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::Mat canny_output;
    cv::Mat src_gray;

    cvtColor(*src, src_gray, CV_BGR2GRAY);
    blur(src_gray, src_gray, cv::Size(3, 3));

    /// Detect edges using Canny
    Canny(src_gray, canny_output, thresh, thresh * 2, 3);

    /// Find contours
    findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

    // Rank contours by their signed distance to the clicked point.
    std::priority_queue<TestedContour> results;
    for (int i = 0; i < contours.size(); i++)
        results.push(TestedContour(i, -pointPolygonTest(contours[i], p, true)));

    std::vector<cv::Mat> retVal;
    for (int i = 0; i < maxRet && !results.empty(); i++) {
        TestedContour c = results.top();
        results.pop();

        cv::Rect br = boundingRect(contours[c.Id()]);

        // Mask out everything except the convex hull of the selected contour.
        cv::Mat mask = cv::Mat::zeros(src->size(), CV_8UC1);
        std::vector<std::vector<cv::Point> > hull;
        hull.push_back(std::vector<cv::Point>());
        convexHull(contours[c.Id()], hull[0], 1, true);
        drawContours(mask, hull, 0, cv::Scalar(255), CV_FILLED);

        // Copy the masked region and crop it to the bounding rectangle.
        cv::Mat tmp = cv::Mat::zeros(br.size(), CV_8UC3);
        src->copyTo(tmp, mask);
        retVal.push_back(cv::Mat(tmp, br));
    }
    return retVal;
}
void AAM::displayModel() {
    Mat convexHull = this->createConvexHull(this->meanPoints);
    Mat image = Mat::zeros(ImageWidth, ImageHeight, CV_8UC1);
    int n = 0;
    //cout<<"Mean texture type "<<this->meanTexture.type()<<endl;
    for (int i = 0; i < image.rows; i++) {
        for (int j = 0; j < image.cols; j++) {
            double distance = pointPolygonTest(convexHull, cvPoint2D32f(i, j), 1);
            if (distance >= 0) {
                if (n % 30 == 0) {
                    cout << this->meanTexture.at<int>(0, n / sampling) << endl;
                    image.at<uchar>(j, i) = cvRound(this->meanTexture.at<int>(0, n / 30));
                    //image.at<uchar>(j,i)=128;
                }
                n++;
            }
        }
    }
    cout << this->meanTexture.type() << endl;
    imshow("mean model", image);
}
bool Num_Extract::validate (Mat mask, Mat pre){ std::vector<std::vector<cv::Point> > contour; Mat img; bool validate = false; bool validate1 = false; bool big = false; Canny(mask,img,0,256,5); vector<Vec4i> hierarchy; //find contours from post color detection cv::findContours(img, contour, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); for(int i = 0 ; i<contour.size();i++){ if(contourArea( contour[i],false)>0.5*320*240)big = true;// If too close to object } int count = 0; for(int i = 0 ; i<contour.size();i++){ if(contourArea( contour[i],false)>1000) count++; } if(count == 0 )return validate;//filter out random noise Mat grey,grey0,grey1,grey2,grey3; vector<Mat> bgr_planes; split(pre,bgr_planes); std::vector<std::vector<cv::Point> > contour1; std::vector<cv::Point> inner; double area = 0; vector<int> valid_index ; vector<int> valid_test,bins_indices; for(int i = 0 ; i<contour.size();i++){ if(contourArea( contour[i],false)>1000){ area = area + contourArea( contour[i],false); valid_test.push_back(i); for(int j = 0;j < contour[i].size();j++){ inner.push_back(contour[i][j]); } } } RotatedRect inrect = minAreaRect(Mat(inner));//bounding rectangle of bins (if detected) RotatedRect outrect ; double thresh = 0; double threshf; vector<int> count1; int count2 = 0; vector<Point> poly; if(!big){ while(thresh < 1000 && (!validate && !validate1)){ Canny(bgr_planes[0],grey1,0,thresh,5);//multi level canny thresholding Canny(bgr_planes[1],grey2,0,thresh,5); Canny(bgr_planes[2],grey3,0,thresh,5); max(grey1,grey2,grey1); max(grey1,grey3,grey);//getting strongest edges dilate(grey , grey0 , Mat() , Point(-1,-1)); grey = grey0; cv::findContours(grey, contour1,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE); for(int i = 0;i < contour1.size();i++){ if(hierarchy[i][3]==-1){ continue;//excluding the outermost contour (contour due to the mask) } if(contourArea(contour1[i],false)>area){ outrect = minAreaRect(Mat(contour1[i]));//bounding rectangle of detected contour if(A_encloses_B(outrect,inrect)){ valid_index.push_back(i); } } count2 = 0; approxPolyDP(Mat(contour1[i]),poly,3,true); if(contourArea(contour1[i],false)>1500){ for(int j = 0 ; j < valid_test.size(); j++){ RotatedRect test = minAreaRect(Mat(contour[valid_test[j]])); double area1 = contourArea(contour1[i],false); double area2 = contourArea(contour[valid_test[j]],false); if(pointPolygonTest(Mat(poly),test.center,false)>0 && area1>area2){ count2++; } } } count1.push_back(count2); poly.clear(); } bool val = false; for(int i = 0 ; i < count1.size(); i++){ if(count1[i]>=1 && val){ validate1 = true ; break; } if(count1[i]>=1){ val = true; } } if(valid_index.size()>=1){ validate = true; threshf = thresh; } thresh = thresh + 1000/11; valid_index.clear(); } } else{ validate = true; } if(validate || validate1){ return true; } return validate; }
void IITkgp_functions::ProcessingBlocks::LabelIndividualPB(void) {
    Continue con;
    SelectLabel sl;
    int k = 0;
    int p = 0;

    for (int i = 0; i < contours.size(); i++) {
        if (hierarchy[i][3] == -1 && validblock[i] == true) {
            if (LabelFlag[k] == false) { // If a block is not labelled
                p = p + 1;
                int sp;
                char *name;
                name = (char *) malloc(2000 * sizeof(char));
                sp = sprintf(name, "Processing Block%d", k);
                tempname = name;
                namedWindow(name, CV_WINDOW_KEEPRATIO);
                Mat Temp;
                Temp = Blocks[k];
                imshow(name, Temp);

                sl.setModal(true); // Select label
                sl.exec();
                PBLabel[k] = selectedlabel;
                LabelFlag[k] = true;
                UnlabelledPB = UnlabelledPB - 1;
                LabelCount[selectedlabel] = LabelCount[selectedlabel] + 1;

                // Now give colour to the labelled processing block.
                vector<Point> contours_poly;
                Rect BoundRect;
                approxPolyDP(Mat(contours[i]), contours_poly, 3, true);
                BoundRect = boundingRect(Mat(contours_poly));

                for (int m = BoundRect.y; m < BoundRect.y + BoundRect.height; m++) {
                    for (int n = BoundRect.x; n < BoundRect.x + BoundRect.width; n++) {
                        // measureDist = false: only an inside/outside test is needed here.
                        if ((pointPolygonTest(contours_poly, Point(n, m), false) > 0.0)
                            && src_binary.data[m * src_binary.cols + n] == 0) {
                            LabelImages[selectedlabel].data[m * ColorLabelImage.cols + n] = 0;
                            LabelImageInOne.data[m * ColorLabelImage.cols + n] = selectedlabel;
                            ColorLabelImage.data[(m * ColorLabelImage.cols + n) * 3 + 0] = LabelColor[selectedlabel][0];
                            ColorLabelImage.data[(m * ColorLabelImage.cols + n) * 3 + 1] = LabelColor[selectedlabel][1];
                            ColorLabelImage.data[(m * ColorLabelImage.cols + n) * 3 + 2] = LabelColor[selectedlabel][2];
                        }
                    }
                }

                con.setModal(true); // Continue labelling individually
                con.exec();
                if (!cflag) // Do not want to continue labelling individual blocks
                    break;
                destroyWindow(name);
                Temp.release();
            }
            k++;
        }
    }
}
bool CustomPattern::findPatternPass(const Mat& image, vector<Point2f>& matched_features, vector<Point3f>& pattern_points, Mat& H, vector<Point2f>& scene_corners, const double pratio, const double proj_error, const bool refine_position, const Mat& mask, OutputArray output) { if (!initialized) {return false; } matched_features.clear(); pattern_points.clear(); vector<vector<DMatch> > matches; vector<KeyPoint> f_keypoints; Mat f_descriptor; detector->detect(image, f_keypoints, mask); if (refine_position) refineKeypointsPos(image, f_keypoints); descriptorExtractor->compute(image, f_keypoints, f_descriptor); descriptorMatcher->knnMatch(f_descriptor, descriptor, matches, 2); // k = 2; vector<DMatch> good_matches; vector<Point2f> obj_points; for(int i = 0; i < f_descriptor.rows; ++i) { if(matches[i][0].distance < pratio * matches[i][1].distance) { const DMatch& dm = matches[i][0]; good_matches.push_back(dm); // "keypoints1[matches[i].queryIdx] has a corresponding point in keypoints2[matches[i].trainIdx]" matched_features.push_back(f_keypoints[dm.queryIdx].pt); pattern_points.push_back(points3d[dm.trainIdx]); obj_points.push_back(keypoints[dm.trainIdx].pt); } } if (good_matches.size() < MIN_POINTS_FOR_H) return false; Mat h_mask; H = findHomography(obj_points, matched_features, RANSAC, proj_error, h_mask); if (H.empty()) { // cout << "findHomography() returned empty Mat." << endl; return false; } for(unsigned int i = 0; i < good_matches.size(); ++i) { if(!h_mask.data[i]) { deleteStdVecElem(good_matches, i); deleteStdVecElem(matched_features, i); deleteStdVecElem(pattern_points, i); } } if (good_matches.empty()) return false; uint numb_elem = good_matches.size(); check_matches(matched_features, obj_points, good_matches, pattern_points, H); if (good_matches.empty() || numb_elem < good_matches.size()) return false; // Get the corners from the image scene_corners = vector<Point2f>(4); perspectiveTransform(obj_corners, scene_corners, H); // Check correctnes of H // Is it a convex hull? bool cConvex = isContourConvex(scene_corners); if (!cConvex) return false; // Is the hull too large or small? double scene_area = contourArea(scene_corners); if (scene_area < MIN_CONTOUR_AREA_PX) return false; double ratio = scene_area/img_roi.size().area(); if ((ratio < MIN_CONTOUR_AREA_RATIO) || (ratio > MAX_CONTOUR_AREA_RATIO)) return false; // Is any of the projected points outside the hull? for(unsigned int i = 0; i < good_matches.size(); ++i) { if(pointPolygonTest(scene_corners, f_keypoints[good_matches[i].queryIdx].pt, false) < 0) { deleteStdVecElem(good_matches, i); deleteStdVecElem(matched_features, i); deleteStdVecElem(pattern_points, i); } } if (output.needed()) { Mat out; drawMatches(image, f_keypoints, img_roi, keypoints, good_matches, out); // Draw lines between the corners (the mapped object in the scene - image_2 ) line(out, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2); line(out, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2); line(out, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2); line(out, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2); out.copyTo(output); } return (!good_matches.empty()); // return true if there are enough good matches }
float cv::intersectConvexConvex( InputArray _p1, InputArray _p2, OutputArray _p12, bool handleNested ) { CV_INSTRUMENT_REGION(); Mat p1 = _p1.getMat(), p2 = _p2.getMat(); CV_Assert( p1.depth() == CV_32S || p1.depth() == CV_32F ); CV_Assert( p2.depth() == CV_32S || p2.depth() == CV_32F ); int n = p1.checkVector(2, p1.depth(), true); int m = p2.checkVector(2, p2.depth(), true); CV_Assert( n >= 0 && m >= 0 ); if( n < 2 || m < 2 ) { _p12.release(); return 0.f; } AutoBuffer<Point2f> _result(n*2 + m*2 + 1); Point2f *fp1 = _result.data(), *fp2 = fp1 + n; Point2f* result = fp2 + m; int orientation = 0; for( int k = 1; k <= 2; k++ ) { Mat& p = k == 1 ? p1 : p2; int len = k == 1 ? n : m; Point2f* dst = k == 1 ? fp1 : fp2; Mat temp(p.size(), CV_MAKETYPE(CV_32F, p.channels()), dst); p.convertTo(temp, CV_32F); CV_Assert( temp.ptr<Point2f>() == dst ); Point2f diff0 = dst[0] - dst[len-1]; for( int i = 1; i < len; i++ ) { double s = diff0.cross(dst[i] - dst[i-1]); if( s != 0 ) { if( s < 0 ) { orientation++; flip( temp, temp, temp.rows > 1 ? 0 : 1 ); } break; } } } float area = 0.f; int nr = intersectConvexConvex_(fp1, n, fp2, m, result, &area); if( nr == 0 ) { if( !handleNested ) { _p12.release(); return 0.f; } if( pointPolygonTest(_InputArray(fp1, n), fp2[0], false) >= 0 ) { result = fp2; nr = m; } else if( pointPolygonTest(_InputArray(fp2, m), fp1[0], false) >= 0 ) { result = fp1; nr = n; } else { _p12.release(); return 0.f; } area = (float)contourArea(_InputArray(result, nr), false); } if( _p12.needed() ) { Mat temp(nr, 1, CV_32FC2, result); // if both input contours were reflected, // let's orient the result as the input vectors if( orientation == 2 ) flip(temp, temp, 0); temp.copyTo(_p12); } return (float)fabs(area); }
Skeleton::Skeleton(cv::Mat skeletonizedImage, cv::Mat normalImage){ //QList<LabeledPoint> startList; QList<cv::Point2i> dummyJunctionList; QList<LabeledPoint> removedPoints; QList<int> survivors; int currentLabel = 1; // SEARCH FOR ALL CURRENT BRANCHES for (int x = 0; x < skeletonizedImage.cols; x++){ for (int y = 0; y < skeletonizedImage.rows; y++){ if (skeletonizedImage.at<uchar>(y, x) == 255){ int count = 0; QVector<cv::Point2i> list; for (int i = -1; i <= 1; i++){ for (int j = -1; j <= 1; j++){ if (i != 0 || j != 0){ if (y+j >= 0 && y+j < skeletonizedImage.rows && x+i >= 0 && x+i < skeletonizedImage.cols && skeletonizedImage.at<uchar>(y+j, x+i) > 0){ bool neighbourg = false; cv::Point2i point(x+i,y+j); for (int k = 0; k < list.size(); k++){ if (cv::norm(point - list[k]) == 1){ neighbourg = true; } } if (!neighbourg){ count++; list.append(point); } } } } } if (count == 1){ cv::Point2i point(x,y); LabeledPoint lp; lp.label = currentLabel; lp.point = point; startList.append(lp); currentLabel++; } else if (count > 2){ cv::Point2i point(x,y); dummyJunctionList.append(point); } } } } // GO THROUGH THE BRANCH AND REMOVE ONE PIXEL AT A TIME for (int i = 0; i < startList.size(); i++){ cv::Point2i current = startList[i].point; int label = startList[i].label; int round = 0; bool done = false; do { LabeledPoint lp; lp.point = current; lp.label = label; removedPoints.append(lp); skeletonizedImage.at<uchar>(current.y, current.x) = 0; int count = 0; int x = current.x; int y = current.y; QVector<cv::Point2i> list; for (int i = -1; i <= 1; i++){ for (int j = -1; j <= 1; j++){ if (i != 0 || j != 0){ if (y+j >= 0 && y+j < skeletonizedImage.rows && x+i >= 0 && x+i < skeletonizedImage.cols && skeletonizedImage.at<uchar>(y+j, x+i) == 255){ bool junction = false; cv::Point2i point(x+i,y+j); for (int k = 0; k < dummyJunctionList.size(); k++){ if (cv::norm(point - dummyJunctionList[k]) <= 1){ junction = true; } } if (!junction){ count++; list.append(point); } } } } } if (count >= 1){ current = list[0]; } round ++; done = (count < 1); if (round == 12){ survivors.append(label); } } while(round < 12 && !done); } // IF THE BRANCH IS STILL LONG ENOUGH, REDRAW IT for (int i = 0; i < removedPoints.size(); i++){ cv::Point2i point = removedPoints[i].point; int label = removedPoints[i].label; if (survivors.contains(label)){ skeletonizedImage.at<uchar>(point) = 255; } } // LOOPS std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy; cv::Mat imageClone = normalImage.clone(); cv::Mat dilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)); cv::dilate(imageClone, imageClone, dilateElement); findContours(imageClone.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); cv::Mat invertedImage = normalImage.clone(); if (contours.size() < 1){ qDebug() << "Error, 0 connected components detected"; } else { for (int x = 0; x < normalImage.cols; x++){ for (int y = 0; y < normalImage.rows; y++){ if (imageClone.at<uchar>(y, x) == 255){ invertedImage.at<uchar>(y, x) = 0; } else if (pointPolygonTest(contours[0], cv::Point2f(x,y), true) >= 0){ invertedImage.at<uchar>(y, x) = 255; } else { invertedImage.at<uchar>(y, x) = 0; } } } } std::vector<std::vector<cv::Point> > invertedContours; std::vector<cv::Vec4i> invertedHierarchy; findContours(invertedImage, invertedContours, invertedHierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); for (unsigned int i = 0; i < invertedContours.size(); i++){ cv::Rect rect = minAreaRect(invertedContours[i]).boundingRect(); double 
xNorm = ((double) rect.x + rect.width/2) / skeletonizedImage.cols; double yNorm = ((double) rect.y + rect.height/2) / skeletonizedImage.rows; cv::Point2d point(xNorm, yNorm); if (rect.width > 2 && rect.height > 2){ listHoles.push_back(point); } else { listFakeHoles.push_back(point); } } // COUNTING JUNCTIONS AND LINE ENDS for (int x = 0; x < skeletonizedImage.cols; x++){ for (int y = 0; y < skeletonizedImage.rows; y++){ if (skeletonizedImage.at<uchar>(y, x) == 255){ int count = 0; QVector<cv::Point2i> list; for (int i = -1; i <= 1; i++){ for (int j = -1; j <= 1; j++){ if (i != 0 || j != 0){ if (y+j >= 0 && y+j < skeletonizedImage.rows && x+i >= 0 && x+i < skeletonizedImage.cols && skeletonizedImage.at<uchar>(y+j, x+i) == 255){ bool neighbourg = false; cv::Point2i point(x+i,y+j); for (int k = 0; k < list.size(); k++){ if (cv::norm(point - list[k]) == 1){ neighbourg = true; } } if (!neighbourg){ count++; list.append(point); } } } } } if (count == 1 || count > 2){ double xNorm = ((double) x) / skeletonizedImage.cols; double yNorm = ((double) y) / skeletonizedImage.rows; cv::Point2d point(xNorm,yNorm); if (count == 1){ listLineEnds.push_back(point); } else { listJunctions.push_back(point); } } } } } // MERGING CLOSE JUNCTIONS bool done = true; do { done = true; int keepIndexJunction = -1; int removeIndexJunction = -1; for (int i = 0; i < listJunctions.size(); i++){ for (int j = 0; j < listJunctions.size(); j++){ if (i != j && norm(listJunctions[i] - listJunctions[j]) < MERGE_DISTANCE){ keepIndexJunction = i; removeIndexJunction = j; } } } if (keepIndexJunction != -1 && removeIndexJunction != -1){ done = false; listJunctions[keepIndexJunction].x = (listJunctions[keepIndexJunction].x + listJunctions[removeIndexJunction].x) / 2; listJunctions[keepIndexJunction].y = (listJunctions[keepIndexJunction].y + listJunctions[removeIndexJunction].y) / 2; listJunctions.removeAt(removeIndexJunction); } } while (!done); // DELETING CLOSE JUNCTIONS AND LINE ENDS, ALWAYS WRONG DATA done = true; do { done = true; int removeIndexLineEnds = -1; int removeIndexJunctions = -1; for (int i = 0; i < listLineEnds.size(); i++){ for (int j = 0; j < listJunctions.size(); j++){ if (norm(listLineEnds[i] - listJunctions[j]) < DELETE_DISTANCE){ removeIndexLineEnds = i; removeIndexJunctions = j; } } } if (removeIndexLineEnds != -1 && removeIndexJunctions != -1){ done = false; listLineEnds.removeAt(removeIndexLineEnds); listJunctions.removeAt(removeIndexJunctions); } } while (!done); // DELETING JUNCTIONS CLOSE TO FAKE LOOPS done = true; do { done = true; int removeIndexJunction = -1; for (int i = 0; i < listJunctions.size(); i++){ for (int j = 0; j < listFakeHoles.size(); j++){ if (norm(listJunctions[i] - listFakeHoles[j]) < FAKE_LOOPS_DISTANCE){ removeIndexJunction = i; } } } if (removeIndexJunction != -1){ done = false; listJunctions.removeAt(removeIndexJunction); } } while (!done); // DELETING LINE ENDS CLOSE TO FAKE LOOPS done = true; do { done = true; int removeIndexEnd = -1; for (int i = 0; i < listLineEnds.size(); i++){ for (int j = 0; j < listFakeHoles.size(); j++){ if (norm(listLineEnds[i] - listFakeHoles[j]) < FAKE_LOOPS_DISTANCE){ removeIndexEnd = i; } } } if (removeIndexEnd != -1){ done = false; listLineEnds.removeAt(removeIndexEnd); } } while (!done); // DELETING JUNCTIONS CLOSE TO BORDERS, ALWAYS WRONG DATA done = true; do { done = true; int removeIndexJunctions = -1; for (int i = 0; i < listJunctions.size(); i++){ if (listJunctions[i].x < JUNCTION_MARGIN || listJunctions[i].y < JUNCTION_MARGIN || 
listJunctions[i].x > 1 - JUNCTION_MARGIN || listJunctions[i].y > 1 - JUNCTION_MARGIN){ removeIndexJunctions = i; } } if (removeIndexJunctions != -1){ done = false; listJunctions.removeAt(removeIndexJunctions); } } while (!done); massCenter = getMassCenter(normalImage); total = getCount(normalImage); setParts(normalImage); listLineEnds = sort(listLineEnds); listHoles = sort(listHoles); listJunctions = sort(listJunctions); }
void TargetExtractor::regionGrow2(int areaThreshold, int diffThreshold) {
    Mat gray;
    cvtColor(mFrame, gray, CV_BGR2GRAY);

    Mat temp;
    mMask.copyTo(temp);

    vector<vector<Point> > contours;
    findContours(temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    int maxStackSize = gray.rows * gray.cols / 4;
    static int direction[8][2] = {
        { 0, 1 }, { 1, 1 }, { 1, 0 }, { 1, -1 },
        { 0, -1 }, { -1, -1 }, { -1, 0 }, { -1, 1 }
    };

    for (int i = 0; i < contours.size(); i++) {
        if (contourArea(contours[i]) < areaThreshold) {
            drawContours(mMask, contours, i, Scalar(0), CV_FILLED);
            continue;
        }

        // TODO: revise the seed selection method.
        Moments mu = moments(contours[i], false);
        Point seed(cvRound(mu.m10 / mu.m00), cvRound(mu.m01 / mu.m00));
        if (pointPolygonTest(contours[i], seed, false) < 0) {
            cout << "Seed not in contour!" << endl;
            continue;
        }

        stack<Point> pointStack;
        Mat temp = Mat::zeros(mMask.size(), mMask.type());  // region-grow mask for this contour
        temp.at<uchar>(seed) = 255;
        pointStack.push(seed);

        uchar seedPixel = gray.at<uchar>(seed);
        Point cur, pop;
        while (!pointStack.empty() && pointStack.size() < maxStackSize) {
            pop = pointStack.top();
            pointStack.pop();

            for (int k = 0; k < 8; k++) {
                cur.x = pop.x + direction[k][0];
                cur.y = pop.y + direction[k][1];

                if (cur.x < 0 || cur.x > gray.cols - 1 || cur.y < 0 || cur.y > gray.rows - 1) {
                    continue;
                }
                if (temp.at<uchar>(cur) != 255) {
                    uchar curPixel = gray.at<uchar>(cur);
                    if (abs(curPixel - seedPixel) < diffThreshold) {
                        temp.at<uchar>(cur) = 255;
                        pointStack.push(cur);
                    }
                }
            }
        }
        if (pointStack.empty()) {
            bitwise_or(mMask, temp, mMask);
        }
    }
}
bool ContourModel::update(vector<vector<Point> > contours,vector<Point2d>& originalPoints, int image_w_half) { //step 1: find the larger contours to filter out some noise (area > thresh) vector<vector<Point> > largeContours; int areaThreshold = 130; for(int i = 0;i < (int)contours.size();i++) { vector<Point> currCont = contours.at(i); double area = contourArea(contours.at(i)); if(area > areaThreshold) { largeContours.push_back(currCont); } } //step 2: for each larger contour: find the center of mass and the lane direction to group them vector<Point2d> mass_centers; vector<Point2d> line_directions; for(int i = 0;i < (int)largeContours.size();i++) { //calculate the line direction for each contour Vec4f lineParams; fitLine(largeContours.at(i), lineParams, CV_DIST_L2, 0, 0.01, 0.01); Point2d lineDirection(lineParams[0],lineParams[1]); line_directions.push_back(lineDirection); //calculate the mass center for each contour vector<Moments> contourMoments; Moments currMoments = moments(largeContours.at(i)); double x_cent = currMoments.m10 / currMoments.m00; double y_cent = currMoments.m01 / currMoments.m00; Point2d mass_cent(x_cent,y_cent); mass_centers.push_back(mass_cent); } //assert these vectors have same length: if(largeContours.size() != mass_centers.size())cout << "ERROR in ContourModel: massCenters.size != largeContours.size()" << endl; if(largeContours.size() != line_directions.size())cout << "ERROR in ContourModel: massCenters.size != largeContours.size()" << endl; //step 3: create the "mergeList": store for each contour weather it wants to merge with another one vector<vector<int> > mergelist; //merge contours based on center of mass and line direction for(int i = 0;i < (int)largeContours.size();i++) { vector<int> mergeWishes; Point2d currCenter = mass_centers.at(i); Point2d currDirection = line_directions.at(i); for(int j = i+1;j < (int)largeContours.size();j++) { Point2d compCenter = mass_centers.at(j); Point2d compDirection = line_directions.at(j); bool wantMerge = mergeContours(currCenter, currDirection, compCenter, compDirection); if(wantMerge)mergeWishes.push_back(j); } mergelist.push_back(mergeWishes); } //step 4: use the mergeList to create the final_mergelist which looks as follows: //[ [0,2,5] [3] [1] [4,6]] telling which contours should be merged together vector<vector<int> > final_mergelist; for(int i = 0;i < (int)largeContours.size();i++) { vector<int> temp; temp.push_back(i); final_mergelist.push_back(temp); } for(int i = 0;i < (int)largeContours.size();i++) { vector<int>* containerToPushTo = NULL; //step 1: find the container the contour i is in - note that this will always succeed so containerToPushTo wont stay NULL for(int j = 0;j < (int)final_mergelist.size();j++) { vector<int>* currContainer; currContainer = &final_mergelist.at(j); for(int k = 0;k < (int)final_mergelist.at(j).size();k++) { if(final_mergelist.at(j).at(k) == i) { containerToPushTo = currContainer; } } } //step2: for each element to push: make sure it appears in the container for(int j = 0;j < (int)mergelist.at(i).size();j++) { int elemToMerge = mergelist.at(i).at(j); //if elemToMerge already appears in containerToPushTo => do nothing bool alreadyInContainer = false; for(int k = 0;k < (int)containerToPushTo->size();k++) { if(containerToPushTo->at(k) == elemToMerge) alreadyInContainer = true; } //not inside: push the element and delete it from the old vector it was in if(!alreadyInContainer) { //delete it from the old container!! 
for(int k = 0;k < (int)final_mergelist.size();k++) { for(int l = 0;l < (int)final_mergelist.at(k).size();l++) { //DEBUG IFS - ERASE LATER if(k < 0 || k >= (int)final_mergelist.size())cout << "OVERFLOW IN 159::ContourModel" << endl; if(l < 0 || l >= (int)final_mergelist.at(k).size())cout << "OVERFLOW IN 160::ContourModel" << endl; if(final_mergelist.at(k).at(l) == elemToMerge) { //DEBUG IF- ERASE LATER if(l < 0 || l >= (int)final_mergelist.at(k).size()) cout << "ERROR ContourModel 162" << endl; final_mergelist.at(k).erase(final_mergelist.at(k).begin()+l); } } } //add it in the new container containerToPushTo->push_back(elemToMerge); } } } //step 5: merge the contours together vector< vector<vector<Point> > > mergedContours; for(int i = 0;i < (int)final_mergelist.size();i++) { vector<vector<Point> > currGrouping; for(int j = 0;j < (int)final_mergelist.at(i).size();j++) { vector<Point> currContour = largeContours.at(final_mergelist.at(i).at(j)); currGrouping.push_back(currContour); } if(currGrouping.size() > 0)mergedContours.push_back(currGrouping); } //TRY TO FIND THE MIDDLE LANE vector<vector<Point> > singleContours; vector<vector<vector<Point> > > multipleContours; for(int i = 0;i < (int)mergedContours.size();i++) { vector<vector<Point> > currContGroup = mergedContours.at(i); if(currContGroup.size() == 1) singleContours.push_back(currContGroup.at(0)); else if(currContGroup.size() > 1) multipleContours.push_back(currContGroup); } //in this situation there is actually a chance to apply the middle lane extraction, otherwise the old procedure is applied if(multipleContours.size() == 1 && singleContours.size() <= 2 && singleContours.size() > 0) { //sort single contours by area std::sort(singleContours.begin(),singleContours.end(),acompareCont); vector<Point> largestSingleContour = singleContours.at(singleContours.size()-1); double areaLargestSingle = contourArea(largestSingleContour); vector<vector<Point> > middleContour = multipleContours.at(0); double areaMiddle = 0; bool validMid = true; for(int i = 0;i < (int)middleContour.size();i++) { double areaCurr = contourArea(middleContour.at(i)); if(areaCurr > areaLargestSingle/2.0){ validMid = false; } areaMiddle += contourArea(middleContour.at(i)); } //if both contours have a certain size if(areaLargestSingle > 120 && areaMiddle > 120) { //MIDDLE LANE AND OTHER LANE FOUND => RETURN THE ESTIMATE //first argument will be the middle lane //second argument will be the other larger lane vector<vector<Point2d> > nicelyGroupedPoints; //1) --- MIDDLE LANE --- vector<Point2d> temp_result; for(int i = 0;i < (int)middleContour.size();i++) { vector<Point> currCont = middleContour.at(i); Rect bound = boundingRect(currCont); //visit every point in the bounding rect for(int y = bound.y;y < bound.y+bound.height;y++) { for(int x = bound.x;x < bound.x+bound.width;x++) { if(pointPolygonTest(currCont, Point(x,y), false) >= 0) { temp_result.push_back(Point2d(x-image_w_half,y)); } } } } nicelyGroupedPoints.push_back(temp_result); //2) --- OTHER LANE --- vector<Point2d> temp_result2; Rect bound = boundingRect(largestSingleContour); //visit every point in the bounding rect for(int y = bound.y;y < bound.y+bound.height;y++) { for(int x = bound.x;x < bound.x+bound.width;x++) { if(pointPolygonTest(largestSingleContour, Point(x,y), false) >= 0) { temp_result2.push_back(Point2d(x-image_w_half,y)); } } } if(validMid) { nicelyGroupedPoints.push_back(temp_result2); points = nicelyGroupedPoints; return true; //middle lane estimate provided } } } //MIDDLE LANE WAS NOT FOUND //step 
6: get the final result: the grouped points matching the contours //need to perform a inside contour check within the bounding rectangle of the contour for //each point in the bounding rectangle vector<vector<Point2d> > nicelyGroupedPoints; for(int i = 0;i < (int)mergedContours.size();i++) { vector<Point2d> temp_result; for(int j = 0;j < (int)mergedContours.at(i).size();j++) { vector<Point> currContour = mergedContours.at(i).at(j); Rect bound = boundingRect(currContour); //visit every point in the bounding rect for(int y = bound.y;y < bound.y+bound.height;y++) { for(int x = bound.x;x < bound.x+bound.width;x++) { if(pointPolygonTest(currContour, Point(x,y), false) >= 0) { temp_result.push_back(Point2d(x-image_w_half,y)); } } } } if(temp_result.size() > 0) { nicelyGroupedPoints.push_back(temp_result); } } /* //step 6 (alternative): get the final result: the grouped points matching the contours //need to perform a inside contour check for the input points if in boundary rectangle of the contour vector<vector<Point2d> > nicelyGroupedPoints; for(int i = 0;i < mergedContours.size();i++) { vector<Point2d> temp_result; for(int j = 0;j < mergedContours.at(i).size();j++) { vector<Point> currContour = mergedContours.at(i).at(j); Rect bound = boundingRect(currContour); for(int k = 0;k < originalPoints.size();k++) { //check if within the contour: if(pointPolygonTest(currContour, originalPoints.at(k), false) >= 0) { temp_result.push_back(Point2d(originalPoints.at(k).x-image_w_half, originalPoints.at(k).y)); } } } if(temp_result.size() > 0) { nicelyGroupedPoints.push_back(temp_result); } } */ points = nicelyGroupedPoints; return false; //everything as usual, no further information }
// Compute the bounding rectangle of the connected component containing the point, and optionally draw it.
bool CAnswerCardApp::GetArchorPointRect(QPoint pt, int ap_idx, bool is_draw) {
    // Validate input.
    if (ap_idx < 0 || ap_idx > 3) {
        return false;
    }

    // Connected-component (contour) detection inside a ROI around the point.
    vector<vector<cv::Point> > contours;
    vector<Vec4i> hierarchy;
    CvRect roirect;
    int roisize = img_bw.rows * 0.05;
    roirect.width = roirect.height = 2 * roisize;
    roirect.x = min(max(0, pt.x() - roisize), img_bw.cols - roirect.width);
    roirect.y = min(max(0, pt.y() - roisize), img_bw.rows - roirect.height);

    Mat roi;
    img_bw(roirect).copyTo(roi);
    findContours(roi, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    bool isFound = false;
    if (!contours.empty()) {
        for (unsigned int i = 0; i < contours.size(); i++) {
            // Skip contours that do not contain the point.
            if (pointPolygonTest(contours.at(i), cv::Point(pt.x() - roirect.x, pt.y() - roirect.y), false) < 0)
                continue;
            anchorPoints[ap_idx] = boundingRect(contours.at(i));
            anchorPoints[ap_idx].x += roirect.x;
            anchorPoints[ap_idx].y += roirect.y;
            isFound = true;
        }
    }

    // Draw if requested.
    if (is_draw) {
        // Only draw when the data and the displayed image are in sync.
        if (is_synchronous) {
            switch (ap_idx) {
            case 0:
                cv::putText(img_show, "1", cv::Point2d(anchorPoints[ap_idx].x, anchorPoints[ap_idx].y + anchorPoints[ap_idx].height * 0.9), FONT_HERSHEY_COMPLEX, .6, anchor_point_color, 2);
                break;
            case 1:
                cv::putText(img_show, "2", cv::Point2d(anchorPoints[ap_idx].x, anchorPoints[ap_idx].y + anchorPoints[ap_idx].height * 0.9), FONT_HERSHEY_COMPLEX, .6, anchor_point_color, 2);
                break;
            case 2:
                cv::putText(img_show, "3", cv::Point2d(anchorPoints[ap_idx].x, anchorPoints[ap_idx].y + anchorPoints[ap_idx].height * 0.9), FONT_HERSHEY_COMPLEX, .6, anchor_point_color, 2);
                break;
            case 3:
                cv::putText(img_show, "4", cv::Point2d(anchorPoints[ap_idx].x, anchorPoints[ap_idx].y + anchorPoints[ap_idx].height * 0.9), FONT_HERSHEY_COMPLEX, .6, anchor_point_color, 2);
                break;
            }
            rectangle(img_show, anchorPoints[ap_idx], anchor_point_color, anchor_point_thickness);
        } else
            is_synchronous = false;
    }

    // Report whether a matching connected component was found.
    return isFound;
}