/**
 * Computes perimeter/area/roundness features over the contours of a binary image.
 *
 * NOTE: cvFindContours modifies its input image — callers must pass a
 * scratch copy of src if they still need it (assumed; confirm at call sites).
 *
 * @param src   binary 8-bit image whose contours are extracted (clobbered).
 * @param mask  feature-set selector; when mask == 2 the contour count is appended.
 * @return {perimeter, area, roundness[, count]} where roundness = P^2 / (4*pi*A),
 *         or roundness = -1 when no contour of area >= 1000 was found.
 */
vector<float> feature::getPAR(IplImage *src, int mask) {
    float perimeter, area, rc, i;
    perimeter = area = i = 0;
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvFindContours(src, storage, &contours, sizeof(CvContour), CV_RETR_LIST);
    if (contours) {
        CvSeq* c = contours;
        for (; c != NULL; c = c->h_next) {
            // Ignore small (noise) contours.
            if (cvContourArea(c) < 1000)
                continue;
            perimeter += cvArcLength(c);
            area += cvContourArea(c);
            i++;  // number of accepted contours
        }
    }
    if (area == 0)
        rc = -1;  // sentinel: no usable contour found
    else
        rc = perimeter * perimeter / (4 * CV_PI * area);  // was 4*3.14 — use the exact constant

    // Assemble the feature vector; mask 2 additionally reports the contour count.
    vector<float> PAR({perimeter, area, rc});
    if (mask == 2) {
        PAR.push_back(i);
    }
    cvReleaseMemStorage(&storage);
    return PAR;
}
/**
 * CBlobGetHullPerimeter
 *
 * Computes the perimeter of the blob's convex hull. When the blob's edge
 * sequence cannot be retrieved (NULL or empty), the plain blob perimeter
 * is returned instead.
 */
double CBlobGetHullPerimeter::operator()(const CBlob &blob) const
{
	CvSeq *edges = blob.Edges();
	// Fall back to the ordinary perimeter if there are no edge points.
	if (edges == NULL || edges->total <= 0)
		return blob.Perimeter();

	CvSeq *hull = cvConvexHull2(edges, 0, CV_CLOCKWISE, 1);
	return fabs(cvArcLength(hull, CV_WHOLE_SEQ, 1));
}
/*
 * call-seq:
 *   arc_length(<i>[slice = nil][,is_closed = nil]</i>) -> float
 *
 * Calculates contour perimeter or curve length.
 * <i>slice</i> is starting and ending points of the curve.
 * <i>is_closed</i> indicates whether the curve is closed or not. There are 3 cases:
 * * is_closed = true - the curve is assumed to be closed.
 * * is_closed = false - the curve is assumed to be unclosed.
 * * is_closed = nil (default) use self#close?
 */
VALUE
rb_arc_length(int argc, VALUE *argv, VALUE self)
{
  VALUE slice, is_closed;
  rb_scan_args(argc, argv, "02", &slice, &is_closed);
  /* TRUE_OR_FALSE(is_closed, -1): -1 lets cvArcLength infer closedness
     from the sequence flags when the caller passed nil. */
  return rb_float_new(cvArcLength(CVARR(self),
                                  NIL_P(slice) ? CV_WHOLE_SEQ : VALUE_TO_CVSLICE(slice),
                                  TRUE_OR_FALSE(is_closed, -1)));
}
// Predicate: nonzero when this contour's perimeter lies strictly
// between min_per and max_per.
int Contours::perimeterFilter(double min_per, double max_per) {
	const double perimeter = cvArcLength(this->c, CV_WHOLE_SEQ, 1);
	return (min_per < perimeter) && (perimeter < max_per);
}
// Returns the perimeter of the blob's convex hull, or 0 when no hull
// is available. The hull sequence is cleared after measuring it.
double BlobGetHullPerimeter::operator()(Blob &blob)
{
	CvSeq *hull = blob.GetConvexHull();
	if (!hull)
		return 0;

	const double hullLength = fabs(cvArcLength(hull, CV_WHOLE_SEQ, 1));
	cvClearSeq(hull);
	return hullLength;
}
/**
 * Locates the quartz position: finds external edge contours in src, keeps those
 * whose polygon-approximated perimeter lies within the calibrated wafer range,
 * and returns the enclosing-circle centre with the smallest y (below row 336).
 * The accepted contours and the chosen point are drawn into dst.
 *
 * @param src  input image (Canny is run on it into a temp buffer).
 * @param dst  image the contours / result marker are drawn on.
 * @return the selected centre, or {0, 1000} when nothing qualified.
 */
CvPoint CTools::QuartzPostion(IplImage* src, IplImage* dst)
{
    CvMemStorage * storage = cvCreateMemStorage(0);
    CvSeq * contour = 0;
    int mode = CV_RETR_EXTERNAL;
    double length;
    CvPoint2D32f center;
    float r;
    CvPoint pt;
    pt.y = 1000;  // sentinel: any accepted centre with y < 1000 replaces it
    pt.x = 0;
    CalibrateData m_CalDat;
    GetCalirateParam(&m_CalDat);
    IplImage* temp = cvCreateImage(cvGetSize(src), 8, 1);
    cvCanny(src, temp, 50, 100);
    cvFindContours(temp, storage, &contour, sizeof(CvContour), mode);
    for (CvSeq* c = contour; c != NULL; c = c->h_next)
    {
        c = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1);
        length = cvArcLength(c, CV_WHOLE_SEQ, -1);
        // Keep contours whose perimeter is within the calibrated wafer size window.
        if ((length > m_CalDat.WaferPxLow) && (length < m_CalDat.WaferPxHigh))
        {
            cvDrawContours(dst, c, CV_RGB(0,0,255), CV_RGB(255, 0, 0), -1, 2, 8);
            // FIX: was the mis-encoded token "¢er"; must be the address of center.
            cvMinEnclosingCircle(c, &center, &r);
            // Track the topmost qualifying centre below image row 336.
            if ((center.y > 336) && (center.y < pt.y))
            {
                pt = cvPointFrom32f(center);
            }
        }
    }
    cvCircle(dst, pt, 10, CV_RGB(255,0, 0), -1);
    cvReleaseImage(&temp);
    cvClearMemStorage( storage );
    cvReleaseMemStorage( &storage );
    return pt;
}
//各種輪郭の特徴量の取得 void GetContourFeature(CvSeq *Contour) { //面積 double Area = fabs(cvContourArea(Contour, CV_WHOLE_SEQ)); //周囲長 double Perimeter = cvArcLength(Contour); //円形度 double CircleLevel = 4.0 * CV_PI * Area / (Perimeter * Perimeter); //傾いていない外接四角形領域(フィレ径) CvRect rect = cvBoundingRect(Contour); if(perimeter_max < Perimeter) { perimeter_max = Perimeter; max_perimeter_contor = Contour; } }
/**
 * Scans a binary image for roughly circular blobs (parking/board places) and
 * flags which of 6 fixed slots contain one, keyed by blob-centre coordinates.
 *
 * FIX: the original returned a pointer to a stack-local array (dangling
 * pointer / undefined behaviour) and never released the contour storage
 * (memory leak). The result array is now static — callers receive a pointer
 * that stays valid, but the function is NOT reentrant/thread-safe.
 *
 * @param image  binary 8-bit image; clobbered by cvFindContours.
 * @return pointer to a static int[6]; arr[k] == 1 when slot k is occupied.
 */
int* getAvailablePlaces(IplImage *image){
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *contours = 0;
    cvFindContours(image, storage, &contours);
    CvRect rect;
    double s = 0;
    double p = 0;
    int x;
    int y;
    // Static so the returned pointer outlives this call; reset every time.
    static int arr[6];
    for (int k = 0; k < 6; ++k)
        arr[k] = 0;
    for(CvSeq *seq = contours; seq != 0; seq = seq->h_next){
        s = cvContourArea(seq);
        p = cvArcLength(seq);
        // Circularity test: P^2/A ≈ 4*pi for a circle; fabs (not integer abs)
        // because the ratio is floating point and the signed area may be < 0.
        if(fabs(p*p/s) < 4*3.14 + 2 && s > 90){
            rect = cvBoundingRect(seq);
            x = rect.x + rect.width/2;
            y = rect.y + rect.height/2;
            // Top row of slots (indices 1, 3, 5).
            if(30 < y && y < 50){
                if(150 < x && x < 180){ arr[1] = 1; }
                if(200 < x && x < 220){ arr[3] = 1; }
                if(240 < x && x < 270){ arr[5] = 1; }
            }
            // Bottom row of slots (indices 0, 2, 4).
            if(190 < y && y < 210){
                if(150 < x && x < 180){ arr[0] = 1; }
                if(200 < x && x < 220){ arr[2] = 1; }
                if(240 < x && x < 270){ arr[4] = 1; }
            }
        }
    }
    // Release the contour storage (the original leaked it on every call).
    cvReleaseMemStorage(&storage);
    return arr;
}
// Fits an ellipse to a downsampled version of a contour and, if the ellipse
// is small enough, appends a new Candidate to kps.
// Returns 0 on success (candidate added), 1 when the fitted ellipse is too
// large, -1 when the contour is too small/degenerate to process.
// NOTE(review): `bin` is accepted but never used here — confirm whether it is
// vestigial or required by the calling convention.
int findStableMatches( CvSeq *seq, float minRad, float maxRad, CandidatePtrVector& kps, IplImage* bin )
{
  // Return value
  int retVal = -1;

  // Reject contours with too few points to fit an ellipse meaningfully.
  int elements = seq->total;
  if( elements < 8 )
  {
    return retVal;
  }

  // Bounding-box statistics: high/low are the larger/smaller box dimensions.
  CvRect rect = cvBoundingRect( seq );
  int high = ( rect.height < rect.width ? rect.width : rect.height );
  int low = ( rect.height < rect.width ? rect.height : rect.width );

  // If bounding box is very small simply return
  if( low < minRad*2 )
  {
    return retVal;
  }

  // Copy the contour points into a flat array for random access.
  CvPoint *group_pos = (CvPoint*) malloc(elements * sizeof(CvPoint));
  cvCvtSeqToArray(seq, group_pos, CV_WHOLE_SEQ);

  // Heuristic downsampling budget derived from arc length and box aspect.
  double arc_length = cvArcLength( seq );
  double arc_approx = arc_length / 10;
  double rect_approx = 12*(float)high / (float)low;
  double downsample = 2 * elements / (rect_approx + arc_approx);
  // Minimum squared-distance spacing between kept points (see loop below).
  double ds_length = arc_length / 4;

  // Downsample: keep points at least ds_length (squared) apart, up to maxSize.
  // NOTE(review): maxSize can be 0 when downsample rounds down, yet dsed[0]
  // is written unconditionally — possible malloc(0) overrun; confirm inputs.
  int maxSize = downsample * elements;
  int newSize = 0;
  CvPoint *dsed = (CvPoint*) malloc(maxSize * sizeof(CvPoint));
  dsed[0] = CvPoint( group_pos[0] );
  CvPoint last = CvPoint( dsed[0] );
  newSize++;
  for( int i = 1; i < elements; i++ )
  {
    double dist_so_far = dist_squared( group_pos[i], last );
    if( dist_so_far > ds_length && newSize < maxSize )
    {
      dsed[newSize] = CvPoint( group_pos[i] );
      newSize++;
      last = CvPoint( group_pos[i] );
    }
  }

  // Check to make sure reduced Contour size is sufficient [quickfix: todo revise above]
  if( newSize < 6 )
  {
    free(group_pos);
    free(dsed);
    return -1;
  }

  // Convert the kept points to float and fit an ellipse to them.
  CvPoint2D32f* input = (CvPoint2D32f*)malloc(newSize*sizeof(CvPoint2D32f));
  for( int i=0; i<newSize; i++ )
  {
    input[i].x = dsed[i].x;
    input[i].y = dsed[i].y;
  }
  CvBox2D* box = (CvBox2D*)malloc(sizeof(CvBox2D));
  cvFitEllipse( input, newSize, box );

  // Accept only ellipses whose area is below the pi*maxRad^2 threshold.
  float esize = PI*box->size.height*box->size.width/4.0f;
  if( esize < PI*maxRad*maxRad )
  {
    // Add the fitted ellipse as a new candidate interest point.
    Candidate *kp = new Candidate;
    kp->angle = box->angle;
    kp->r = box->center.y;
    kp->c = box->center.x;
    kp->minor = box->size.width/2;
    kp->major = box->size.height/2;
    kp->magnitude = 0;
    kp->method = ADAPTIVE;
    kps.push_back( kp );
    retVal = 0;
  }
  else
  {
    // Interest point too large
    retVal = 1;
  }

  // Deallocations
  free(box);
  free(input);
  free(group_pos);
  free(dsed);
  return retVal;
}
void MouthContours::execute(IplImage* img, IplImage* drw, CvRect mouthSearch){ CvSeq* contours; if(CV_IS_IMAGE(imgGrey)){ cvReleaseImage(&imgGrey); } if(CV_IS_IMAGE(imgTempl)){ cvReleaseImage(&imgTempl); } allocateOnDemand( &storageTeeth ); allocateOnDemand( &imgTempl, cvSize( img->width, img->height ), IPL_DEPTH_8U, 3 ); cvCopy( img, imgTempl, 0 ); allocateOnDemand( &imgGrey, cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 ); if(CV_IS_STORAGE((storageTeeth))){ contours = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storageTeeth ); cvCvtColor( imgTempl, imgGrey, CV_BGR2GRAY ); int sigma = 1; int ksize = (sigma*5)|1; cvSetImageROI(imgGrey, mouthSearch); cvSetImageROI(drw, mouthSearch); cvSmooth( imgGrey , imgGrey, CV_GAUSSIAN, ksize, ksize, sigma, sigma); //cvEqualizeHist( small_img_grey, small_img_grey ); cvCanny( imgGrey, imgGrey, 70, 70, 3 ); cvDilate( imgGrey, imgGrey, NULL, 1 ); cvErode( imgGrey, imgGrey, NULL, 1 ); cvFindContours( imgGrey, storageTeeth, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); if(CV_IS_SEQ(contours)){ contours = cvApproxPoly( contours, sizeof(CvContour), storageTeeth, CV_POLY_APPROX_DP, 5, 1 ); if( contours->total > 0 ){ for( ;contours; contours = contours->h_next ){ if( contours->total < 4 ) continue; cvDrawContours( drw, contours, CV_RGB(255,0,0), CV_RGB(0,255,0), 5, 1, CV_AA, cvPoint(0,0) ); MouthContours::TeethArcLength = cvArcLength( contours, CV_WHOLE_SEQ, -1); MouthContours::TeethAreaContour = cvContourArea( contours, CV_WHOLE_SEQ); time_t ltime; struct tm *Tm; ltime=time(NULL); Tm=localtime(<ime); MouthContours::MouthHH = Tm->tm_hour; MouthContours::MouthMM = Tm->tm_min; MouthContours::MouthSS = Tm->tm_sec; } }else{ MouthContours::MouthHH = 0; MouthContours::MouthMM = 0; MouthContours::MouthSS = 0; MouthContours::TeethArcLength = 0; MouthContours::TeethAreaContour = 0; } }else{ MouthContours::MouthHH = 0; MouthContours::MouthMM = 0; MouthContours::MouthSS = 
0; MouthContours::TeethArcLength = 0; MouthContours::TeethAreaContour = 0; } cvClearMemStorage( storageTeeth ); } cvResetImageROI(imgGrey); cvResetImageROI(drw); }
double THISCLASS::GetContourCompactness(const void* contour) { double l = cvArcLength(contour, CV_WHOLE_SEQ, 1); return fabs(12.56*cvContourArea(contour) / (l*l)); }
//--------------------------------------------------------------------------------
// Finds contours in `input` and populates three member collections:
//   objects — contours matched against the template table (when bTrackObjects),
//   blobs   — contours with minArea < area < maxArea (when bTrackBlobs),
//   fingers — sharp convex-hull corners of large contours (when bTrackFingers).
// Returns nFingers when finger tracking is on, otherwise nBlobs.
int ContourFinder::findContours( ofxCvGrayscaleImage& input,
                                 int minArea,
                                 int maxArea,
                                 int nConsidered,
                                 double hullPress,
                                 bool bFindHoles,
                                 bool bUseApproximation) {
    reset();

    // opencv will clober the image it detects contours on, so we want to
    // copy it into a copy before we detect contours. That copy is allocated
    // if necessary (necessary = (a) not allocated or (b) wrong size)
    // so be careful if you pass in different sized images to "findContours"
    // there is a performance penalty, but we think there is not a memory leak
    // to worry about better to create mutiple contour finders for different
    // sizes, ie, if you are finding contours in a 640x480 image but also a
    // 320x240 image better to make two ContourFinder objects then to use
    // one, because you will get penalized less.
    if( inputCopy.width == 0 ) {
        inputCopy.allocate( input.width, input.height );
        inputCopy = input;
    } else {
        if( inputCopy.width == input.width && inputCopy.height == input.height )
            inputCopy = input;
        else {
            // we are allocated, but to the wrong size --
            // been checked for memory leaks, but a warning:
            // be careful if you call this function with alot of different
            // sized "input" images!, it does allocation every time
            // a new size is passed in....
            //inputCopy.clear();
            inputCopy.allocate( input.width, input.height );
            inputCopy = input;
        }
    }

    CvSeq* contour_list = NULL;
    contour_storage = cvCreateMemStorage( 1000 );
    storage = cvCreateMemStorage( 1000 );

    CvContourRetrievalMode retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
    cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode,
                    bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
    CvSeq* contour_ptr = contour_list;

    nCvSeqsFound = 0;

    // Walk the linked list of contours, classifying each as object or blob.
    while( (contour_ptr != NULL) ) {
        CvBox2D box = cvMinAreaRect2(contour_ptr);
        int objectId; // If the contour is an object, then objectId is its ID
        objectId = (bTrackObjects)? templates->getTemplateId(box.size.width,box.size.height): -1;

        if(objectId != -1 ) { //If the blob is a object
            Blob blob = Blob();
            blob.id = objectId;
            blob.isObject = true;
            float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );

            cvMoments( contour_ptr, myMoments );

            // this is if using non-angle bounding box
            CvRect rect = cvBoundingRect( contour_ptr, 0 );
            blob.boundingRect.x = rect.x;
            blob.boundingRect.y = rect.y;
            blob.boundingRect.width = rect.width;
            blob.boundingRect.height = rect.height;

            //For anglebounding rectangle
            blob.angleBoundingBox=box;
            blob.angleBoundingRect.x = box.center.x;
            blob.angleBoundingRect.y = box.center.y;
            blob.angleBoundingRect.width = box.size.height;
            blob.angleBoundingRect.height = box.size.width;
            blob.angle = box.angle;

            //TEMPORARY INITIALIZATION TO 0, Will be calculating afterwards.This is to prevent sending wrong data
            blob.D.x = 0;
            blob.D.y = 0;
            blob.maccel = 0;

            // assign other parameters
            blob.area = fabs(area);
            // cvContourArea is signed: negative area marks a hole.
            blob.hole = area < 0 ? true : false;
            blob.length = cvArcLength(contour_ptr);

            blob.centroid.x = (myMoments->m10 / myMoments->m00);
            blob.centroid.y = (myMoments->m01 / myMoments->m00);
            blob.lastCentroid.x = 0;
            blob.lastCentroid.y = 0;

            // get the points for the blob:
            CvPoint pt;
            CvSeqReader reader;
            cvStartReadSeq( contour_ptr, &reader, 0 );
            for( int j=0; j < contour_ptr->total; j++ ) {
                CV_READ_SEQ_ELEM( pt, reader );
                blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
            }
            blob.nPts = blob.pts.size();

            objects.push_back(blob);

        } else if(bTrackBlobs) { // SEARCH FOR BLOBS
            float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
            if( (area > minArea) && (area < maxArea) ) {
                Blob blob=Blob();
                float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );

                cvMoments( contour_ptr, myMoments );

                // this is if using non-angle bounding box
                CvRect rect = cvBoundingRect( contour_ptr, 0 );
                blob.boundingRect.x = rect.x;
                blob.boundingRect.y = rect.y;
                blob.boundingRect.width = rect.width;
                blob.boundingRect.height = rect.height;

                //Angle Bounding rectangle
                blob.angleBoundingRect.x = box.center.x;
                blob.angleBoundingRect.y = box.center.y;
                blob.angleBoundingRect.width = box.size.height;
                blob.angleBoundingRect.height = box.size.width;
                blob.angle = box.angle;

                // assign other parameters
                blob.area = fabs(area);
                blob.hole = area < 0 ? true : false;
                blob.length = cvArcLength(contour_ptr);

                // AlexP
                // The cast to int causes errors in tracking since centroids are calculated in
                // floats and they migh land between integer pixel values (which is what we really want)
                // This not only makes tracking more accurate but also more fluid
                blob.centroid.x = (myMoments->m10 / myMoments->m00);
                blob.centroid.y = (myMoments->m01 / myMoments->m00);
                blob.lastCentroid.x = 0;
                blob.lastCentroid.y = 0;

                // get the points for the blob (capped at TOUCH_MAX_CONTOUR_LENGTH):
                CvPoint pt;
                CvSeqReader reader;
                cvStartReadSeq( contour_ptr, &reader, 0 );
                for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
                    CV_READ_SEQ_ELEM( pt, reader );
                    blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
                }
                blob.nPts = blob.pts.size();

                blobs.push_back(blob);
            }
        }
        contour_ptr = contour_ptr->h_next;
    }

    if(bTrackFingers) {  // SEARCH FOR FINGERS
        CvPoint* PointArray;
        int* hull;
        int hullsize;

        if (contour_list)
            contour_list = cvApproxPoly(contour_list, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 );

        for( ; contour_list != 0; contour_list = contour_list->h_next ){
            int count = contour_list->total; // This is number point in contour
            CvRect rect = cvContourBoundingRect(contour_list, 1);

            if ( (rect.width*rect.height) > 300 ){ // Analize the bigger contour
                CvPoint center;
                center.x = rect.x+rect.width/2;
                center.y = rect.y+rect.height/2;

                PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set.
                hull = (int*)malloc(sizeof(int)*count); // Alloc memory for indices of convex hull vertices.

                cvCvtSeqToArray(contour_list, PointArray, CV_WHOLE_SEQ); // Get contour point set.

                // Find convex hull for curent contour.
                cvConvexHull( PointArray, count, NULL, CV_COUNTER_CLOCKWISE, hull, &hullsize);

                // Vertical extent of the hull (used for the cutoff heuristic below).
                int upper = 640, lower = 0;
                for (int j=0; j<hullsize; j++) {
                    int idx = hull[j]; // corner index
                    if (PointArray[idx].y < upper) upper = PointArray[idx].y;
                    if (PointArray[idx].y > lower) lower = PointArray[idx].y;
                }
                float cutoff = lower - (lower - upper) * 0.1f;

                // find interior angles of hull corners
                for (int j=0; j<hullsize; j++) {
                    int idx = hull[j]; // corner index
                    int pdx = idx == 0 ? count - 1 : idx - 1; // predecessor of idx
                    int sdx = idx == count - 1 ? 0 : idx + 1; // successor of idx

                    cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y);
                    cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y);

                    float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );

                    // low interior angle + within upper 90% of region -> we got a finger
                    if (angle < 1 ){ //&& PointArray[idx].y < cutoff) {
                        Blob blob = Blob();

                        //float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
                        //cvMoments( contour_ptr, myMoments );

                        // this is if using non-angle bounding box
                        //CvRect rect = cvBoundingRect( contour_ptr, 0 );
                        blob.boundingRect.x = PointArray[idx].x-5;
                        blob.boundingRect.y = PointArray[idx].y-5;
                        blob.boundingRect.width = 10;
                        blob.boundingRect.height = 10;

                        //Angle Bounding rectangle
                        blob.angleBoundingRect.x = PointArray[idx].x-5;
                        blob.angleBoundingRect.y = PointArray[idx].y-5;
                        blob.angleBoundingRect.width = 10;
                        blob.angleBoundingRect.height = 10;
                        blob.angle = atan2((float) PointArray[idx].x - center.x , (float) PointArray[idx].y - center.y);

                        // assign other parameters
                        //blob.area = fabs(area);
                        //blob.hole = area < 0 ? true : false;
                        //blob.length = cvArcLength(contour_ptr);

                        // AlexP
                        // The cast to int causes errors in tracking since centroids are calculated in
                        // floats and they migh land between integer pixel values (which is what we really want)
                        // This not only makes tracking more accurate but also more fluid
                        blob.centroid.x = PointArray[idx].x;//(myMoments->m10 / myMoments->m00);
                        blob.centroid.y = PointArray[idx].y;//(myMoments->m01 / myMoments->m00);
                        blob.lastCentroid.x = 0;
                        blob.lastCentroid.y = 0;

                        fingers.push_back(blob);
                    }
                }

                // Free memory.
                free(PointArray);
                free(hull);
            }
        }
    }

    nBlobs = blobs.size();
    nFingers = fingers.size();
    nObjects = objects.size();

    // Free the storage memory.
    // Warning: do this inside this function otherwise a strange memory leak
    if( contour_storage != NULL )
        cvReleaseMemStorage(&contour_storage);
    if( storage != NULL )
        cvReleaseMemStorage(&storage);

    return (bTrackFingers)? nFingers:nBlobs;
}
//--------------------------------------------------------------------------------
// Finds contours in `input` via an incremental contour scanner, optionally
// replacing each contour by its polygon approximation (approximation > 0)
// or its convex hull (approximation <= 0). Contours whose area is within
// (minArea, maxArea) are kept, sorted by area, and the first nConsidered
// are converted into ofxCvMyBlob entries in `myblobs`. Returns nBlobs.
int ofxCvMyContourFinder::findContours( IplImage*  input,
                                        int minArea,
                                        int maxArea,
                                        int nConsidered,
                                        bool bFindHoles,
                                        int approximation) {

    // get width/height disregarding ROI
    _width = input->width;
    _height = input->height;

    reset();

    // opencv will clober the image it detects contours on, so we want to
    // copy it into a copy before we detect contours. That copy is allocated
    // if necessary (necessary = (a) not allocated or (b) wrong size)
    // so be careful if you pass in different sized images to "findContours"
    // there is a performance penalty, but we think there is not a memory leak
    // to worry about better to create mutiple contour finders for different
    // sizes, ie, if you are finding contours in a 640x480 image but also a
    // 320x240 image better to make two ofxCvMyContourFinder objects then to use
    // one, because you will get penalized less.
    if( !inputCopy ) {
        inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels);
    } else if( inputCopy->width != _width || inputCopy->height != _height ) {
        // reallocate to new size
        cvReleaseImage(&inputCopy);
        inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels);
    }

    cvSetImageROI(inputCopy, cvGetImageROI(input));
    cvCopy(input, inputCopy);

    contour_storage = cvCreateMemStorage( 1000 );
    storage = cvCreateMemStorage( 1000 );

    CvContourRetrievalMode  retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;

    // Incremental scan: pull contours one at a time instead of a full list.
    CvContourScanner scanner = cvStartFindContours( inputCopy, contour_storage,
                                                    sizeof(CvContour), retrieve_mode,
                                                    CV_CHAIN_APPROX_SIMPLE);
    CvSeq* c;
    int numCont = 0;

    while(( c = cvFindNextContour(scanner)) != NULL) {
        CvSeq* c_new;
        if( approximation > 0){
            // Simplify the contour with Douglas-Peucker at the given tolerance.
            c_new = cvApproxPoly( c, sizeof(CvContour), contour_storage,
                                  CV_POLY_APPROX_DP, approximation, 0 );
        } else {
            // Otherwise use the convex hull of the contour.
            c_new = cvConvexHull2( c, contour_storage, CV_CLOCKWISE, 1 );
        }

        float area = fabs( cvContourArea(c_new, CV_WHOLE_SEQ) );
        if( (area > minArea) && (area < maxArea) ) {
            cvSeqBlobs.push_back(c_new);
        }
        numCont++;
    }
    // cvEndFindContours(scanner);

    // sort the pointers based on size
    if( cvSeqBlobs.size() > 1 ) {
        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), mysort_carea_compare );
    }

    // now, we have cvSeqBlobs.size() contours, sorted by size in the array
    // cvSeqBlobs let's get the data out and into our structures that we like
    for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
        myblobs.push_back( ofxCvMyBlob() );
        float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
        CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 );
        cvMoments( cvSeqBlobs[i], myMoments );

        myblobs[i].area = fabs(area);
        // cvContourArea is signed: negative area marks a hole.
        myblobs[i].hole = area < 0 ? true : false;
        myblobs[i].length = cvArcLength(cvSeqBlobs[i]);
        myblobs[i].boundingRect.x = rect.x;
        myblobs[i].boundingRect.y = rect.y;
        myblobs[i].boundingRect.width = rect.width;
        myblobs[i].boundingRect.height = rect.height;

        // cvMinAreaRect2 needs at least 6 points to be reliable here.
        if(cvSeqBlobs[i]->total >= 6){
            myblobs[i].box2D_cv = cvMinAreaRect2(cvSeqBlobs[i]);
        }
        myblobs[i].bounding_cv = cvBoundingRect(cvSeqBlobs[i]);

        // Centroid from spatial moments.
        double x = (myMoments->m10 / myMoments->m00);
        double y = (myMoments->m01 / myMoments->m00);
        myblobs[i].centroid.x = (int)x;
        myblobs[i].centroid.y = (int)y;
        myblobs[i].centroid_cv = cvPoint2D32f(x,y);

        // myblobs[i].contour = (CvPoint *)malloc(cvSeqBlobs[i]->total * sizeof(CvPoint));
        // cvCvtSeqToArray(cvSeqBlobs[i], myblobs[i].contour, CV_WHOLE_SEQ);

        // get the points for the blob:
        CvPoint           pt;
        CvSeqReader       reader;
        cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );
        for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {
            CV_READ_SEQ_ELEM( pt, reader );
            myblobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
        }
        myblobs[i].nPts = myblobs[i].pts.size();
    }

    nBlobs = myblobs.size();

    // Free the storage memory.
    // Warning: do this inside this function otherwise a strange memory leak
    if( contour_storage != NULL ) {
        cvReleaseMemStorage(&contour_storage);
    }
    if( storage != NULL ) {
        cvReleaseMemStorage(&storage);
    }

    return nBlobs;
}
//--------------------------------------------------------------------------------
// Finds contours in `input`, keeps those with minArea < area < maxArea,
// sorts them by area, and converts the first nConsidered into ofxCvBlob
// entries in `blobs` (bounding rect, centroid, signed-area hole flag,
// perimeter, and the contour points). Returns nBlobs.
int ofxCvContourFinder::findContours( ofxCvGrayscaleImage&  input,
                                      int minArea,
                                      int maxArea,
                                      int nConsidered,
                                      bool bFindHoles,
                                      bool bUseApproximation) {
    // get width/height disregarding ROI
    IplImage* ipltemp = input.getCvImage();
    _width = ipltemp->width;
    _height = ipltemp->height;

    reset();

    // opencv will clober the image it detects contours on, so we want to
    // copy it into a copy before we detect contours. That copy is allocated
    // if necessary (necessary = (a) not allocated or (b) wrong size)
    // so be careful if you pass in different sized images to "findContours"
    // there is a performance penalty, but we think there is not a memory leak
    // to worry about better to create mutiple contour finders for different
    // sizes, ie, if you are finding contours in a 640x480 image but also a
    // 320x240 image better to make two ofxCvContourFinder objects then to use
    // one, because you will get penalized less.
    if( inputCopy.getWidth() == 0 ) {
        inputCopy.setUseTexture(false);
        inputCopy.allocate( _width, _height );
    } else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
        // reallocate to new size
        inputCopy.clear();
        inputCopy.setUseTexture(false);
        inputCopy.allocate( _width, _height );
    }

    inputCopy.setROI( input.getROI() );
    inputCopy = input;

    CvSeq* contour_list = NULL;
    contour_storage = cvCreateMemStorage( 1000 );
    storage = cvCreateMemStorage( 1000 );

    CvContourRetrievalMode  retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
    cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode,
                    bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
    CvSeq* contour_ptr = contour_list;

    // put the contours from the linked list, into an array for sorting
    while( (contour_ptr != NULL) ) {
        float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
        if( (area > minArea) && (area < maxArea) ) {
            cvSeqBlobs.push_back(contour_ptr);
        }
        contour_ptr = contour_ptr->h_next;
    }

    // sort the pointers based on size
    if( cvSeqBlobs.size() > 1 ) {
        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), sort_carea_compare );
    }

    // now, we have cvSeqBlobs.size() contours, sorted by size in the array
    // cvSeqBlobs let's get the data out and into our structures that we like
    for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
        blobs.push_back( ofxCvBlob() );
        float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
        CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 );
        cvMoments( cvSeqBlobs[i], myMoments );

        blobs[i].area = fabs(area);
        // cvContourArea is signed: negative area marks a hole.
        blobs[i].hole = area < 0 ? true : false;
        blobs[i].length = cvArcLength(cvSeqBlobs[i]);
        blobs[i].boundingRect.x = rect.x;
        blobs[i].boundingRect.y = rect.y;
        blobs[i].boundingRect.width = rect.width;
        blobs[i].boundingRect.height = rect.height;
        // Centroid from spatial moments.
        blobs[i].centroid.x = (myMoments->m10 / myMoments->m00);
        blobs[i].centroid.y = (myMoments->m01 / myMoments->m00);

        // get the points for the blob:
        CvPoint           pt;
        CvSeqReader       reader;
        cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );
        for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {
            CV_READ_SEQ_ELEM( pt, reader );
            blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
        }
        blobs[i].nPts = blobs[i].pts.size();
    }

    nBlobs = blobs.size();

    // Free the storage memory.
    // Warning: do this inside this function otherwise a strange memory leak
    if( contour_storage != NULL ) {
        cvReleaseMemStorage(&contour_storage);
    }
    if( storage != NULL ) {
        cvReleaseMemStorage(&storage);
    }

    return nBlobs;
}
int main() { bool salir=FALSE; do { IplImage *im; char eleccion; bool j=TRUE; //Panel printf("Elija la imagen que quiere cargar\n"); printf("Imagenes del programa:\n\n" "A=2_bolas\n" "B=3_bolas\n" "C=4_bolas\n" "D=6_bolas\n" "E=bola_azul\n" "F=bola_roja\n" "G=bolas_cortadas\n" "H=bola_amarilla_blanca\n" "I=bola_amarilla_blanca_+intensidad\n" "J=bola_amarilla1\n" "K=bolas_cortadas_+intensidad\n" "L=bolas_juntas\n" "M=cambio_angulo_iluminacion\n" "N=bolas_pegadas_1\n" "O=bolas_pegadas_2\n" "P=bolas_pegadas_3\n" "Q=bolas_pegadas_4\n" "R=bolas_pegadas_4_+intensidad\n" "S=bolas_pegadas_rotas\n" "T=bolas_pegadas_rotas_2\n" ); printf("X=SALIR\n\n"); while(j==TRUE) { scanf("%c",&eleccion); switch(eleccion) { case 'A':{ char NombreImagen[]="2_bolas.jpg"; im=cvLoadImage(NombreImagen, -1); j=FALSE;} break; case 'B': {char NombreImagen[]="3_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'C': { char NombreImagen[]="4_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'D': { char NombreImagen[]="6_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'E': { char NombreImagen[]="bola_azul.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'F': {char NombreImagen[]="bola_roja.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'G': {char NombreImagen[]="bolas_cortadas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'H': {char NombreImagen[]="bola_amarilla_blanca.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'I': { char NombreImagen[]="bola_amarilla_blanca_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'J': { char NombreImagen[]="bola_amarilla1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'K': { char NombreImagen[]="bolas_cortadas_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'L': { char NombreImagen[]="bolas_juntas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'M': {char 
NombreImagen[]="cambio_angulo_iluminacion.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'N': {char NombreImagen[]="bolas_pegadas_1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'O': {char NombreImagen[]="bolas_pegadas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'P': {char NombreImagen[]="bolas_pegadas_3.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'Q': {char NombreImagen[]="bolas_pegadas_4.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'R': {char NombreImagen[]="bolas_pegadas_4_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'S': {char NombreImagen[]="bolas_pegadas_rotas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'T': {char NombreImagen[]="bolas_pegadas_rotas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'X': {salir=TRUE; return 0;} break; default:{ printf("Eleccion incorrecta, vuelva a elegir una opcion\n"); j=TRUE; } } } //-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //OBTENER UNA IMAGEN BINARIA SÓLO CON BOLAS AZULES Y OTRA SÓLO CON BOLAS ROJAS IplImage *Imagen_RGB; IplImage *Imagen_umbr; IplImage *Imagen_umbr_2; CvSize Dimensiones; //umbrales de la imagenS y la imagenH. 
En esta parte no utilizo la función MinMax porque me sale mejor poniendo unos umbrales fijos int umbral1=150; int umbral2=100; //pasamos de BGR a RGB Dimensiones= cvGetSize(im); Imagen_RGB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(im,Imagen_RGB,CV_BGR2RGB); IplImage *ImagenHSV; IplImage *ImagenH,*ImagenS,*ImagenV; //pasamos de RGB a HSV ImagenHSV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(Imagen_RGB,ImagenHSV,CV_RGB2HSV); //Extraemos de la imagen HSV sus tres componentes: H, S y V ImagenH=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenS=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(ImagenHSV,ImagenH,ImagenS,ImagenV,0); //imagenes binarias para umbralizar Sy H Imagen_umbr=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); Imagen_umbr_2=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); //umbralizacion. cvThreshold(ImagenS,Imagen_umbr,umbral1,255,CV_THRESH_BINARY); cvThreshold(ImagenH,Imagen_umbr_2,umbral2,255,CV_THRESH_BINARY_INV); //Descompongo la imagen en R,G y B IplImage *ImagenR=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenG=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(Imagen_RGB,ImagenR,ImagenG,ImagenB,0); //A partir de aquí hago una serie de transformaciones morfológicas para separar en imágenes binarias las bolas azules de las rojas. //creo elemento estructurante IplConvKernel* element = 0; const int element_shape =CV_SHAPE_ELLIPSE; int pos=1; element= cvCreateStructuringElementEx(pos*2+1,pos*2+1,pos,pos, element_shape,0); IplImage * temp= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *temp2=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *resta=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); //con esto obtengo todas las bolas binarizadas cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_TOPHAT,2); //tophat. Me detecta sólo las sombras de las bolas. 
Mi iluminación iene de arriba. //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); Esto podria aplicarlo si las sombras se crearan en el lado contrario cvAbsDiff (Imagen_umbr, temp ,temp); //resto la original - el tophat cvMorphologyEx(temp,temp,temp, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Con esto obtengo las bolas azules binarizadas cvMorphologyEx(Imagen_umbr_2,temp2,temp2, NULL,CV_MOP_TOPHAT,1); //tophat //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); cvAbsDiff (Imagen_umbr_2, temp2 ,temp2); //resto la original - el tophat cvMorphologyEx(temp2,temp2,temp2, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Dilato y erosiono el mismo número de veces, para que las bolas me queden mas o menos del mismo tamaño. Además lo hago muchas veces(15), para eliminar los //máximos defectos posibles debido a sombras y cambios y contrastes debido a la iluminación cvDilate(temp2,temp2,element,15); cvErode(temp2,temp2,element,15); cvAbsDiff (temp2, temp ,resta); // Resto la imagen de todas las bolas -la imagen de las bolas azules, dilato mcuhas veces y erosiono muchas veces, //y finalmente solo me quedan las rojas cvDilate(resta,resta,element,15);//dilato cvErode(resta,resta,element,15);//erosiono //Puede que algun contorno no deseado aún permanezca en la imagen binaria. Como aplico las mismas transformaciones morfológicas a las dos imágenes binarias //tendré el mismo defecto en las dos imagenes, así que obtengo una imagen sólo los defectos, y después resto los defectos a las dos imágenes. 
IplImage * temp3= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * temp4= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_defectos_comunes= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_azules= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_rojas= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); cvThreshold(temp2,temp3,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas rojas cvThreshold(resta,temp4,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas azules cvAnd(temp3,temp4,Im_defectos_comunes,NULL);//multiplico las dos imagenes, la imagen que obtengo solo aparecen los defectos comunes cvAbsDiff (temp2,Im_defectos_comunes,Im_bolas_azules);//resto los defectos a las bolas azules cvAbsDiff (resta, Im_defectos_comunes ,Im_bolas_rojas);//resto los defectos a las bolas rojas //Ya tengo una imagen binaria sólo con las bolas azules y otra sólo con las rojas. //------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CALCULAR HISTOGRAMA DE LA IMAGEN G //Nueva imagen para dibujar el histograma IplImage *histImage; //Variables para el histograma int hist_size=256; int NivelGris; float NumPixels; //Estructura histograma para guardar la informacion CvHistogram *hist; //Nueva imagen para dibujar el histograma histImage = cvCreateImage(cvSize(256,256), 8, 1); //Estructura histograma para guardar la informacion hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY,NULL, 1); //calcular el histograma. 
Lo hago con la imagenG, ya que hay más contraste que en la imagen en escala de grises, pero también funcionaria con la imagen de escala de grises cvCalcHist(&ImagenG,hist,0,NULL); cvSetZero(histImage); long Histograma[256]; //dibujo el histograma for(NivelGris=0;NivelGris<hist_size;++NivelGris) { NumPixels=cvQueryHistValue_1D(hist,NivelGris)/15; cvLine(histImage,cvPoint(NivelGris,256),cvPoint(NivelGris,256-NumPixels),CV_RGB(255,255,255),1,8,0); Histograma[NivelGris]=NumPixels;//meto en un array el numero de pixels para cada nivel de gris } cvReleaseHist(&hist); cvSaveImage("Histograma.jpg",histImage,0); //------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ //UMBRALIZACIÓN DE LA IMAGEN G IplImage *imagen_bin; CvMemStorage *Memoria; CvSeq *Contorno, *Primer_Contorno; int Nc; //imagen=cvLoadImage("herramientas.tif",CV_LOAD_IMAGE_GRAYSCALE); imagen_bin=cvCreateImage(cvGetSize(ImagenG),8,1); //imagen_color=cvCreateImage(cvGetSize(ImagenG),8,3); //umbralizar la ImagenG int umbral; umbral=MinMax(Histograma); //Para algunas imagenes, debido a que tienen mas iluminacion o se introducen otros objetos como la mano, en el histograma las gausianas se juntan mucho o solo aparece //una. 
En este caso la función MinMAx() calcula un umbral muy alto y hace que no se detecten los contornos de algunas bolas, asi que establezco un umbral máximo if(umbral>100) { umbral=100; } cvLine(histImage,cvPoint(umbral,256),cvPoint(umbral,0),CV_RGB(255,255,255),1,8,0);//uDibujo el umbral en el histograma cvThreshold(ImagenG,imagen_bin,umbral,255,CV_THRESH_BINARY_INV);//Binarizo la imagen G cvMorphologyEx(imagen_bin,imagen_bin,imagen_bin, NULL,CV_MOP_CLOSE,6);//Alplico cierre para eliminar los cambios de contraste en el interior de las bolas //debido al reflejo al reflejo de la luz //--------------------------------------------------------------------------------------------------------------------------------------------------------------------- // CÁLCULO DE CONTORNOS, ÁREAS, PERÍMETROS, CAJAS Y CENTROS DE CAJA EN LA IMAGEN G. IplConvKernel* element_2 = 0; const int element_shape_2 =CV_SHAPE_ELLIPSE; int pos_2=1; element_2= cvCreateStructuringElementEx(pos_2*2+1,pos_2*2+1,pos_2,pos_2, element_shape_2,0); Memoria=cvCreateMemStorage(); bool k=FALSE; int n=0; bool pelotas_juntas=FALSE; int i; double *perimetro; double *area; CvBox2D *BoundBox; CvPoint *centro; int bolas_rotas_azules=0; int bolas_rotas_rojas=0; CvScalar s3; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
//DETECTAR BOLAS ROTAS IplImage * inv_bolas_azules, *inv_bolas_rojas; CvMemStorage *storage_2; CvMemStorage *storage_3; CvSeq *Contorno_2, *Primer_Contorno_2; CvSeq *Contorno_3, *Primer_Contorno_3; int Nc_2; int Nc_3; double *area_2; double *area_3; CvBox2D *BoundBox_2; CvBox2D *BoundBox_3; CvPoint *centro_2; CvPoint *centro_3; inv_bolas_azules=cvCreateImage(cvGetSize(Im_bolas_azules),8,1); inv_bolas_rojas=cvCreateImage(cvGetSize(Im_bolas_rojas),8,1); cvThreshold(Im_bolas_azules,inv_bolas_azules,128,255,CV_THRESH_BINARY_INV); cvThreshold(Im_bolas_rojas,inv_bolas_rojas,128,255,CV_THRESH_BINARY_INV); storage_2=cvCreateMemStorage(); storage_3=cvCreateMemStorage(); //detecto las bolas rotas azules Nc_2=cvFindContours(inv_bolas_azules,storage_2,&Primer_Contorno_2,sizeof(CvContour),CV_RETR_EXTERNAL); //Encuentro cotornos en la imagen binaria donde sólo aparecen //las bolas azules area_2=(double*)malloc(Nc_2*sizeof(double));//tamaño del vector area BoundBox_2=(CvBox2D*)malloc(Nc_2*sizeof(CvBox2D));//tamaño del vector BoundBox_2 centro_2=(CvPoint*)malloc(Nc_2*sizeof(CvPoint));//tamaño del vector centro_2 for(i=0,Contorno_2=Primer_Contorno_2;Contorno_2!=NULL;Contorno_2=Contorno_2->h_next,++i) { area_2[i]=cvContourArea(Contorno_2,CV_WHOLE_SEQ);//Hallo el area de cada contorno BoundBox_2[i]=cvMinAreaRect2(Contorno_2,NULL);//Hallo las caja de cada contorno } for(i=0;i<Nc_2;++i) { centro_2[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);// Hallo el centro de cada contorno } //Para cada contorno, si su area es menor que 2500, es que se trata de una bola rota for(i=0;i<Nc_2;++i) { if(area_2[i]<2500) { bolas_rotas_azules++; DibujarBox2D(im,BoundBox_2[i]); printf("Bola rota azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Detecto las bolas rotas rojas // Es el mismo procedimiento que para detectar las bolas rotas azules, pero encontrando contornos en la imagen binaria donde solo aparecen las bolas rojas 
Nc_3=cvFindContours(inv_bolas_rojas,storage_3,&Primer_Contorno_3,sizeof(CvContour),CV_RETR_EXTERNAL); area_3=(double*)malloc(Nc_3*sizeof(double)); BoundBox_3=(CvBox2D*)malloc(Nc_3*sizeof(CvBox2D)); centro_3=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno_3=Primer_Contorno_3;Contorno_3!=NULL;Contorno_3=Contorno_3->h_next,++i) { area_3[i]=cvContourArea(Contorno_3,CV_WHOLE_SEQ); BoundBox_3[i]=cvMinAreaRect2(Contorno_3,NULL); } for(i=0;i<Nc_3;++i) { centro_3[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc_3;++i) { if(area_3[i]<2000) { bolas_rotas_rojas++; DibujarBox2D(im,BoundBox_3[i]); printf("Bola rota roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CASO DE LAS BOLAS JUNTAS // En el caso de que haya dos o más bolas juntas, el programa encuentra un contorno con el área de todas las bolas que están juntas. Para solucionar este problema //utilizo el perímetro de los contornos. Elijo un valor umbral para el perímetro en el que me aseguro que se han separado todas las bolas. Así, si existe un perímetro //mayor al umbral, erosiono la imagen hasta que todos los perímetros sean menores que ese umbral. // Para detectar si hay bolas juntas, compruebo si existe algún controno que tenga el área mayor que el de una bola . 
for(i=0;i<Nc;++i) { if(area[i]>4000)//si existe el área de un contorno mayor al área de una bola { k=TRUE; pelotas_juntas=TRUE; } } while(k==TRUE)// Se mete en este bucle si ha encontrado algun área mayor que el de una bola { k=FALSE; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc;++i) { if(perimetro[i]>100) { k=TRUE; cvErode(imagen_bin,imagen_bin,element_2,1); } } } //------------------------------------------------------------------------------------------------------------------------------------------------------------ //CONOCER EL NÚMERO DE BOLAS DE CADA COLOR Y SUS RESPECTIVAS POSICIONES int bolas_azules=0; int bolas_rojas=0; int mano=0; double radio=0.0; CvScalar s; CvScalar s2; //Diferenciar bolas en el caso de que no haya bolas juntas if( pelotas_juntas==FALSE) { //Bolas azules for(i=0;i<Nc;++i)//bucle para todods los contornos { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen de bolas azules if(s.val[0]==0)// si es 0,es que puede haber una bola azul o una bola rota azul { if(area[i]>2000 && area[i]<4000)//bola azul { bolas_azules++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué 
color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que puede haber bola roja o bola rota roja { if(area[i]>2000 && area[i]<4000)//bola roja { bolas_rojas++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } } if( pelotas_juntas==TRUE) { float radio=30;//Como en el caso de qhe haya bolas juntas erosiono la imagen hasta separlas, no tengo las áreas reales de las bolas, así que //estipulo un radio aproximado . //Bolas azules for(i=0;i<Nc;++i) { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen con bolas azules if(s.val[0]==0)// si es 0,es que hay bola azul. En este caso no existe la posibilidad de que haya bolas rotas porque al erosionar solo permanecen los contornos //con un perímetro mayor al de una bola. El perímetro de una bola rota siempre será menor { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); bolas_azules++; printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que hay una bola roja { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); bolas_rojas++; printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } printf("bolas azules:%d\n",bolas_azules); printf("bolas rotas azules:%d\n", bolas_rotas_azules); printf("bolas rojas:%d\n",bolas_rojas); printf("bolas rotas rojas:%d\n\n",bolas_rotas_rojas); printf("ORDENAR AL ROBOT\n\n\n"); if(bolas_rotas_azules>0) { printf("METER BOLAS AZULES DEFECTUOSAS EN CAJA DE BOLAS AZULES DEFECTUOSAS\n\n"); } if(bolas_rotas_rojas>0) { printf("METER BOLAS ROJAS DEFECTUOSAS EN CAJA DE BOLAS ROJAS DEFECTUOSAS\n\n"); } if(bolas_azules>0 
|| bolas_rojas>0) { printf("EMPAQUETAR BOLAS\n\n"); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------- cvWaitKey(0); //-------------------------------------------------------------------------------------------------------------------------------------------------------------------- //PANTALLA cvNamedWindow("Original", CV_WINDOW_AUTOSIZE); cvShowImage("Original", im ); //cvNamedWindow("imagen_bin", CV_WINDOW_AUTOSIZE); //cvShowImage("imagen_bin", imagen_bin ); //Mostrar el plano de color rojo, verde y azul //cvNamedWindow("R", CV_WINDOW_AUTOSIZE); //cvShowImage("R",ImagenR); //cvNamedWindow("G", CV_WINDOW_AUTOSIZE); //cvShowImage("G",inv_bolas_azules); //cvNamedWindow("B", CV_WINDOW_AUTOSIZE); //cvShowImage("B",inv_bolas_rojas); cvNamedWindow("bolas_azules", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_azules",Im_bolas_azules); cvNamedWindow("bolas_rojas", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_rojas",Im_bolas_rojas); //Mostrar la imagen cvNamedWindow("Histograma de G", CV_WINDOW_AUTOSIZE); cvShowImage("Histograma de G", histImage ); cvWaitKey(0); //--------------------------------------------------------------------------------------------------------------------------------------------------------------- //LIBERAR MEMORIA cvDestroyAllWindows(); cvReleaseImage(&ImagenR); cvReleaseImage(&ImagenG); cvReleaseImage(&ImagenB); cvReleaseImage(&imagen_bin); cvReleaseImage(&histImage); cvReleaseImage(&im); cvReleaseImage(&Imagen_RGB); cvReleaseImage(&Imagen_umbr); cvReleaseImage(&Imagen_umbr_2); cvReleaseImage(&ImagenHSV); cvReleaseImage(&ImagenH); cvReleaseImage(&ImagenS); cvReleaseImage(&ImagenV); cvReleaseImage(&temp); cvReleaseImage(&temp2); cvReleaseImage(&temp3); cvReleaseImage(&temp4); cvReleaseImage(&Im_defectos_comunes); cvReleaseImage(&Im_bolas_azules); cvReleaseImage(&Im_bolas_rojas); cvReleaseImage(&inv_bolas_rojas); 
cvReleaseImage(&inv_bolas_azules); }while(salir==FALSE); return 0; }
/**
- FUNCTION: ExternPerimeter
- FUNCTIONALITY: Computes the "extern" perimeter of the blob: the length of the
  contour segments that touch the image borders and/or black pixels of maskImage.
- PARAMETERS:
	- maskImage: if != NULL, counts maskImage black pixels as external pixels
	  and contour points touching them are counted as external contour points.
	- xBorder: true to consider points on the left/right image borders
	  (x == 0 or x == width-1) as extern
	- yBorder: true to consider points on the top/bottom image borders
	  (y == 0 or y == height-1) as extern
- RESULT:
	- extern perimeter; the value is cached in m_externPerimeter, so borders
	  passed on subsequent calls are ignored until the cache is invalidated
- RESTRICTIONS:
- AUTHOR: rborras
- CREATION DATE: 2008/05/05
- MODIFICATION: Date. Author. Description.
- NOTE: If CBlobContour::GetContourPoints aproximates contours with a method
  different that NONE, this function will not give correct results
*/
double CBlob::ExternPerimeter( IplImage *maskImage, bool xBorder /* = true */, bool yBorder /* = true */)
{
	t_PointList externContour, externalPoints;
	CvSeqReader reader;
	CvSeqWriter writer;
	CvPoint actualPoint, previousPoint;
	bool find = false;
	int i,j;
	int delta = 0;

	// already calculated? (-1 acts as the "not yet computed" flag)
	if( m_externPerimeter != -1 )
	{
		return m_externPerimeter;
	}

	// get contour pixels
	externContour = m_externalContour.GetContourPoints();

	m_externPerimeter = 0;

	// there are contour pixels?
	if( externContour == NULL )
	{
		return m_externPerimeter;
	}

	cvStartReadSeq( externContour, &reader);

	// create a sequence with the external points of the blob
	externalPoints = cvCreateSeq( externContour->flags, externContour->header_size, externContour->elem_size,
								  m_storage );
	cvStartAppendToSeq( externalPoints, &writer );
	// previousPoint.x == -1 is the "no previous external point yet" sentinel
	previousPoint.x = -1;

	// which contour pixels touch border?
	for( j=0; j< externContour->total; j++)
	{
		CV_READ_SEQ_ELEM( actualPoint, reader);
		find = false;

		// pixel is touching an image border?
		// NOTE: '&' on bool operands acts as logical AND here, and precedence
		// still groups this as (xBorder & ...) || (yBorder & ...)
		if ( xBorder & ((actualPoint.x == 0) || (actualPoint.x == m_originalImageSize.width - 1 )) ||
			yBorder & ((actualPoint.y == 0) || (actualPoint.y == m_originalImageSize.height - 1 )))
		{
			find = true;
		}
		else
		{
			if( maskImage != NULL )
			{
				// verify if some of 8-connected neighbours is black in mask
				// (scans three rows of three pixels centred on actualPoint)
				// NOTE(review): the pointer arithmetic assumes the point is not on
				// the mask's first/last row or column, otherwise it reads out of
				// bounds -- confirm callers guarantee this
				char *pMask;

				// row above the point
				pMask = (maskImage->imageData + actualPoint.x - 1 + (actualPoint.y - 1) * maskImage->widthStep);

				for ( i = 0; i < 3; i++, pMask++ )
				{
					if(*pMask == 0 && !find )
					{
						find = true;
						break;
					}
				}

				if(!find)
				{
					// same row as the point (includes the point itself)
					pMask = (maskImage->imageData + actualPoint.x - 1 + (actualPoint.y ) * maskImage->widthStep);

					for ( i = 0; i < 3; i++, pMask++ )
					{
						if(*pMask == 0 && !find )
						{
							find = true;
							break;
						}
					}
				}

				if(!find)
				{
					// row below the point
					pMask = (maskImage->imageData + actualPoint.x - 1 + (actualPoint.y + 1) * maskImage->widthStep);

					for ( i = 0; i < 3; i++, pMask++ )
					{
						if(*pMask == 0 && !find )
						{
							find = true;
							break;
						}
					}
				}
			}
		}

		if( find )
		{
			// Manhattan distance to the previous external point; a jump > 2
			// means the current point starts a new, disjoint external segment
			if( previousPoint.x > 0 )
				delta = abs(previousPoint.x - actualPoint.x) + abs(previousPoint.y - actualPoint.y);

			// calculate separately each external contour segment
			if( delta > 2 )
			{
				// close the current segment, add its open (is_closed = 0) arc
				// length, then start accumulating a fresh segment
				cvEndWriteSeq( &writer );
				m_externPerimeter += cvArcLength( externalPoints, CV_WHOLE_SEQ, 0 );

				cvClearSeq( externalPoints );
				cvStartAppendToSeq( externalPoints, &writer );
				delta = 0;
				previousPoint.x = -1;
			}

			CV_WRITE_SEQ_ELEM( actualPoint, writer );
			previousPoint = actualPoint;
		}

	}

	cvEndWriteSeq( &writer );

	// add the length of the last (or only) external segment
	m_externPerimeter += cvArcLength( externalPoints, CV_WHOLE_SEQ, 0 );

	cvClearSeq( externalPoints );

	// divide by two because external points have one side inside the blob and the other outside
	// Perimeter of external points counts both sides, so it must be divided
	m_externPerimeter /= 2.0;

	return m_externPerimeter;
}
/// Shadow wrapper around cvArcLength for a curve given as a generic CvArr.
/// Forwards all arguments unchanged and returns the computed curve length.
double cvArcLength_Shadow( const CvArr * arr, CvSlice slice, int is_closed)
{
    const double length = cvArcLength( arr, slice, is_closed );
    return length;
}
/// Shadow wrapper around cvArcLength for a curve given as a CvSeq of points.
/// Forwards all arguments unchanged and returns the computed curve length.
double cvArcLength_Shadow( const CvSeq * seq, CvSlice slice, int is_closed)
{
    const double length = cvArcLength( seq, slice, is_closed );
    return length;
}
//-------------------------------------------------------------------------------- int ofxContourFinder::findContours( ofxCvGrayscaleImage& input, int minArea, int maxArea, int nConsidered, double hullPress, bool bFindHoles, bool bUseApproximation) { // get width/height disregarding ROI IplImage* ipltemp = input.getCvImage(); width = ipltemp->width; height = ipltemp->height; reset(); // opencv will clober the image it detects contours on, so we want to // copy it into a copy before we detect contours. That copy is allocated // if necessary (necessary = (a) not allocated or (b) wrong size) // so be careful if you pass in different sized images to "findContours" // there is a performance penalty, but we think there is not a memory leak // to worry about better to create mutiple contour finders for different // sizes, ie, if you are finding contours in a 640x480 image but also a // 320x240 image better to make two ofxContourFinder objects then to use // one, because you will get penalized less. if( inputCopy.width == 0 ) { inputCopy.allocate( input.width, input.height ); inputCopy = input; } else { if( inputCopy.width == input.width && inputCopy.height == input.height ) inputCopy = input; else { // we are allocated, but to the wrong size -- // been checked for memory leaks, but a warning: // be careful if you call this function with alot of different // sized "input" images!, it does allocation every time // a new size is passed in.... inputCopy.clear(); inputCopy.allocate( input.width, input.height ); inputCopy = input; } } CvSeq* contour_list = NULL; contour_storage = cvCreateMemStorage( 1000 ); storage = cvCreateMemStorage( 1000 ); CvContourRetrievalMode retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL; cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list, sizeof(CvContour), retrieve_mode, bUseApproximation ? 
CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE ); CvSeq* contour_ptr = contour_list; nCvSeqsFound = 0; // put the contours from the linked list, into an array for sorting while( (contour_ptr != NULL) ) { CvBox2D box=cvMinAreaRect2(contour_ptr); float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) ); if( (area > minArea) && (area < maxArea) ) { ofxBlob blob = ofxBlob(); float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ); cvMoments( contour_ptr, myMoments ); // this is if using non-angle bounding box CvRect rect = cvBoundingRect( contour_ptr, 0 ); blob.boundingRect.x = rect.x/width; blob.boundingRect.y = rect.y/height; blob.boundingRect.width = rect.width/width; blob.boundingRect.height = rect.height/height; //Angle Bounding rectangle blob.angleBoundingRect.x = box.center.x/width; blob.angleBoundingRect.y = box.center.y/height; blob.angleBoundingRect.width = box.size.height/width; blob.angleBoundingRect.height = box.size.width/height; blob.angle = box.angle; // assign other parameters blob.area = fabs(area); blob.hole = area < 0 ? 
true : false; blob.length = cvArcLength(contour_ptr); // The cast to int causes errors in tracking since centroids are calculated in // floats and they migh land between integer pixel values (which is what we really want) // This not only makes tracking more accurate but also more fluid blob.centroid.x = (myMoments->m10 / myMoments->m00) / width; blob.centroid.y = (myMoments->m01 / myMoments->m00) / height; blob.lastCentroid.x = 0; blob.lastCentroid.y = 0; if (blob.nFingers != 0){ blob.nFingers = 0; blob.fingers.clear(); } // get the points for the blob: CvPoint pt; CvSeqReader reader; cvStartReadSeq( contour_ptr, &reader, 0 ); for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) { CV_READ_SEQ_ELEM( pt, reader ); blob.pts.push_back( ofPoint((float)pt.x / width, (float)pt.y / height) ); } blob.nPts = blob.pts.size(); // Check if it´s a Hand and if it have fingers // if (area > 5000){ CvPoint* PointArray; int* hull; int hullsize; CvSeq* contourAprox = cvApproxPoly(contour_ptr, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 ); int count = contourAprox->total; // This is number point in contour PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set. hull = (int*)malloc(sizeof(int)*count); // Alloc memory for indices of convex hull vertices. cvCvtSeqToArray(contourAprox, PointArray, CV_WHOLE_SEQ); // Get contour point set. // Find convex hull for curent contour. cvConvexHull( PointArray, count, NULL, CV_COUNTER_CLOCKWISE, hull, &hullsize); int upper = 1, lower = 0; for (int j=0; j<hullsize; j++) { int idx = hull[j]; // corner index if (PointArray[idx].y < upper) upper = PointArray[idx].y; if (PointArray[idx].y > lower) lower = PointArray[idx].y; } float cutoff = lower - (lower - upper) * 0.1f; // find interior angles of hull corners for (int j=0; j < hullsize; j++) { int idx = hull[j]; // corner index int pdx = idx == 0 ? count - 1 : idx - 1; // predecessor of idx int sdx = idx == count - 1 ? 
0 : idx + 1; // successor of idx cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y); cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y); float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) ); // We got a finger // if (angle < 1 ){ ofPoint posibleFinger = ofPoint((float)PointArray[idx].x / width, (float)PointArray[idx].y / height); blob.nFingers++; blob.fingers.push_back( posibleFinger ); } } if ( blob.nFingers > 0 ){ // because means that probably it's a hand ofVec2f fingersAverage; for (int j = 0; j < blob.fingers.size(); j++){ fingersAverage += blob.fingers[j]; } fingersAverage /= blob.fingers.size(); if (blob.gotFingers){ blob.palm = (blob.palm + fingersAverage)*0.5; //blob.palm = fingersAverage; } else { blob.palm = fingersAverage; blob.gotFingers = true; // If got more than three fingers in a road it'll remember } } // Free memory. free(PointArray); free(hull); } blobs.push_back(blob); } contour_ptr = contour_ptr->h_next; } nBlobs = blobs.size(); // Free the storage memory. // Warning: do this inside this function otherwise a strange memory leak if( contour_storage != NULL ) cvReleaseMemStorage(&contour_storage); if( storage != NULL ) cvReleaseMemStorage(&storage); free(contour_ptr); return nBlobs; }
/*
 * processPlateChars
 *
 * Segments up to 8 character images out of a licence-plate photo.
 * Pipeline: grayscale -> Otsu binarise -> invert -> resize to 400x200 ->
 * crop borders -> find external contours -> copy each contour's bounding box
 * into its own image, then re-find contours per character to compute area
 * and perimeter.
 *
 * Takes ownership of orig_img (it is released before returning).
 * Returns a malloc'd array of 8 plateInfo entries (caller frees), or NULL on
 * allocation failure. Entries beyond the number of contours actually found
 * have charImage == NULL and zero area/perimeter.
 *
 * Fixes vs. previous revision:
 *   - the segmentation loop walked cvSeq->h_next exactly 8 times with no NULL
 *     check, crashing whenever fewer than 8 contours were found; both loops
 *     are now guarded;
 *   - malloc result is checked;
 *   - "Hieght" typo corrected in the debug output.
 */
plateInfo* processPlateChars( IplImage * orig_img )
{
	//Create placeholder for gray image
	IplImage * gray_img = cvCreateImage(cvGetSize(orig_img), IPL_DEPTH_8U, 1);

	//Convert to image grayscale
	cvCvtColor( orig_img, gray_img, CV_RGB2GRAY );

	//Create placeholder for black and white image
	IplImage * bw_img = cvCreateImage(cvGetSize(gray_img), IPL_DEPTH_8U, 1);

	//Convert gray image to binary (black and white)
	cvThreshold( gray_img, bw_img, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

	//Invert image
	IplImage * rev_img = cvCreateImage(cvGetSize(bw_img), IPL_DEPTH_8U, 1);
	cvNot( bw_img, rev_img );

	//Save results
	// cvSaveImage( "bw_img.jpg", bw_img, NULL );
	// cvSaveImage( "rev_img.jpg", rev_img, NULL );

	//Resize the reversed image to a fixed 400x200 canvas so the crop
	//coordinates below are valid regardless of the input size
	IplImage * resize_img = cvCreateImage(cvSize(400, 200), IPL_DEPTH_8U, 1);
	cvResize( rev_img, resize_img, CV_INTER_LINEAR) ;

	//Save results
	// cvSaveImage( "resize_img.jpg", resize_img, NULL );

	//Okay, now find the reversed (and resized) image's size
	CvSize resize_size = cvGetSize( resize_img );
	int w = resize_size.width;
	int h = resize_size.height;
	printf("Width: %d\nHeight: %d\n", w, h);

	//Crop the plate frame away: 30 px off each side, 54 px off top and bottom
	IplImage * resize_crop = cvCreateImage(cvSize(w-60, h-108), IPL_DEPTH_8U, 1);
	cvSetImageROI( resize_img, cvRect(30, 54, w-60, h-108) );
	cvCopy( resize_img, resize_crop, NULL );

	//Save this result
	// cvSaveImage( "resize_crop.jpg", resize_crop, NULL );

	CvSize resizeCrop_size = cvGetSize( resize_crop );
	int w2 = resizeCrop_size.width;
	int h2 = resizeCrop_size.height;
	printf("\nWidth: %d\nHeight: %d\n", w2, h2);

	//Now get the characters: one plateInfo slot per expected character
	struct plateInfo * plate_info = malloc( sizeof(plateInfo) * 8 );
	CvSeq * cvSeq = 0;
	CvMemStorage * storage = cvCreateMemStorage(0);
	int numContours;
	int i;

	if( plate_info == NULL )
	{
		//Allocation failed: release everything we created and bail out
		cvReleaseImage(&orig_img);
		cvReleaseImage(&gray_img);
		cvReleaseImage(&bw_img);
		cvReleaseImage(&rev_img);
		cvReleaseImage(&resize_img);
		cvReleaseImage(&resize_crop);
		cvReleaseMemStorage(&storage);
		return NULL;
	}

	//Poor man's debugger... (filenames for the commented cvSaveImage calls below)
	char * plate_buffer[] = {"plate0.jpg", "plate1.jpg", "plate2.jpg", "plate3.jpg",
							 "plate4.jpg","plate5.jpg", "plate6.jpg", "plate7.jpg"};

	//This should be 8
	numContours = cvFindContours( resize_crop, storage, &cvSeq, sizeof(CvContour),
								  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
	printf("\nnumContours plate: %d\n", numContours);

	cvDrawContours( resize_crop, cvSeq, cvScalarAll(255), cvScalarAll(0), 1, -1, 8, cvPoint(0,0) );

	for( i = 0; i < 8; i++)
	{
		//Guard: fewer than 8 contours found -- fill the remaining slots with
		//empty entries instead of dereferencing a NULL sequence
		if( cvSeq == NULL )
		{
			plate_info[i].charImage = NULL;
			plate_info[i].contourArea = 0.0;
			plate_info[i].arcLength = 0.0;
			continue;
		}

		//Get bounding rect
		CvRect char_rect = cvBoundingRect( cvSeq, 0 );

		//Create and set individual images
		plate_info[i].charImage = cvCreateImage(cvSize(char_rect.width, char_rect.height), IPL_DEPTH_8U, 1);

		//Set ROI for copying and copy
		cvSetImageROI( resize_crop, char_rect );
		cvCopy( resize_crop, plate_info[i].charImage, NULL );

		//For the poor man's debugger
		//cvSaveImage( plate_buffer[i], plate_info[i].charImage, NULL );

		cvSeq = cvSeq->h_next;
	}

	//Fix area and perimeter prob: re-find contours per character image, because
	//the first cvFindContours pass consumed/clobbered resize_crop
	for( i = 0; i < 8; i++)
	{
		if( plate_info[i].charImage == NULL )
			continue;

		cvClearMemStorage(storage);
		cvFindContours( plate_info[i].charImage, storage, &cvSeq, sizeof(CvContour),
						CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

		if( cvSeq != NULL )
		{
			cvDrawContours( plate_info[i].charImage, cvSeq, cvScalarAll(255), cvScalarAll(0), 1, -1, 8, cvPoint(0,0) );
			plate_info[i].contourArea = cvContourArea( cvSeq, CV_WHOLE_SEQ, 0 );
			plate_info[i].arcLength = cvArcLength( cvSeq, CV_WHOLE_SEQ, 0 );
		}
		else
		{
			//No contour recovered from the character image
			plate_info[i].contourArea = 0.0;
			plate_info[i].arcLength = 0.0;
		}
		// cvSaveImage( plate_buffer[i], plate_info[i].charImage, NULL );
	}

	//Release every intermediate image (cvReleaseImage NULLs the pointers) and
	//the contour storage; note this function owns and releases orig_img
	cvReleaseImage(&orig_img);
	cvReleaseImage(&gray_img);
	cvReleaseImage(&bw_img);
	cvReleaseImage(&rev_img);
	cvReleaseImage(&resize_img);
	cvReleaseImage(&resize_crop);
	cvReleaseMemStorage(&storage);

	return plate_info;
}
//@char_info: pointer to an array declared in main charInfo * processTemplateChars( IplImage * template_img, int * numContours ) { //Create placeholder for black and with image IplImage * templateBW_img = cvCreateImage(cvGetSize(template_img), IPL_DEPTH_8U, 1); //Skipping the convert to graysalce step... //Convert image to binary (black and white) cvThreshold( template_img, templateBW_img, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); //Invert image cvNot( templateBW_img, templateBW_img ); //Save results // cvSaveImage( "templateBW_img.jpg", templateBW_img, NULL ); //Now let's see if we can find (and count) the contours //Create necessary structures CvSeq * cvSeq2 = 0; CvMemStorage * storage2 = cvCreateMemStorage(0); //Find contours // int numContours; *numContours = cvFindContours( templateBW_img, storage2, &cvSeq2, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); //Verify contour count (should be 36) printf("\nContours found: %d\n", *numContours); //Now actually put the lines in cvDrawContours( templateBW_img, cvSeq2, cvScalarAll(255), cvScalarAll(0), 1, -1, 8, cvPoint(0,0) ); //Fill an array with the array positing of each character for easier //processing later (if not now...ugh) //For PA_Keystone_Template.jpg: char * position_text[] = {"7", "5", "4", "1", "V", "9", "8", "6", "3", "2", "0", "Z", "Y", "X", "W", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}; //for PA_Template.jpg: /* char * position_text[] = {"9", "8", "7", "5", "3", "2", "1", "0", "Z", "Y", "X", "W", "V", "U", "T", "6", "4", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}; */ //Allocate an array ofcharInfo structures struct charInfo * char_info = malloc( sizeof(charInfo) * (*numContours) ); //Put everything in its place int i = 0; CvRect char_rect; while( cvSeq2 )//for( i = 0; i < (*numContours); i++) { //Get bounding rect char_rect = cvBoundingRect( cvSeq2, 0 ); 
//Create and set individual images char_info[i].charImage = NULL; char_info[i].charImage = cvCreateImage( cvSize( char_rect.width, char_rect.height), IPL_DEPTH_8U, 1); //Set ROI for copying and copy cvSetImageROI( templateBW_img, char_rect ); cvCopy( templateBW_img, char_info[i].charImage, NULL ); //Set remaining charInfo data - area and perimemter moved to 'fix' //below // char_info[i].contourArea = cvContourArea( cvSeq2, CV_WHOLE_SEQ, 0 ); // char_info[i].arcLength = cvArcLength( cvSeq2, CV_WHOLE_SEQ, -1 ); char_info[i].charText = position_text[i]; //***might need strcpy*** cvSeq2 = cvSeq2->h_next; i++; } *numContours = i; printf("\nDrawn template letters: %d\n", *numContours); //Fix area and perimeter prob*********************** for( i = 0; i < (*numContours); i++) { cvClearMemStorage(storage2); int singlecon = cvFindContours( char_info[i].charImage, storage2, &cvSeq2, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); //templateBW_img cvDrawContours( char_info[i].charImage, cvSeq2, cvScalarAll(255), cvScalarAll(0), 1, -1, 8, cvPoint(0,0) ); // printf("\nnumContours single char %d\n", singlecon); char_info[i].contourArea = cvContourArea( cvSeq2, CV_WHOLE_SEQ, 0 ); char_info[i].arcLength = cvArcLength( cvSeq2, CV_WHOLE_SEQ, -1 ); // printf("templateChar contour area: %f\n", char_info[i].contourArea); // printf("templateChar perimeter: %f\n", char_info[i].arcLength); } //Test the above - works! // printf("\nCharacter 23 Info---\nText: %s\nContour Area: %f\n", char_info[23].charText, char_info[23].contourArea); /* //Save individually (yikes!) 
char * buffer[] = {"0.jpg", "1.jpg", "2.jpg", "3.jpg", "4.jpg","5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg", "10.jpg", "11.jpg", "12.jpg", "13.jpg", "14.jpg", "15.jpg", "16.jpg", "17.jpg", "18.jpg", "19.jpg", "20.jpg", "21.jpg", "22.jpg", "23.jpg", "24.jpg", "25.jpg", "26.jpg", "27.jpg", "28.jpg", "29.jpg", "30.jpg", "31.jpg", "32.jpg", "33.jpg", "34.jpg", "35.jpg"}; for( i = 0; i < *numContours; i++) { cvSaveImage( buffer[i], char_info[i].charImage, NULL ); //template_chars[i] } */ //Free stuff (put here temporarily) cvReleaseImage(&template_img); cvReleaseImage(&templateBW_img); // for( i = 0; i < numContours; i++) // { cvReleaseImage(&template_chars[i]); } // free(template_chars); cvReleaseImage(&template_img); cvReleaseImage(&templateBW_img); cvReleaseMemStorage(&storage2); return char_info; }
float mvContours::match_rectangle (IplImage* img, MvRBoxVector* rbox_vector, COLOR_TRIPLE color, float min_lw_ratio, float max_lw_ratio, int method) { assert (img != NULL); assert (img->nChannels == 1); int n_contours = find_contour_and_check_errors (img); if (n_contours <= 0 || m_contours == NULL) return -1; bin_calc.start(); CvSeq* c_contour = m_contours; int n_boxes = 0; // debug //mvWindow window("contours"); // examine each contour, put the passing ones into the circle_vector for (int C = 0; C < n_contours; C++, c_contour = c_contour->h_next) { // debug /*cvZero (img); draw_contours (c_contour, img); window.showImage (img); cvWaitKey(0); */ // check that there are at least 6 points if (c_contour->total < 6) { DEBUG_PRINT ("Rect Fail: Contour has less than 6 points\n"); continue; } // check the contour's area to make sure it isnt too small double area = cvContourArea(c_contour); if (method == 0) { if (area < img->width*img->height/600) { DEBUG_PRINT ("Rect Fail: Contour too small!\n"); continue; } } CvBox2D Rect = cvMinAreaRect2(c_contour, m_storage); float angle = Rect.angle; float length = Rect.size.height; float width = Rect.size.width; // depending on which is the long side we assign the sides and angle differently if (length < width) { length = Rect.size.width; width = Rect.size.height; angle += 90; } if (length/width < min_lw_ratio || length/width > max_lw_ratio) { DEBUG_PRINT ("Rect Fail: length/width = %6.2f\n", length/width); continue; } double perimeter = cvArcLength (c_contour, CV_WHOLE_SEQ, 1); double perimeter_ratio = perimeter / (2*length+2*width); double area_ratio = area / (length*width); if (method == 0) { if (area_ratio < 0.75 || perimeter_ratio > 1.2 || perimeter_ratio < 0.85) { DEBUG_PRINT ("Rect Fail: Area / Peri: %6.2lf / %6.2lf\n", area_ratio, perimeter_ratio); continue; } } else if (method == 1) { if (area_ratio < 0.55 || perimeter_ratio > 1.4 || perimeter_ratio < 0.75) { DEBUG_PRINT ("Rect Fail: Area / Peri: %6.2lf / %6.2lf\n", 
area_ratio, perimeter_ratio); continue; } } MvRotatedBox rbox; rbox.center.x = Rect.center.x; rbox.center.y = Rect.center.y; rbox.length = length; rbox.width = width; rbox.angle = angle; rbox.m1 = color.m1; rbox.m2 = color.m2; rbox.m3 = color.m3; assign_color_to_shape (color, &rbox); rbox.validity = area_ratio; rbox_vector->push_back(rbox); // draw a line to indicate the angle /*CvPoint p0, p1; int delta_x = length/2 * -sin(angle*CV_PI/180.f); int delta_y = length/2 * cos(angle*CV_PI/180.f); p0.x = x - delta_x; p0.y = y - delta_y; p1.x = x + delta_x; p1.y = y + delta_y; cvLine (img, p0, p1, CV_RGB(50,50,50), 2); */ n_boxes++; } bin_calc.stop(); return n_boxes; }
float mvContours::match_ellipse (IplImage* img, MvRBoxVector* ellipse_vector, COLOR_TRIPLE color, float min_lw_ratio, float max_lw_ratio, int method) { assert (img != NULL); assert (img->nChannels == 1); int n_contours = find_contour_and_check_errors(img); if (n_contours < 1 || m_contours == NULL) return -1; bin_calc.start(); CvSeq* c_contour = m_contours; int n_circles = 0; // debug //mvWindow window("contours"); // examine each contour, put the passing ones into the circle_vector for (int C = 0; C < n_contours; C++, c_contour = c_contour->h_next) { // debug /*cvZero (img); draw_contours (c_contour, img); window.showImage (img); cvWaitKey(0);*/ // check that there are at least 6 points if (c_contour->total < 6) { continue; } // check the contour's area to make sure it isnt too small double area = cvContourArea(c_contour); if (area < img->width*img->height/1000) { DEBUG_PRINT ("Ellipse Fail: Contour too small!\n"); continue; } // get min enclosing circle and radius //CvBox2D ellipse = cvFitEllipse2(c_contour); CvBox2D ellipse = cvMinAreaRect2(c_contour, m_storage); int height = ellipse.size.height; int width = ellipse.size.width; int a = height/2; int b = width/2; float height_to_width = static_cast<float>(height)/width; double perimeter = cvArcLength (c_contour, CV_WHOLE_SEQ, 1); if (height > img->width/2 || height < 0 || width > img->width/2 || width < 0) { continue; } // check length to width if (height_to_width < min_lw_ratio || height_to_width > max_lw_ratio) { DEBUG_PRINT ("Ellipse Fail: height_to_width = %6.2f\n", height_to_width); continue; } // do checks on area and perimeter double ellipse_area = (CV_PI*a*b); double ellipse_perimeter = CV_PI*(3*(a+b)-sqrt((3*a+b)*(a+3*b))); double area_ratio = area / ellipse_area; double perimeter_ratio = perimeter / ellipse_perimeter; DEBUG_PRINT ("Ellipse: area=%5.2lf/%5.2lf, perimeter=%5.2lf/%5.2lf\n", area, ellipse_area, perimeter, ellipse_perimeter); if (area_ratio < 0.75 || area_ratio > 1.25) { DEBUG_PRINT ("Ellipse 
Fail: Area: %6.2lf\n", area_ratio); continue; } if (perimeter_ratio < 0.75 || perimeter_ratio > 1.25) { DEBUG_PRINT ("Ellipse Fail: perimeter: %6.2lf\n", perimeter_ratio); continue; } MvRotatedBox rbox; rbox.center.x = ellipse.center.x; rbox.center.y = ellipse.center.y; rbox.length = height; rbox.width = width; rbox.angle = ellipse.angle; rbox.m1 = color.m1; rbox.m2 = color.m2; rbox.m3 = color.m3; assign_color_to_shape (color, &rbox); rbox.validity = area_ratio; ellipse_vector->push_back(rbox); //cvEllipse (img, cvPoint(ellipse.center.x,ellipse.center.y), cvSize(b,a), ellipse.angle, 0, 359, CV_RGB(50,50,50), 2); //window.showImage (img); //cvWaitKey(0); n_circles++; } bin_calc.stop(); return n_circles; }
//-------------------------------------------------------------------------------- int ContourFinder::findContours( ofxCvGrayscaleImage& input, int minArea, int maxArea, int nConsidered, bool bFindHoles, bool bUseApproximation) { reset(); // opencv will clober the image it detects contours on, so we want to // copy it into a copy before we detect contours. That copy is allocated // if necessary (necessary = (a) not allocated or (b) wrong size) // so be careful if you pass in different sized images to "findContours" // there is a performance penalty, but we think there is not a memory leak // to worry about better to create mutiple contour finders for different // sizes, ie, if you are finding contours in a 640x480 image but also a // 320x240 image better to make two ContourFinder objects then to use // one, because you will get penalized less. if( inputCopy.width == 0 ) { inputCopy.allocate( input.width, input.height ); inputCopy = input; } else { if( inputCopy.width == input.width && inputCopy.height == input.height ) { inputCopy = input; } else { // we are allocated, but to the wrong size -- // been checked for memory leaks, but a warning: // be careful if you call this function with alot of different // sized "input" images!, it does allocation every time // a new size is passed in.... //inputCopy.clear(); inputCopy.allocate( input.width, input.height ); inputCopy = input; } } CvSeq* contour_list = NULL; contour_storage = cvCreateMemStorage( 1000 ); storage = cvCreateMemStorage( 1000 ); CvContourRetrievalMode retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL; teste = inputCopy.getCvImage(); cvFindContours( teste, contour_storage, &contour_list, sizeof(CvContour), retrieve_mode, bUseApproximation ? 
CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE ); CvSeq* contour_ptr = contour_list; nCvSeqsFound = 0; // put the contours from the linked list, into an array for sorting while( (contour_ptr != NULL) ) { float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) ); if( (area > minArea) && (area < maxArea) ) { if (nCvSeqsFound < TOUCH_MAX_CONTOUR_LENGTH){ cvSeqBlobs[nCvSeqsFound] = contour_ptr; // copy the pointer nCvSeqsFound++; } } contour_ptr = contour_ptr->h_next; } // sort the pointers based on size if( nCvSeqsFound > 0 ) { qsort( cvSeqBlobs, nCvSeqsFound, sizeof(CvSeq*), qsort_carea_compare); } // now, we have nCvSeqsFound contours, sorted by size in the array // cvSeqBlobs let's get the data out and into our structures that we like for( int i = 0; i < MIN(nConsidered, nCvSeqsFound); i++ ) { blobs.push_back( Blob() ); float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ ); cvMoments( cvSeqBlobs[i], myMoments ); // this is if using non-angle bounding box CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 ); blobs[i].boundingRect.x = rect.x; blobs[i].boundingRect.y = rect.y; blobs[i].boundingRect.width = rect.width; blobs[i].boundingRect.height = rect.height; cvCamShift(teste, rect, cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ), &track_comp, &track_box); // this is for using angle bounding box CvBox2D32f box; box = cvMinAreaRect2( cvSeqBlobs[i] ); blobs[i].angleBoundingRect.x = box.center.x; blobs[i].angleBoundingRect.y = box.center.y; blobs[i].angleBoundingRect.width = box.size.height; blobs[i].angleBoundingRect.height = box.size.width; blobs[i].angle = box.angle; // assign other parameters blobs[i].area = fabs(area); blobs[i].hole = area < 0 ? 
true : false; blobs[i].length = cvArcLength(cvSeqBlobs[i]); blobs[i].centroid.x = (int) (myMoments->m10 / myMoments->m00); blobs[i].centroid.y = (int) (myMoments->m01 / myMoments->m00); blobs[i].lastCentroid.x = (int) 0; blobs[i].lastCentroid.y = (int) 0; // get the points for the blob: CvPoint pt; CvSeqReader reader; cvStartReadSeq( cvSeqBlobs[i], &reader, 0 ); for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, cvSeqBlobs[i]->total); j++ ) { CV_READ_SEQ_ELEM( pt, reader ); blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) ); } blobs[i].nPts = blobs[i].pts.size(); } nBlobs = blobs.size(); // Free the storage memory. // Warning: do this inside this function otherwise a strange memory leak if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); } if( storage != NULL ) { cvReleaseMemStorage(&storage); } return nBlobs; }
//--------------------------------------------------------------------------------
// Finds contours in `input` and splits them into two kinds:
//   * tracked objects  -- contours whose rotated-rect size matches a template
//                         (pushed to `objects`);
//   * finger blobs     -- remaining contours passing the area filter
//                         (pushed to `blobs`).
// Returns the number of finger blobs found (nBlobs), not the object count.
int ContourFinder::findContours( ofxCvGrayscaleImage& input, int minArea, int maxArea, int nConsidered, bool bFindHoles, bool bUseApproximation) {
    reset();

    // opencv will clobber the image it detects contours on, so we copy it
    // into inputCopy before detecting contours.  The copy is allocated if
    // necessary (necessary = (a) not allocated or (b) wrong size).  Passing
    // many different sizes here causes an allocation every time, so prefer
    // one ContourFinder per image size (e.g. one for 640x480 and one for
    // 320x240) to minimize the penalty.
    if( inputCopy.width == 0 ) {
        inputCopy.allocate( input.width, input.height );
        inputCopy = input;
    } else {
        if( inputCopy.width == input.width && inputCopy.height == input.height )
            inputCopy = input;
        else {
            // allocated, but to the wrong size -- reallocate and copy
            inputCopy.allocate( input.width, input.height );
            inputCopy = input;
        }
    }

    CvSeq* contour_list = NULL;
    contour_storage = cvCreateMemStorage( 1000 );
    storage = cvCreateMemStorage( 1000 );

    // CV_RETR_LIST also retrieves interior (hole) contours
    CvContourRetrievalMode retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
    cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode,
                    bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
    CvSeq* contour_ptr = contour_list;
    nCvSeqsFound = 0;

    // walk the contour linked list, classifying each contour
    while( (contour_ptr != NULL) ) {
        CvBox2D box=cvMinAreaRect2(contour_ptr);
        int objectId;  // If the contour is an object, then objectId is its ID
        // presumably getTemplateId matches the rotated-rect dimensions
        // against known object templates -- TODO confirm against the
        // templates implementation
        objectId=(bTrackObjects)? templates->getTemplateId(box.size.width,box.size.height): -1;

        if(objectId != -1 )  // the blob is a recognized object
        {
            Blob blob = Blob();
            blob.id = objectId;
            blob.isObject = true;
            // signed area: negative indicates a hole contour
            float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
            cvMoments( contour_ptr, myMoments );

            // non-angle (axis-aligned) bounding box
            CvRect rect = cvBoundingRect( contour_ptr, 0 );
            blob.boundingRect.x = rect.x;
            blob.boundingRect.y = rect.y;
            blob.boundingRect.width = rect.width;
            blob.boundingRect.height = rect.height;

            // angle (rotated) bounding rectangle
            blob.angleBoundingBox=box;
            blob.angleBoundingRect.x = box.center.x;
            blob.angleBoundingRect.y = box.center.y;
            blob.angleBoundingRect.width = box.size.height;
            blob.angleBoundingRect.height = box.size.width;
            blob.angle = box.angle;

            // TEMPORARY initialization to 0; displacement and acceleration
            // are calculated later -- this prevents sending stale data
            blob.D.x = 0;
            blob.D.y = 0;
            blob.maccel = 0;

            // assign other parameters
            blob.area = fabs(area);
            blob.hole = area < 0 ? true : false;
            blob.length = cvArcLength(contour_ptr);
            // NOTE(review): no guard against myMoments->m00 == 0 here -- a
            // degenerate contour would divide by zero; confirm upstream
            // filtering makes that impossible
            blob.centroid.x = (myMoments->m10 / myMoments->m00);
            blob.centroid.y = (myMoments->m01 / myMoments->m00);
            blob.lastCentroid.x = 0;
            blob.lastCentroid.y = 0;

            // copy out the contour points.  NB: objects copy ALL points,
            // unlike the finger branch below which caps the count at
            // TOUCH_MAX_CONTOUR_LENGTH
            CvPoint pt;
            CvSeqReader reader;
            cvStartReadSeq( contour_ptr, &reader, 0 );
            for( int j=0; j < contour_ptr->total; j++ ) {
                CV_READ_SEQ_ELEM( pt, reader );
                blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
            }
            blob.nPts = blob.pts.size();

            objects.push_back(blob);
        }
        else if(bTrackFingers)
        {
            // finger blobs must pass the (exclusive) area filter
            float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
            if( (area > minArea) && (area < maxArea) ) {
                Blob blob=Blob();
                // deliberately shadows the fabs'd `area` above: this signed
                // value drives the hole test below
                float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
                cvMoments( contour_ptr, myMoments );

                // non-angle (axis-aligned) bounding box
                CvRect rect = cvBoundingRect( contour_ptr, 0 );
                blob.boundingRect.x = rect.x;
                blob.boundingRect.y = rect.y;
                blob.boundingRect.width = rect.width;
                blob.boundingRect.height = rect.height;

                // angle (rotated) bounding rectangle -- reuses `box`
                // computed at the top of the loop
                blob.angleBoundingRect.x = box.center.x;
                blob.angleBoundingRect.y = box.center.y;
                blob.angleBoundingRect.width = box.size.height;
                blob.angleBoundingRect.height = box.size.width;
                blob.angle = box.angle;

                // assign other parameters
                blob.area = fabs(area);
                blob.hole = area < 0 ? true : false;
                blob.length = cvArcLength(contour_ptr);
                // AlexP
                // The cast to int causes errors in tracking since centroids are calculated in
                // floats and they might land between integer pixel values (which is what we really want)
                // This not only makes tracking more accurate but also more fluid
                blob.centroid.x = (myMoments->m10 / myMoments->m00);
                blob.centroid.y = (myMoments->m01 / myMoments->m00);
                blob.lastCentroid.x = 0;
                blob.lastCentroid.y = 0;

                // get the points for the blob (capped at TOUCH_MAX_CONTOUR_LENGTH)
                CvPoint pt;
                CvSeqReader reader;
                cvStartReadSeq( contour_ptr, &reader, 0 );
                for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
                    CV_READ_SEQ_ELEM( pt, reader );
                    blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
                }
                blob.nPts = blob.pts.size();

                blobs.push_back(blob);
            }
        }
        contour_ptr = contour_ptr->h_next;
    }

    nBlobs = blobs.size();
    nObjects = objects.size();

    // Free the storage memory.
    // Warning: do this inside this function otherwise a strange memory leak
    if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
    if( storage != NULL ) { cvReleaseMemStorage(&storage); }
    //printf("Number of objects : %d\n",nObjects);
    return nBlobs;
}