bool SimpleFilter::isQRMarker( CvSeq* contour, float error ) { if( !checkContourDepth( contour ) ) return false; CvSeq* seq = contour; CvBox2D box1 = cvMinAreaRect2( seq ); float area1 = ( box1.size.width * box1.size.height ); seq = seq->v_next; CvBox2D box2 = cvMinAreaRect2( seq ); float area2 = ( box2.size.width * box2.size.height ); seq = seq->v_next; CvBox2D box3 = cvMinAreaRect2( seq ); float area3 = ( box3.size.width * box3.size.height ); float k1 = area1/area2; float k2 = area2/area3; if( abs( k1 - QR_MARKER_RATIO_1 ) < error && abs( k2 - QR_MARKER_RATIO_2 ) < error ) return true; return false; }
void sharingan() { int lowtherd =120; int hightherd=130; int small_size=500; int contour_num; cvCvtColor(vision,gray_vision,CV_BGR2GRAY); //Gauss smooth cvSmooth( gray_vision,gray_vision,CV_GAUSSIAN,3,3,0,0); //Canny edge detect cvCanny(gray_vision,gray_vision,lowtherd,hightherd,3); //Threshold cvThreshold(gray_vision,gray_vision,0,255,CV_THRESH_BINARY); //picture used to display //find countor CvSeq * fc=NULL; CvSeq * c; cvClearMemStorage(mem); contour_num=cvFindContours(gray_vision,mem,&fc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0)); // printf("find counters:%d\n",contour_num); c=fc; cvCopyImage(blank,road_vision); cvCopyImage(blank,sign_vision); sign_flag=0; line_num=0; corn_num=0; while(c!=NULL) { CvBox2D rect = cvMinAreaRect2(c,mem); double width=rect.size.height>rect.size.width?rect.size.height:rect.size.width; double height=rect.size.height<=rect.size.width?rect.size.height:rect.size.width; if(height*width>small_size) { double s; s=cvContourArea(c,CV_WHOLE_SEQ,0); if(s>500) { sign_flag=1; cvDrawContours(sign_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0)); } else if(s<=500) { if(width>50&&height<15) { line_box[line_num]=rect; line_num++; } else { corn_box[line_num]=rect; corn_num++; } cvDrawContours(road_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0)); } } c=c->h_next; } }
void reg_sign() { int counter_num=0; IplImage * temp; temp= cvCreateImage(cvSize(sign_rect.width,sign_rect.height),8,1); cvCvtColor(reg_vision,temp,CV_BGR2GRAY); //Gauss smooth cvSmooth(temp,temp,CV_GAUSSIAN,3,3,0,0); //Canny edge detect cvCanny(temp,temp,120,150,3); //Threshold CvSeq * sc; CvSeq * c; cvThreshold(temp,temp,0,255,CV_THRESH_BINARY); counter_num=cvFindContours(temp,mem,&sc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0)); double rmin=-1; double r; c=sc; CvBox2D minrect; while(c!=NULL) { CvBox2D rect = cvMinAreaRect2(c,mem); r=((double)(rect.size.width*rect.size.height))/(sign_rect.width*sign_rect.height); if(r>0.1) { if(r<rmin||rmin<0) { rmin=r; minrect=rect; } } c=c->h_next; } //printf("counter:%d rate:%f\n",counter_num,rmin); //cvShowImage("reg_vision",reg_vision); if(rmin<0.2) { cur_sign=GO_AHEAD; printf("GO_AHEAD!\n"); } else if(rmin<0.5) { cur_sign=TURN_RIGHT; printf("TURN_RIGHT!\n"); } else if(rmin<0.7) { cur_sign=STOP; printf("STOP!\n"); } else { cur_sign=NONE; printf("NONE!\n"); } cvReleaseImage(®_vision); cvReleaseImage(&temp); }
// Find the first approximated contour whose area is between ~1/6 and ~2/3
// of the whole image area and store its min-area box in *box.
// Returns 0 on success, 1 when no suitable contour exists.
int contorsFindBox(IplImage *src, CvMemStorage* storage, CvBox2D *box)
{
    CvSeq *contours;

    // BUG FIX: 'area' used to be assigned inside assert(); with NDEBUG the
    // assignment disappeared and 'area' stayed uninitialized.
    double area = (double)src->width * src->height;
    assert(area > 0);

    int ret = cvFindContours(src, storage, &contours, sizeof(CvContour),
                             CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if (ret == 0)
        return 1;

    // FIX: approximate the whole chain once up front. The original called
    // cvApproxPoly in recursive mode on every iteration AND overwrote the
    // loop cursor, re-approximating already-approximated contours.
    contours = cvApproxPoly(contours, sizeof(CvContour), storage,
                            CV_POLY_APPROX_DP, 5, 1);

    for (CvSeq *c = contours; c != NULL; c = c->h_next) {
        double contour_area = fabs(cvContourArea(c, CV_WHOLE_SEQ, 0));
        if (contour_area <= 0)
            continue;                       // avoid division by zero
        double ratio = area / contour_area; // image area / contour area
        if (ratio > 1.5 && ratio < 6.0) {
            CvBox2D b = cvMinAreaRect2(c, NULL);
            memcpy(box, &b, sizeof(CvBox2D));
            return 0;
        }
    }
    return 1;
}
void mvContours::get_rect_parameters (IplImage* img, CvSeq* contour1, CvPoint ¢roid, float &length, float &angle) { assert (contour1->total > 6); // needed by cvFitEllipse2 CvBox2D Rect = cvMinAreaRect2(contour1, m_storage); angle = Rect.angle; int height = Rect.size.height; int width = Rect.size.width; // depending on which is the long side we assign the angle differently if (height > width) { length = height; } else { length = width; angle += 90; } int x = Rect.center.x; int y = Rect.center.y; centroid.x = x - img->width*0.5; centroid.y = -y + img->height*0.5; // the centroid y is measured to the bottom of the image // draw a line to indicate the angle CvPoint p0, p1; int delta_x = length/2 * -sin(angle*CV_PI/180.f); int delta_y = length/2 * cos(angle*CV_PI/180.f); p0.x = x - delta_x; p0.y = y - delta_y; p1.x = x + delta_x; p1.y = y + delta_y; cvLine (img, p0, p1, CV_RGB(50,50,50), 2); }
// Return the average of the centers of the three nested marker boxes
// (outer ring, inner ring, core) of a QR position marker contour.
CvPoint SimpleFilter::getCenterPoint(CvSeq *contour)
{
    CvBox2D outer  = cvMinAreaRect2( contour );
    CvBox2D middle = cvMinAreaRect2( contour->v_next );
    CvBox2D inner  = cvMinAreaRect2( contour->v_next->v_next );

    CvPoint center;
    center.x = (outer.center.x + middle.center.x + inner.center.x) / 3;
    center.y = (outer.center.y + middle.center.y + inner.center.y) / 3;
    return center;
}
/* * PlatoFilter combines some of the other pre-defined filters to * determine if a given contour belongs to a plastic Dish */ int Contours::platoFilter(){ CvMemStorage* mem = cvCreateMemStorage(0); //Number of points should be at least 6. if(this->c->total<6) return false; CvBox2D box=cvMinAreaRect2(this->c,mem); CvBox2D box2=cvFitEllipse2(this->c); cvReleaseMemStorage(&mem); double majorAxis,minorAxis; double majorAxis2,minorAxis2; if(box2.size.width>box2.size.height){ majorAxis=box2.size.width; minorAxis=box2.size.height; }else{ minorAxis=box2.size.width; majorAxis=box2.size.height; } if(box.size.width>box.size.height){ majorAxis2=box.size.width; minorAxis2=box.size.height; }else{ minorAxis2=box.size.width; majorAxis2=box.size.height; } double boxDiff=fabs(majorAxis - majorAxis2) + fabs(minorAxis-minorAxis2); //eccentricity formula double ecc=sqrt(majorAxis*majorAxis -minorAxis*minorAxis)/majorAxis; double calcArea=miPI*(majorAxis/2)*(minorAxis/2); double realArea=this->getArea(); //Area of approximated elipse and of the contour should be similar. if(fabs(calcArea-realArea)/realArea > 0.2) return false; //gets circularity double circ=this->getCircularity(); if(circ<10 || circ>20) return false; if(ecc<0.65 || ecc>0.95) return false; return true; }
// the function draws all the squares in the image void drawSquares(IplImage* imgSrc, CvSeq* squares) { CvSeqReader reader; IplImage* imgCopy = cvCloneImage(imgSrc); int i; // initialize reader of the sequence cvStartReadSeq(squares, &reader, 0); // read 4 sequence elements at a time (all vertices of a square) printf("Found %d rectangles in image\n", squares->total / 4); for (i = 0; i < squares->total; i += 4) { CvPoint* pntRect = gPnt; int pntCount = 4; CvSeq* seqRect = cvCreateSeq(CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), gStorage); // read 4 vertices memcpy(gPnt, reader.ptr, squares->elem_size); CV_NEXT_SEQ_ELEM(squares->elem_size, reader); cvSeqPush(seqRect, &pntRect[0]); memcpy(gPnt + 1, reader.ptr, squares->elem_size); CV_NEXT_SEQ_ELEM(squares->elem_size, reader); cvSeqPush(seqRect, &pntRect[1]); memcpy(gPnt + 2, reader.ptr, squares->elem_size); CV_NEXT_SEQ_ELEM(squares->elem_size, reader); cvSeqPush(seqRect, &pntRect[2]); memcpy(gPnt + 3, reader.ptr, squares->elem_size); CV_NEXT_SEQ_ELEM(squares->elem_size, reader); cvSeqPush(seqRect, &pntRect[3]); // draw the square as a closed polyline cvPolyLine(imgCopy, &pntRect, &pntCount, 1, 1, CV_RGB(0, 255, 0), 1, CV_AA, 0); // draw the min outter rect CvBox2D box = cvMinAreaRect2(seqRect, NULL); CvPoint2D32f ptBox[4]; cvBoxPoints(box, ptBox); for(int i = 0; i < 4; ++i) { cvLine(imgCopy, cvPointFrom32f(ptBox[i]), cvPointFrom32f(ptBox[((i+1)%4)?(i+1):0]), CV_RGB(255,0,0)); } } // show the resultant image cvShowImage(wndname, imgCopy); cvReleaseImage(&imgCopy); }
// Legacy-style wrapper around cvMinAreaRect2: returns the rotated bounding
// box of `points` as an anchor corner plus the two edge vectors leaving it.
// The four unnamed ints are kept only for signature compatibility.
void cvMinAreaRect( CvPoint* points, int n, int, int, int, int,
                    CvPoint2D32f* anchor, CvPoint2D32f* vect1, CvPoint2D32f* vect2 )
{
    CvMat pointMat = cvMat( 1, n, CV_32SC2, points );
    CvBox2D box = cvMinAreaRect2( &pointMat, 0 );

    CvPoint2D32f corner[4];
    cvBoxPoints( box, corner );

    *anchor = corner[0];
    vect1->x = corner[1].x - corner[0].x;
    vect1->y = corner[1].y - corner[0].y;
    vect2->x = corner[3].x - corner[0].x;
    vect2->y = corner[3].y - corner[0].y;
}
/* boxArea filter - checks that the ratio of the contour area to its
 * min-area bounding-box area exceeds minAreaRatio. Returns 1/0. */
int Contours::boxAreaFilter(double minAreaRatio){
    CvMemStorage* mem = cvCreateMemStorage(0);
    CvBox2D box = cvMinAreaRect2(this->c, mem);
    double boxArea = box.size.width * box.size.height;
    double contourArea = fabs(cvContourArea(this->c, CV_WHOLE_SEQ));
    cvReleaseMemStorage(&mem);

    // BUG FIX: guard degenerate boxes; previously boxArea == 0 made
    // dif/boxArea +/-inf (or NaN), and a zero-size box could even pass
    // the filter (1 - (-inf) > ratio).
    if (boxArea <= 0)
        return 0;

    double dif = boxArea - contourArea;
    double ret = 1 - (dif / boxArea);   // == contourArea / boxArea
    return ret > minAreaRatio;
}
//在图像srcImg上根据contour轮廓画上最小外接矩形 CvBox2D DrawMinAreaRect(IplImage *srcImg,CvSeq *contour,CvScalar color/*=CV_RGB(255,0,0)*/) { CvBox2D box=cvMinAreaRect2(contour); CvPoint2D32f box_vtx[4]; CvPoint pt0, pt; cvBoxPoints( box, box_vtx ); pt0.x = cvRound(box_vtx[3].x); pt0.y = cvRound(box_vtx[3].y); for(int i = 0; i < 4; i++ ) { pt.x = cvRound(box_vtx[i].x); pt.y = cvRound(box_vtx[i].y); cvLine(srcImg, pt0, pt, color, 1, CV_AA, 0); pt0 = pt; } return box; }
/* * Prints a contour on a dst Image. Used for debugging. * prints text at the side of a contour. * depthLevel sets the level in the contour tree(to include/exclue holes) */ void Contours::printContour(int depthLevel, CvScalar color,IplImage * dst){ CvFont font; int line_type=CV_AA; char * a=(char *) malloc(20); char * b=(char *) malloc(20); char * c=(char *) malloc(20); char * d=(char *) malloc(20); char * e=(char *) malloc(20); cvDrawContours( dst, this->c, CV_RGB(255,0,0), CV_RGB(0,255,0), depthLevel, 3, CV_AA, cvPoint(0,0) ); CvMemStorage* mem = cvCreateMemStorage(0); CvBox2D box=cvMinAreaRect2(this->c,mem); //~ traversePoints(this->c); std::vector<int> centroid=this->getCentroid(); CvPoint pt2=cvPoint(centroid[0]+5,centroid[1]+5); CvPoint pt3=cvPoint(centroid[0]+5,centroid[1]+15); CvPoint pt4=cvPoint(centroid[0]+5,centroid[1]+25); CvPoint pt5=cvPoint(centroid[0]+5,centroid[1]+35); CvPoint pt6=cvPoint(centroid[0]+5,centroid[1]+45); sprintf(a,"per: %g",this->getPerimeter()); sprintf(b,"zone: %d",getPointZone(this->x,this->y)); sprintf(c,"area: %g",this->getArea()); sprintf(d,"ecc: %g",this->getPerimeter()*this->getPerimeter()/this->getArea()); //~ sprintf(d,"boxArea: %g",(double) this->getArea()/(box.size.width*box.size.height)); cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0.0,0.5, line_type ); cvPutText( dst, a, pt2, &font, CV_RGB(255,255,0)); cvPutText( dst, c, pt3, &font, CV_RGB(255,255,0)); cvPutText( dst, b, pt4, &font, CV_RGB(255,255,0)); cvPutText( dst, d, pt5, &font, CV_RGB(255,255,0)); //~ free(a); cvReleaseMemStorage(&mem); }
/* boxArea filter - checks that the area of a contour * is at least minAreaRatio of that of the contour bounding box*/ int Contours::boxAreaFilter(double minAreaRatio){ CvBox2D box; CvMemStorage* mem = cvCreateMemStorage(0); double boxArea,contourArea,dif,ret; box=cvMinAreaRect2(this->c,mem); boxArea=box.size.width*box.size.height; contourArea=this->getArea(); dif=boxArea-contourArea; ret=(1- (dif/boxArea)); /* Use this to set it by zone int zone=getPointZone(this->x,this->y); double minAreaRatioByZone[]={0,0.6,0.55,0.55,0.55}; return ret > minAreaRatioByZone[zone]; */ cvReleaseMemStorage( &mem ); return ret > minAreaRatio; }
/* rectangular aspect filter - checks that a contour's min-area bounding
 * rectangle has certain width/height proportions, testing the ratio in
 * both orientations. Returns 1 when it passes, 0 otherwise. */
int Contours::rectangularAspectFilter(double minWidthHeightRatio, double maxWidthHeightRatio ){
    CvMemStorage* mem = cvCreateMemStorage(0);
    CvBox2D box = cvMinAreaRect2(this->c, mem);

    double w = box.size.width;
    double h = box.size.height;

    // explicit parentheses: && binds tighter than ||, same grouping as before
    int ret = ((w > minWidthHeightRatio * h && w < maxWidthHeightRatio * h) ||
               (h > minWidthHeightRatio * w && h < maxWidthHeightRatio * w)) ? 1 : 0;

    cvReleaseMemStorage( &mem );
    return ret;
}
// Recursively walk the contour tree/list and collect the contours that
// look like QR position markers and fill their min-area box well enough.
QVector<CvSeq*> SimpleFilter::collectContours( CvSeq* contour )
{
    QVector<CvSeq*> contours;
    if( !contour ) {
        return contours;
    }

    if( isQRMarker( contour, QR_MARKER_ERROR ) ) {
        CvBox2D box = cvMinAreaRect2( contour );
        double contourArea = fabs( cvContourArea( contour ) );
        double boxArea = (double) box.size.width * (double) box.size.height;

        // i1: fill ratio of the contour inside its min-area box.
        double i1 = contourArea / boxArea;
        // i2: box squareness (short side / long side).
        double i2 = ( box.size.width < box.size.height )
                        ? (double) box.size.width  / (double) box.size.height
                        : (double) box.size.height / (double) box.size.width;

        // BUG FIX: the area comparison used integer abs(), which truncated
        // the float difference before comparing; use fabs() so the 30%
        // tolerance actually works.
        if( i1 >= MIN_CONTOURS_AREA_RATIO && i2 >= MIN_CONTOURS_AREA_RATIO
            && fabs( boxArea - contourArea ) < contourArea * 0.3 ) {
            contours.push_back( contour );
        }
    }

    // Recurse over siblings and children, appending their results.
    QVector<CvSeq*> contours1 = collectContours( contour->h_next );
    QVector<CvSeq*> contours2 = collectContours( contour->v_next );
    foreach( CvSeq* c, contours1 )
        contours.push_back(c);
    foreach( CvSeq* c, contours2 )
        contours.push_back(c);
    return contours;
}
// 传进来一个contour,然后计算它的最小包围矩形minRect,再把原图以包围矩形中心为旋转中心旋转minRect.angle°,得到调正的图像。 // http://blog.csdn.net/include1224/article/details/4384855 CvBox2D RegionRotate(IplImage *src, IplImage *dst, CvSeq *contour) { //dst 是通过cvClone()src得到的 CvMat *mat_contour = cvCreateMat(1, contour->total, CV_32FC2); //双通道 CvPoint2D32f *ptr_mat = (CvPoint2D32f*) (mat_contour->data.ptr); for (int i = 0; i != contour->total; ++i) { CvPoint *ptr_seq = (CvPoint*) (cvGetSeqElem(contour, i)); *ptr_mat = cvPointTo32f(*ptr_seq); //显示把CvPoint转换成CvPoint2D32F ptr_mat++; } //把轮廓变成矩阵 CvBox2D minRect = cvMinAreaRect2(mat_contour); //得到最小包围矩形 //CvMat *rot = cvCreateMat(2,3,CV_32FC1); //cv2DRotationMatrix(cvPoint2D32f(src->width*0.5f,src->height*0.5f),minRect.angle,0.6,rot);//计算得到旋转矩阵----这里计算得到的矩阵不能使图像变换到想要的旋转结果 float factor = 1.0; //缩放 float angle = -minRect.angle; float w = 0, h = 0; w = minRect.center.x; h = minRect.center.y; RotateImage(src, dst, cvPoint(w, h), angle, factor); //cvEllipseBox(dst,minRect,cvScalar(0,0,255)); cvReleaseMat(&mat_contour); return minRect; //返回最佳包围盒 }
// For every contour in the h_next list, push a (box size, class_id) pair
// into `quads` when the contour's min-area box is big enough and roughly
// square (aspect ratio within [0.3, 3]).
void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
{
    const float min_aspect_ratio = 0.3f;
    const float max_aspect_ratio = 3.0f;
    const float min_box_size = 10.0f;

    for (CvSeq* seq = contours; seq != NULL; seq = seq->h_next)
    {
        CvBox2D box = cvMinAreaRect2(seq);
        float box_size = MAX(box.size.width, box.size.height);
        if (box_size < min_box_size)
            continue;   // too small to be a candidate quad

        // MAX(..., 1) guards against dividing by a zero height.
        float aspect_ratio = box.size.width / MAX(box.size.height, 1);
        if (aspect_ratio < min_aspect_ratio || aspect_ratio > max_aspect_ratio)
            continue;   // too elongated

        quads.push_back(std::make_pair(box_size, class_id));
    }
}
/*
 * trainner ctor: for every image path listed in pathsF.txt, segment the
 * hand via YCrCb skin thresholding, isolate the largest contour, crop a
 * square region around it, extract features into data[i][...]; then train
 * the classifier and dump the binarized feature rows to out.txt.
 */
trainner::trainner() {
    FileReader reader("pathsF.txt");
    FileWriter writer("out.txt");
    string line = "";
    IplImage* img;
    IplImage* gray_im;
    IplImage* gray_img;
    CvSeq* contour; //pointer to a contour.
    CvMemStorage* space = cvCreateMemStorage(0);
    CvBox2D hand_boundary;
    // BUG FIX: largest_contour was uninitialized; when an image produced
    // no contours, the "if (largest_contour ...)" test read garbage.
    CvSeq* largest_contour = NULL;
    int i = 0;
    int all_time[dataset_size];

    while ((line = reader.readFile()) != "") {
        std::clock_t start = std::clock();

        //load the img
        img = cvLoadImage(line.c_str());
        cvSmooth(img, img, CV_GAUSSIAN, 5, 5);
        gray_im = cvCloneImage(img);
        cvCvtColor(img, gray_im, CV_BGR2YCrCb);
        gray_img = cvCreateImage(cvGetSize(gray_im), 8, 1);
        // skin-color band in YCrCb; assumed calibrated for this dataset
        cvInRangeS(gray_im, cvScalar(0, 131, 80), cvScalar(255, 185, 135), gray_img);
        cvSmooth(gray_img, gray_img, CV_MEDIAN, 5, 5);
        hand = cvCloneImage(gray_img);

        //finding all contours in the image
        cvFindContours(gray_img, space, &contour, sizeof(CvContour),
                       CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

        //finding largest contour
        double max_area = 0;
        double area = 0;
        largest_contour = NULL;   // reset per image
        while (contour) {
            area = cvContourArea(contour);
            if (area > max_area) {
                max_area = area;
                largest_contour = contour;
            }
            contour = contour->h_next;
        }

        if (largest_contour && largest_contour->total > 0) {
            hand_boundary = cvMinAreaRect2(largest_contour);
            float max = hand_boundary.size.width;
            if (max < hand_boundary.size.height)
                max = hand_boundary.size.height;

            //copy the hand in its own image (square + 25px margin)
            // NOTE(review): the ROI can extend past the image borders for
            // hands near the edge — original behavior kept; confirm the
            // downstream code tolerates it.
            CvRect rounded = cvRect(hand_boundary.center.x - (max / 2) - 25,
                                    hand_boundary.center.y - (max / 2) - 25,
                                    max + 50, max + 50);
            cvSetImageROI(hand, rounded);
            // BUG FIX: clone-then-overwrite leaked the previous 'hand'.
            IplImage* cropped = cvCloneImage(hand);
            cvReleaseImage(&hand);
            hand = cropped;
            // cvShowImage("image", hand);
            cvWaitKey(0);
            cvClearSeq(largest_contour);
            // string bin = extract_feature();
            //write to file
            // writer.writeFile(bin);
            extract_feature(i);
        } else {
            for (int j = 0; j < number_of_features; j++)
                data[i][j] = 0.0;
        }

        // BUG FIX: per-iteration images used to leak (gray_img was only
        // released on the success path; img/gray_im never were). Also
        // recycle the contour storage each iteration.
        cvReleaseImage(&gray_img);
        cvReleaseImage(&gray_im);
        cvReleaseImage(&img);
        cvClearMemStorage(space);

        int timm = (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000);
        all_time[i] = timm;
        i++;
    }

    int sum = 0;
    for (int i = 0; i < dataset_size; i++) {
        sum += all_time[i];
    }
    sum = sum / dataset_size;
    cout << sum << endl;   // average per-image processing time (ms)

    // BUG FIX: explicit reader.~FileReader() / writer.~FileWriter() calls
    // removed — destructors run again at scope exit, so the explicit calls
    // caused double destruction (undefined behavior).

    // now train the classifier
    train();

    //print features: label followed by the binarized feature vector
    for (int i = 0; i < dataset_size; i++) {
        ostringstream oss;
        if (i < 11) oss << "up";
        else if (i < 30) oss << "open";
        else if (i < 60) oss << "capture";
        else if (i < 83) oss << "call";
        else if (i < 101) oss << "left";
        else if (i < 125) oss << "right";
        else if (i < 136) oss << "closed";
        else if (i < 149) oss << "start";
        else if (i < 159) oss << "Lup";
        else if (i < 173) oss << "Lopen";
        else if (i < 190) oss << "Lcapture";
        else if (i < 197) oss << "Lcall";
        oss << ",";
        for (int j = 0; j < number_of_features; j++) {
            if (data[i][j] == 0.0)
                oss << "0";
            else
                oss << "1";
            oss << ",";
        }
        string name = oss.str();
        writer.writeFile(name);
    }
}
int main() { CvVideoWriter *writer; //capture =cvCreateFileCapture("hand4.avi") ; // capture = cvCaptureFromCAM(0) ; cvNamedWindow("Webcam",0); //cvNamedWindow("Virtual hand",0); writer = cvCreateVideoWriter("palm_output2.avi",CV_FOURCC('M','J','P','G'),15,cvSize(640,480),1); while(1) { frame = cvQueryFrame(capture); //cvWriteFrame(writer,frame); cvCvtColor(frame,frame,CV_BGR2HSV); // IMPORTANT!! // The following FOR loop generates binary image which contains ONLY the arm. // Please replace the following FOR loop with your own method to generate the ideal output image. // Because mine method definitely won't work for you. // for(int i=0;i<frame->height;i++) //REPLACE ME { for(int j=0;j<frame->width;j++) { //if(frame->imageData[i*frame->widthStep+(j*3)+2] < 90 && frame->imageData[i*frame->widthStep+(j*3)+2] > 0 && frame->imageData[i*frame->widthStep+(j*3)+1] < 0) if(frame->imageData[i*frame->widthStep+(j*3)] < 50 || frame->imageData[i*frame->widthStep+(j*3)+2] > 170) { mask->imageData[i*mask->width+j] = 255;} else mask->imageData[i*mask->width+j] = 0; } } cvCvtColor(frame,frame,CV_HSV2BGR); cvCopy(frame,frame2); //cvErode(mask,mask,0,2); cvErode(mask,mask,0,1); //ERODE first then DILATE to eliminate the noises. cvDilate(mask,mask,0,1); cvFindContours( mask, storage, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cvPoint(0,0) ); // We choose the first contour in the list which is longer than 650. // You might want to change the threshold to which works the best for you. while(contours && contours->total <= 650) { contours = contours->h_next; } cvDrawContours( frame, contours, CV_RGB(100,100,100), CV_RGB(0,255,0), 1, 2, CV_AA, cvPoint(0,0) ); // // Use a rectangle to cover up the contour. // Find the center of the rectangle (armcenter). Fingertip() needs it. 
// if(contours) { contourcenter = cvMinAreaRect2(contours,0); armcenter.x = cvRound(contourcenter.center.x); armcenter.y = cvRound(contourcenter.center.y); //cvCircle(frame,armcenter,10,CV_RGB(255,255,255),-1,8,0); getconvexhull(); fingertip(); hand(); } cvShowImage("Webcam",frame); //cvShowImage("Virtual hand",virtualhand); if(savepic) { int framenum = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES); char name[10]; itoa(framenum,name,10); sprintf(name,"%sfix4.jpg",name); //printf("%s\n",name); //cvSaveImage(name,frame); savepic = false; } //printf("FPS:%d\n",(int)cvGetCaptureProperty(capture,CV_CAP_PROP_FPS)); // cvZero(virtualhand); if(cvWaitKey(1)>=0 || !frame) { //cvSaveImage("normal.jpg",frame2); break; } } cvReleaseCapture(&capture); cvDestroyWindow("Webcam"); //cvDestroyWindow("Virtual hand"); cvReleaseVideoWriter(&writer); }
//-------------------------------------------------------------------------------- int ofxContourFinder::findContours( ofxCvGrayscaleImage& input, int minArea, int maxArea, int nConsidered, double hullPress, bool bFindHoles, bool bUseApproximation) { // get width/height disregarding ROI IplImage* ipltemp = input.getCvImage(); width = ipltemp->width; height = ipltemp->height; reset(); // opencv will clober the image it detects contours on, so we want to // copy it into a copy before we detect contours. That copy is allocated // if necessary (necessary = (a) not allocated or (b) wrong size) // so be careful if you pass in different sized images to "findContours" // there is a performance penalty, but we think there is not a memory leak // to worry about better to create mutiple contour finders for different // sizes, ie, if you are finding contours in a 640x480 image but also a // 320x240 image better to make two ofxContourFinder objects then to use // one, because you will get penalized less. if( inputCopy.width == 0 ) { inputCopy.allocate( input.width, input.height ); inputCopy = input; } else { if( inputCopy.width == input.width && inputCopy.height == input.height ) inputCopy = input; else { // we are allocated, but to the wrong size -- // been checked for memory leaks, but a warning: // be careful if you call this function with alot of different // sized "input" images!, it does allocation every time // a new size is passed in.... inputCopy.clear(); inputCopy.allocate( input.width, input.height ); inputCopy = input; } } CvSeq* contour_list = NULL; contour_storage = cvCreateMemStorage( 1000 ); storage = cvCreateMemStorage( 1000 ); CvContourRetrievalMode retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL; cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list, sizeof(CvContour), retrieve_mode, bUseApproximation ? 
CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE ); CvSeq* contour_ptr = contour_list; nCvSeqsFound = 0; // put the contours from the linked list, into an array for sorting while( (contour_ptr != NULL) ) { CvBox2D box=cvMinAreaRect2(contour_ptr); float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) ); if( (area > minArea) && (area < maxArea) ) { ofxBlob blob = ofxBlob(); float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ); cvMoments( contour_ptr, myMoments ); // this is if using non-angle bounding box CvRect rect = cvBoundingRect( contour_ptr, 0 ); blob.boundingRect.x = rect.x/width; blob.boundingRect.y = rect.y/height; blob.boundingRect.width = rect.width/width; blob.boundingRect.height = rect.height/height; //Angle Bounding rectangle blob.angleBoundingRect.x = box.center.x/width; blob.angleBoundingRect.y = box.center.y/height; blob.angleBoundingRect.width = box.size.height/width; blob.angleBoundingRect.height = box.size.width/height; blob.angle = box.angle; // assign other parameters blob.area = fabs(area); blob.hole = area < 0 ? 
true : false; blob.length = cvArcLength(contour_ptr); // The cast to int causes errors in tracking since centroids are calculated in // floats and they migh land between integer pixel values (which is what we really want) // This not only makes tracking more accurate but also more fluid blob.centroid.x = (myMoments->m10 / myMoments->m00) / width; blob.centroid.y = (myMoments->m01 / myMoments->m00) / height; blob.lastCentroid.x = 0; blob.lastCentroid.y = 0; if (blob.nFingers != 0){ blob.nFingers = 0; blob.fingers.clear(); } // get the points for the blob: CvPoint pt; CvSeqReader reader; cvStartReadSeq( contour_ptr, &reader, 0 ); for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) { CV_READ_SEQ_ELEM( pt, reader ); blob.pts.push_back( ofPoint((float)pt.x / width, (float)pt.y / height) ); } blob.nPts = blob.pts.size(); // Check if it´s a Hand and if it have fingers // if (area > 5000){ CvPoint* PointArray; int* hull; int hullsize; CvSeq* contourAprox = cvApproxPoly(contour_ptr, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 ); int count = contourAprox->total; // This is number point in contour PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set. hull = (int*)malloc(sizeof(int)*count); // Alloc memory for indices of convex hull vertices. cvCvtSeqToArray(contourAprox, PointArray, CV_WHOLE_SEQ); // Get contour point set. // Find convex hull for curent contour. cvConvexHull( PointArray, count, NULL, CV_COUNTER_CLOCKWISE, hull, &hullsize); int upper = 1, lower = 0; for (int j=0; j<hullsize; j++) { int idx = hull[j]; // corner index if (PointArray[idx].y < upper) upper = PointArray[idx].y; if (PointArray[idx].y > lower) lower = PointArray[idx].y; } float cutoff = lower - (lower - upper) * 0.1f; // find interior angles of hull corners for (int j=0; j < hullsize; j++) { int idx = hull[j]; // corner index int pdx = idx == 0 ? count - 1 : idx - 1; // predecessor of idx int sdx = idx == count - 1 ? 
0 : idx + 1; // successor of idx cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y); cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y); float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) ); // We got a finger // if (angle < 1 ){ ofPoint posibleFinger = ofPoint((float)PointArray[idx].x / width, (float)PointArray[idx].y / height); blob.nFingers++; blob.fingers.push_back( posibleFinger ); } } if ( blob.nFingers > 0 ){ // because means that probably it's a hand ofVec2f fingersAverage; for (int j = 0; j < blob.fingers.size(); j++){ fingersAverage += blob.fingers[j]; } fingersAverage /= blob.fingers.size(); if (blob.gotFingers){ blob.palm = (blob.palm + fingersAverage)*0.5; //blob.palm = fingersAverage; } else { blob.palm = fingersAverage; blob.gotFingers = true; // If got more than three fingers in a road it'll remember } } // Free memory. free(PointArray); free(hull); } blobs.push_back(blob); } contour_ptr = contour_ptr->h_next; } nBlobs = blobs.size(); // Free the storage memory. // Warning: do this inside this function otherwise a strange memory leak if( contour_storage != NULL ) cvReleaseMemStorage(&contour_storage); if( storage != NULL ) cvReleaseMemStorage(&storage); free(contour_ptr); return nBlobs; }
//-------------------------------------------------------------------------------- int ofxCvMyContourFinder::findContours( IplImage* input, int minArea, int maxArea, int nConsidered, bool bFindHoles, int approximation) { // get width/height disregarding ROI _width = input->width; _height = input->height; reset(); // opencv will clober the image it detects contours on, so we want to // copy it into a copy before we detect contours. That copy is allocated // if necessary (necessary = (a) not allocated or (b) wrong size) // so be careful if you pass in different sized images to "findContours" // there is a performance penalty, but we think there is not a memory leak // to worry about better to create mutiple contour finders for different // sizes, ie, if you are finding contours in a 640x480 image but also a // 320x240 image better to make two ofxCvMyContourFinder objects then to use // one, because you will get penalized less. if( !inputCopy ) { inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels); } else if( inputCopy->width != _width || inputCopy->height != _height ) { // reallocate to new size cvReleaseImage(&inputCopy); inputCopy = cvCreateImage(cvSize(_width,_height), input->depth, input->nChannels); } cvSetImageROI(inputCopy, cvGetImageROI(input)); cvCopy(input, inputCopy); contour_storage = cvCreateMemStorage( 1000 ); storage = cvCreateMemStorage( 1000 ); CvContourRetrievalMode retrieve_mode = (bFindHoles) ? 
CV_RETR_LIST : CV_RETR_EXTERNAL; CvContourScanner scanner = cvStartFindContours( inputCopy, contour_storage, sizeof(CvContour), retrieve_mode, CV_CHAIN_APPROX_SIMPLE); CvSeq* c; int numCont = 0; while(( c = cvFindNextContour(scanner)) != NULL) { CvSeq* c_new; if( approximation > 0){ c_new = cvApproxPoly( c, sizeof(CvContour), contour_storage, CV_POLY_APPROX_DP, approximation, 0 ); } else { c_new = cvConvexHull2( c, contour_storage, CV_CLOCKWISE, 1 ); } float area = fabs( cvContourArea(c_new, CV_WHOLE_SEQ) ); if( (area > minArea) && (area < maxArea) ) { cvSeqBlobs.push_back(c_new); } numCont++; } // cvEndFindContours(scanner); // sort the pointers based on size if( cvSeqBlobs.size() > 1 ) { sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), mysort_carea_compare ); } // now, we have cvSeqBlobs.size() contours, sorted by size in the array // cvSeqBlobs let's get the data out and into our structures that we like for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) { myblobs.push_back( ofxCvMyBlob() ); float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ ); CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 ); cvMoments( cvSeqBlobs[i], myMoments ); myblobs[i].area = fabs(area); myblobs[i].hole = area < 0 ? 
true : false; myblobs[i].length = cvArcLength(cvSeqBlobs[i]); myblobs[i].boundingRect.x = rect.x; myblobs[i].boundingRect.y = rect.y; myblobs[i].boundingRect.width = rect.width; myblobs[i].boundingRect.height = rect.height; if(cvSeqBlobs[i]->total >= 6){ myblobs[i].box2D_cv = cvMinAreaRect2(cvSeqBlobs[i]); } myblobs[i].bounding_cv = cvBoundingRect(cvSeqBlobs[i]); double x = (myMoments->m10 / myMoments->m00); double y = (myMoments->m01 / myMoments->m00); myblobs[i].centroid.x = (int)x; myblobs[i].centroid.y = (int)y; myblobs[i].centroid_cv = cvPoint2D32f(x,y); // myblobs[i].contour = (CvPoint *)malloc(cvSeqBlobs[i]->total * sizeof(CvPoint)); // cvCvtSeqToArray(cvSeqBlobs[i], myblobs[i].contour, CV_WHOLE_SEQ); // get the points for the blob: CvPoint pt; CvSeqReader reader; cvStartReadSeq( cvSeqBlobs[i], &reader, 0 ); for( int j=0; j < cvSeqBlobs[i]->total; j++ ) { CV_READ_SEQ_ELEM( pt, reader ); myblobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) ); } myblobs[i].nPts = myblobs[i].pts.size(); } nBlobs = myblobs.size(); // Free the storage memory. // Warning: do this inside this function otherwise a strange memory leak if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); } if( storage != NULL ) { cvReleaseMemStorage(&storage); } return nBlobs; }
int main() { bool salir=FALSE; do { IplImage *im; char eleccion; bool j=TRUE; //Panel printf("Elija la imagen que quiere cargar\n"); printf("Imagenes del programa:\n\n" "A=2_bolas\n" "B=3_bolas\n" "C=4_bolas\n" "D=6_bolas\n" "E=bola_azul\n" "F=bola_roja\n" "G=bolas_cortadas\n" "H=bola_amarilla_blanca\n" "I=bola_amarilla_blanca_+intensidad\n" "J=bola_amarilla1\n" "K=bolas_cortadas_+intensidad\n" "L=bolas_juntas\n" "M=cambio_angulo_iluminacion\n" "N=bolas_pegadas_1\n" "O=bolas_pegadas_2\n" "P=bolas_pegadas_3\n" "Q=bolas_pegadas_4\n" "R=bolas_pegadas_4_+intensidad\n" "S=bolas_pegadas_rotas\n" "T=bolas_pegadas_rotas_2\n" ); printf("X=SALIR\n\n"); while(j==TRUE) { scanf("%c",&eleccion); switch(eleccion) { case 'A':{ char NombreImagen[]="2_bolas.jpg"; im=cvLoadImage(NombreImagen, -1); j=FALSE;} break; case 'B': {char NombreImagen[]="3_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'C': { char NombreImagen[]="4_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'D': { char NombreImagen[]="6_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'E': { char NombreImagen[]="bola_azul.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'F': {char NombreImagen[]="bola_roja.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'G': {char NombreImagen[]="bolas_cortadas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'H': {char NombreImagen[]="bola_amarilla_blanca.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'I': { char NombreImagen[]="bola_amarilla_blanca_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'J': { char NombreImagen[]="bola_amarilla1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'K': { char NombreImagen[]="bolas_cortadas_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'L': { char NombreImagen[]="bolas_juntas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'M': {char 
NombreImagen[]="cambio_angulo_iluminacion.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'N': {char NombreImagen[]="bolas_pegadas_1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'O': {char NombreImagen[]="bolas_pegadas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'P': {char NombreImagen[]="bolas_pegadas_3.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'Q': {char NombreImagen[]="bolas_pegadas_4.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'R': {char NombreImagen[]="bolas_pegadas_4_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'S': {char NombreImagen[]="bolas_pegadas_rotas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'T': {char NombreImagen[]="bolas_pegadas_rotas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'X': {salir=TRUE; return 0;} break; default:{ printf("Eleccion incorrecta, vuelva a elegir una opcion\n"); j=TRUE; } } } //-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //OBTENER UNA IMAGEN BINARIA SÓLO CON BOLAS AZULES Y OTRA SÓLO CON BOLAS ROJAS IplImage *Imagen_RGB; IplImage *Imagen_umbr; IplImage *Imagen_umbr_2; CvSize Dimensiones; //umbrales de la imagenS y la imagenH. 
En esta parte no utilizo la función MinMax porque me sale mejor poniendo unos umbrales fijos int umbral1=150; int umbral2=100; //pasamos de BGR a RGB Dimensiones= cvGetSize(im); Imagen_RGB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(im,Imagen_RGB,CV_BGR2RGB); IplImage *ImagenHSV; IplImage *ImagenH,*ImagenS,*ImagenV; //pasamos de RGB a HSV ImagenHSV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(Imagen_RGB,ImagenHSV,CV_RGB2HSV); //Extraemos de la imagen HSV sus tres componentes: H, S y V ImagenH=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenS=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(ImagenHSV,ImagenH,ImagenS,ImagenV,0); //imagenes binarias para umbralizar Sy H Imagen_umbr=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); Imagen_umbr_2=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); //umbralizacion. cvThreshold(ImagenS,Imagen_umbr,umbral1,255,CV_THRESH_BINARY); cvThreshold(ImagenH,Imagen_umbr_2,umbral2,255,CV_THRESH_BINARY_INV); //Descompongo la imagen en R,G y B IplImage *ImagenR=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenG=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(Imagen_RGB,ImagenR,ImagenG,ImagenB,0); //A partir de aquí hago una serie de transformaciones morfológicas para separar en imágenes binarias las bolas azules de las rojas. //creo elemento estructurante IplConvKernel* element = 0; const int element_shape =CV_SHAPE_ELLIPSE; int pos=1; element= cvCreateStructuringElementEx(pos*2+1,pos*2+1,pos,pos, element_shape,0); IplImage * temp= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *temp2=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *resta=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); //con esto obtengo todas las bolas binarizadas cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_TOPHAT,2); //tophat. Me detecta sólo las sombras de las bolas. 
Mi iluminación iene de arriba. //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); Esto podria aplicarlo si las sombras se crearan en el lado contrario cvAbsDiff (Imagen_umbr, temp ,temp); //resto la original - el tophat cvMorphologyEx(temp,temp,temp, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Con esto obtengo las bolas azules binarizadas cvMorphologyEx(Imagen_umbr_2,temp2,temp2, NULL,CV_MOP_TOPHAT,1); //tophat //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); cvAbsDiff (Imagen_umbr_2, temp2 ,temp2); //resto la original - el tophat cvMorphologyEx(temp2,temp2,temp2, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Dilato y erosiono el mismo número de veces, para que las bolas me queden mas o menos del mismo tamaño. Además lo hago muchas veces(15), para eliminar los //máximos defectos posibles debido a sombras y cambios y contrastes debido a la iluminación cvDilate(temp2,temp2,element,15); cvErode(temp2,temp2,element,15); cvAbsDiff (temp2, temp ,resta); // Resto la imagen de todas las bolas -la imagen de las bolas azules, dilato mcuhas veces y erosiono muchas veces, //y finalmente solo me quedan las rojas cvDilate(resta,resta,element,15);//dilato cvErode(resta,resta,element,15);//erosiono //Puede que algun contorno no deseado aún permanezca en la imagen binaria. Como aplico las mismas transformaciones morfológicas a las dos imágenes binarias //tendré el mismo defecto en las dos imagenes, así que obtengo una imagen sólo los defectos, y después resto los defectos a las dos imágenes. 
IplImage * temp3= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * temp4= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_defectos_comunes= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_azules= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_rojas= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); cvThreshold(temp2,temp3,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas rojas cvThreshold(resta,temp4,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas azules cvAnd(temp3,temp4,Im_defectos_comunes,NULL);//multiplico las dos imagenes, la imagen que obtengo solo aparecen los defectos comunes cvAbsDiff (temp2,Im_defectos_comunes,Im_bolas_azules);//resto los defectos a las bolas azules cvAbsDiff (resta, Im_defectos_comunes ,Im_bolas_rojas);//resto los defectos a las bolas rojas //Ya tengo una imagen binaria sólo con las bolas azules y otra sólo con las rojas. //------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CALCULAR HISTOGRAMA DE LA IMAGEN G //Nueva imagen para dibujar el histograma IplImage *histImage; //Variables para el histograma int hist_size=256; int NivelGris; float NumPixels; //Estructura histograma para guardar la informacion CvHistogram *hist; //Nueva imagen para dibujar el histograma histImage = cvCreateImage(cvSize(256,256), 8, 1); //Estructura histograma para guardar la informacion hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY,NULL, 1); //calcular el histograma. 
Lo hago con la imagenG, ya que hay más contraste que en la imagen en escala de grises, pero también funcionaria con la imagen de escala de grises cvCalcHist(&ImagenG,hist,0,NULL); cvSetZero(histImage); long Histograma[256]; //dibujo el histograma for(NivelGris=0;NivelGris<hist_size;++NivelGris) { NumPixels=cvQueryHistValue_1D(hist,NivelGris)/15; cvLine(histImage,cvPoint(NivelGris,256),cvPoint(NivelGris,256-NumPixels),CV_RGB(255,255,255),1,8,0); Histograma[NivelGris]=NumPixels;//meto en un array el numero de pixels para cada nivel de gris } cvReleaseHist(&hist); cvSaveImage("Histograma.jpg",histImage,0); //------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ //UMBRALIZACIÓN DE LA IMAGEN G IplImage *imagen_bin; CvMemStorage *Memoria; CvSeq *Contorno, *Primer_Contorno; int Nc; //imagen=cvLoadImage("herramientas.tif",CV_LOAD_IMAGE_GRAYSCALE); imagen_bin=cvCreateImage(cvGetSize(ImagenG),8,1); //imagen_color=cvCreateImage(cvGetSize(ImagenG),8,3); //umbralizar la ImagenG int umbral; umbral=MinMax(Histograma); //Para algunas imagenes, debido a que tienen mas iluminacion o se introducen otros objetos como la mano, en el histograma las gausianas se juntan mucho o solo aparece //una. 
En este caso la función MinMAx() calcula un umbral muy alto y hace que no se detecten los contornos de algunas bolas, asi que establezco un umbral máximo if(umbral>100) { umbral=100; } cvLine(histImage,cvPoint(umbral,256),cvPoint(umbral,0),CV_RGB(255,255,255),1,8,0);//uDibujo el umbral en el histograma cvThreshold(ImagenG,imagen_bin,umbral,255,CV_THRESH_BINARY_INV);//Binarizo la imagen G cvMorphologyEx(imagen_bin,imagen_bin,imagen_bin, NULL,CV_MOP_CLOSE,6);//Alplico cierre para eliminar los cambios de contraste en el interior de las bolas //debido al reflejo al reflejo de la luz //--------------------------------------------------------------------------------------------------------------------------------------------------------------------- // CÁLCULO DE CONTORNOS, ÁREAS, PERÍMETROS, CAJAS Y CENTROS DE CAJA EN LA IMAGEN G. IplConvKernel* element_2 = 0; const int element_shape_2 =CV_SHAPE_ELLIPSE; int pos_2=1; element_2= cvCreateStructuringElementEx(pos_2*2+1,pos_2*2+1,pos_2,pos_2, element_shape_2,0); Memoria=cvCreateMemStorage(); bool k=FALSE; int n=0; bool pelotas_juntas=FALSE; int i; double *perimetro; double *area; CvBox2D *BoundBox; CvPoint *centro; int bolas_rotas_azules=0; int bolas_rotas_rojas=0; CvScalar s3; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
//DETECTAR BOLAS ROTAS IplImage * inv_bolas_azules, *inv_bolas_rojas; CvMemStorage *storage_2; CvMemStorage *storage_3; CvSeq *Contorno_2, *Primer_Contorno_2; CvSeq *Contorno_3, *Primer_Contorno_3; int Nc_2; int Nc_3; double *area_2; double *area_3; CvBox2D *BoundBox_2; CvBox2D *BoundBox_3; CvPoint *centro_2; CvPoint *centro_3; inv_bolas_azules=cvCreateImage(cvGetSize(Im_bolas_azules),8,1); inv_bolas_rojas=cvCreateImage(cvGetSize(Im_bolas_rojas),8,1); cvThreshold(Im_bolas_azules,inv_bolas_azules,128,255,CV_THRESH_BINARY_INV); cvThreshold(Im_bolas_rojas,inv_bolas_rojas,128,255,CV_THRESH_BINARY_INV); storage_2=cvCreateMemStorage(); storage_3=cvCreateMemStorage(); //detecto las bolas rotas azules Nc_2=cvFindContours(inv_bolas_azules,storage_2,&Primer_Contorno_2,sizeof(CvContour),CV_RETR_EXTERNAL); //Encuentro cotornos en la imagen binaria donde sólo aparecen //las bolas azules area_2=(double*)malloc(Nc_2*sizeof(double));//tamaño del vector area BoundBox_2=(CvBox2D*)malloc(Nc_2*sizeof(CvBox2D));//tamaño del vector BoundBox_2 centro_2=(CvPoint*)malloc(Nc_2*sizeof(CvPoint));//tamaño del vector centro_2 for(i=0,Contorno_2=Primer_Contorno_2;Contorno_2!=NULL;Contorno_2=Contorno_2->h_next,++i) { area_2[i]=cvContourArea(Contorno_2,CV_WHOLE_SEQ);//Hallo el area de cada contorno BoundBox_2[i]=cvMinAreaRect2(Contorno_2,NULL);//Hallo las caja de cada contorno } for(i=0;i<Nc_2;++i) { centro_2[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);// Hallo el centro de cada contorno } //Para cada contorno, si su area es menor que 2500, es que se trata de una bola rota for(i=0;i<Nc_2;++i) { if(area_2[i]<2500) { bolas_rotas_azules++; DibujarBox2D(im,BoundBox_2[i]); printf("Bola rota azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Detecto las bolas rotas rojas // Es el mismo procedimiento que para detectar las bolas rotas azules, pero encontrando contornos en la imagen binaria donde solo aparecen las bolas rojas 
Nc_3=cvFindContours(inv_bolas_rojas,storage_3,&Primer_Contorno_3,sizeof(CvContour),CV_RETR_EXTERNAL); area_3=(double*)malloc(Nc_3*sizeof(double)); BoundBox_3=(CvBox2D*)malloc(Nc_3*sizeof(CvBox2D)); centro_3=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno_3=Primer_Contorno_3;Contorno_3!=NULL;Contorno_3=Contorno_3->h_next,++i) { area_3[i]=cvContourArea(Contorno_3,CV_WHOLE_SEQ); BoundBox_3[i]=cvMinAreaRect2(Contorno_3,NULL); } for(i=0;i<Nc_3;++i) { centro_3[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc_3;++i) { if(area_3[i]<2000) { bolas_rotas_rojas++; DibujarBox2D(im,BoundBox_3[i]); printf("Bola rota roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CASO DE LAS BOLAS JUNTAS // En el caso de que haya dos o más bolas juntas, el programa encuentra un contorno con el área de todas las bolas que están juntas. Para solucionar este problema //utilizo el perímetro de los contornos. Elijo un valor umbral para el perímetro en el que me aseguro que se han separado todas las bolas. Así, si existe un perímetro //mayor al umbral, erosiono la imagen hasta que todos los perímetros sean menores que ese umbral. // Para detectar si hay bolas juntas, compruebo si existe algún controno que tenga el área mayor que el de una bola . 
for(i=0;i<Nc;++i) { if(area[i]>4000)//si existe el área de un contorno mayor al área de una bola { k=TRUE; pelotas_juntas=TRUE; } } while(k==TRUE)// Se mete en este bucle si ha encontrado algun área mayor que el de una bola { k=FALSE; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc;++i) { if(perimetro[i]>100) { k=TRUE; cvErode(imagen_bin,imagen_bin,element_2,1); } } } //------------------------------------------------------------------------------------------------------------------------------------------------------------ //CONOCER EL NÚMERO DE BOLAS DE CADA COLOR Y SUS RESPECTIVAS POSICIONES int bolas_azules=0; int bolas_rojas=0; int mano=0; double radio=0.0; CvScalar s; CvScalar s2; //Diferenciar bolas en el caso de que no haya bolas juntas if( pelotas_juntas==FALSE) { //Bolas azules for(i=0;i<Nc;++i)//bucle para todods los contornos { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen de bolas azules if(s.val[0]==0)// si es 0,es que puede haber una bola azul o una bola rota azul { if(area[i]>2000 && area[i]<4000)//bola azul { bolas_azules++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué 
color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que puede haber bola roja o bola rota roja { if(area[i]>2000 && area[i]<4000)//bola roja { bolas_rojas++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } } if( pelotas_juntas==TRUE) { float radio=30;//Como en el caso de qhe haya bolas juntas erosiono la imagen hasta separlas, no tengo las áreas reales de las bolas, así que //estipulo un radio aproximado . //Bolas azules for(i=0;i<Nc;++i) { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen con bolas azules if(s.val[0]==0)// si es 0,es que hay bola azul. En este caso no existe la posibilidad de que haya bolas rotas porque al erosionar solo permanecen los contornos //con un perímetro mayor al de una bola. El perímetro de una bola rota siempre será menor { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); bolas_azules++; printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que hay una bola roja { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); bolas_rojas++; printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } printf("bolas azules:%d\n",bolas_azules); printf("bolas rotas azules:%d\n", bolas_rotas_azules); printf("bolas rojas:%d\n",bolas_rojas); printf("bolas rotas rojas:%d\n\n",bolas_rotas_rojas); printf("ORDENAR AL ROBOT\n\n\n"); if(bolas_rotas_azules>0) { printf("METER BOLAS AZULES DEFECTUOSAS EN CAJA DE BOLAS AZULES DEFECTUOSAS\n\n"); } if(bolas_rotas_rojas>0) { printf("METER BOLAS ROJAS DEFECTUOSAS EN CAJA DE BOLAS ROJAS DEFECTUOSAS\n\n"); } if(bolas_azules>0 
|| bolas_rojas>0) { printf("EMPAQUETAR BOLAS\n\n"); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------- cvWaitKey(0); //-------------------------------------------------------------------------------------------------------------------------------------------------------------------- //PANTALLA cvNamedWindow("Original", CV_WINDOW_AUTOSIZE); cvShowImage("Original", im ); //cvNamedWindow("imagen_bin", CV_WINDOW_AUTOSIZE); //cvShowImage("imagen_bin", imagen_bin ); //Mostrar el plano de color rojo, verde y azul //cvNamedWindow("R", CV_WINDOW_AUTOSIZE); //cvShowImage("R",ImagenR); //cvNamedWindow("G", CV_WINDOW_AUTOSIZE); //cvShowImage("G",inv_bolas_azules); //cvNamedWindow("B", CV_WINDOW_AUTOSIZE); //cvShowImage("B",inv_bolas_rojas); cvNamedWindow("bolas_azules", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_azules",Im_bolas_azules); cvNamedWindow("bolas_rojas", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_rojas",Im_bolas_rojas); //Mostrar la imagen cvNamedWindow("Histograma de G", CV_WINDOW_AUTOSIZE); cvShowImage("Histograma de G", histImage ); cvWaitKey(0); //--------------------------------------------------------------------------------------------------------------------------------------------------------------- //LIBERAR MEMORIA cvDestroyAllWindows(); cvReleaseImage(&ImagenR); cvReleaseImage(&ImagenG); cvReleaseImage(&ImagenB); cvReleaseImage(&imagen_bin); cvReleaseImage(&histImage); cvReleaseImage(&im); cvReleaseImage(&Imagen_RGB); cvReleaseImage(&Imagen_umbr); cvReleaseImage(&Imagen_umbr_2); cvReleaseImage(&ImagenHSV); cvReleaseImage(&ImagenH); cvReleaseImage(&ImagenS); cvReleaseImage(&ImagenV); cvReleaseImage(&temp); cvReleaseImage(&temp2); cvReleaseImage(&temp3); cvReleaseImage(&temp4); cvReleaseImage(&Im_defectos_comunes); cvReleaseImage(&Im_bolas_azules); cvReleaseImage(&Im_bolas_rojas); cvReleaseImage(&inv_bolas_rojas); 
cvReleaseImage(&inv_bolas_azules); }while(salir==FALSE); return 0; }
void process_QR(IplImage* img, QR_Data * data, IplImage* outimg) { // Data extracted from the ZBar Image int width = 0; int height = 0; void *raw = NULL; // Data from the QR code and its position/angle int qr_length = 0; // The length of the code in pixels double qr_distance = 0; // How far the qr code is (altitude) double qr_angle = 0; // Angle of the code from the right x-axis double qr_angle_deg = 0;// Same as above but in degrees double dis2Mid = 0; // Distance from the camera middle to code double theta1 = 0; // the arctan of the y' and x' axes double theta2 = 0; // the angle between the two axes double theta2_deg = 0; // theta2 in radians double x_d = 0; double y_d = 0; double x_ab = 0; double y_ab = 0; int qr_x, qr_y; // The data from the QR Code char text[80]; // ZBar Scanner for C zbar_image_scanner_t* scanner = zbar_image_scanner_create(); // configure the scanner zbar_image_scanner_set_config(scanner, 0, ZBAR_CFG_ENABLE, 1); // Extract data from the image width = img->width; height = img->height; raw = (void *) img->imageData; // Wrap the image data zbar_image_t *image = zbar_image_create(); zbar_image_set_format(image, *(int*)"Y800"); zbar_image_set_size(image, width, height); zbar_image_set_data(image, raw, width * height, zbar_image_free_data); // Scan the image for QR int n = zbar_scan_image(scanner, image); /* extract results */ const zbar_symbol_t *symbol = zbar_image_first_symbol(image); for(; symbol; symbol = zbar_symbol_next(symbol)) { // Cycle through each symbol found zbar_symbol_type_t typ = zbar_symbol_get_type(symbol); const char *data = zbar_symbol_get_data(symbol); printf("decoded %s symbol \"%s\"\n", zbar_get_symbol_name(typ), data); sscanf(data, "%d %d", &qr_x, &qr_y); printf("QR_X: %i\n", qr_x); printf("QR_Y: %i\n", qr_y); // Find the angle between the lines CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* ptseq = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage); CvPoint pts[4]; int i = 0; for 
(i = 0; i < 4; ++i) { CvPoint point = cvPoint(zbar_symbol_get_loc_x(symbol,i), zbar_symbol_get_loc_y(symbol,i)); cvSeqPush(ptseq, &point); pts[i] = point; } CvBox2D rect = cvMinAreaRect2(ptseq, 0); // Draw the outline rectangle for (i = 0; i < 4; ++i) { cvLine(outimg, pts[i], pts[(i+1)%4], CV_RGB(0, 0, 255), 5, 8, 0); } // Get the distance from the code to the camera qr_length = sqrt(abs(pts[0].x * pts[0].x - pts[1].x * pts[1].x) + abs(pts[0].y * pts[0].y - pts[1].y * pts[1].y)); qr_distance = qr_length * DISTANCE_M + DISTANCE_B; printf("Length: %i\n", qr_length); printf("Distance: %f\n", qr_distance); // Find the relative location // Get the angle of the circled rectangle qr_angle = -rect.angle; if (pts[0].x > pts[3].x && pts[0].y > pts[3].y) qr_angle += 90; else if (pts[0].x > pts[3].x && pts[0].y < pts[3].y) qr_angle += 180; else if (pts[0].x < pts[3].x && pts[0].y < pts[3].y) qr_angle += 270; else if (pts[0].x == pts[1].x && pts[0].y == pts[3].y) { if (pts[0].x < pts[3].x && pts[0].y < pts[1].y) qr_angle = 0; else qr_angle = 180; } else if (pts[0].x == pts[3].x && pts[0].y == pts[1].y) { if (pts[0].x < pts[1].x && pts[0].y > pts[3].y) qr_angle = 90; else qr_angle = 270; } printf("Angle: %f\n", qr_angle); //Draw a line on the angle qr_angle = qr_angle * 3.1415 / 180; CvPoint mid = cvPoint((pts[0].x + pts[2].x) / 2, (pts[0].y + pts[2].y)/2); CvPoint p2 = cvPoint(mid.x + 25*cos(qr_angle), mid.y - 25*sin(qr_angle)); cvLine(outimg,mid, p2, CV_RGB(0,255,0),5,8,0); // Get the relative location based on the data of the QR code // QR format: x y // x and y are seperated by a single space // Check if the QR is in the right format cvLine(outimg,mid, cvPoint(MID_X,MID_Y), CV_RGB(255,0,0),5,8,0); // Relative position (in pixel) dis2Mid = sqrt((mid.x - MID_X) * (mid.x - MID_X) + (mid.y - MID_Y) * (mid.y - MID_Y)); printf("Distance to Quad: %f\n", dis2Mid); theta1 = atan2(MID_Y - mid.y, MID_X - mid.x) * 180 / MATH_PI; qr_angle_deg = qr_angle * 180 / MATH_PI; theta2_deg = 90 - 
theta1 - qr_angle_deg; theta2 = theta2_deg * MATH_PI / 180; x_d = dis2Mid * sin(theta2); y_d = dis2Mid * cos(theta2); // Display message onto the image CvFont font; cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8); sprintf(text, "Attitude: %f", qr_distance); cvPutText(outimg, text, cvPoint(30,30), &font, cvScalar(255, 255, 255, 0)); sprintf(text, "Angle: %f", qr_angle_deg); cvPutText(outimg, text, cvPoint(30,50), &font, cvScalar(255, 255, 255, 0)); x_ab = x_d + qr_x; y_ab = y_d + qr_y; sprintf(text, "Abs. Pos: (%f, %f)", x_ab, y_ab); cvPutText(outimg, text, cvPoint(30,70), &font, cvScalar(255, 255, 255, 0)); } }
//--------------------------------------------------------------------------------
// Finds contours in `input` and fills three result lists on the instance:
//  - objects: contours whose min-area box matches a known template
//             (only when bTrackObjects is set),
//  - blobs:   remaining contours with minArea < area < maxArea
//             (only when bTrackBlobs is set),
//  - fingers: convex-hull corners with a small interior angle
//             (only when bTrackFingers is set; hullPress is the
//             cvApproxPoly accuracy used for that pass).
// bFindHoles selects CV_RETR_LIST (holes included) vs CV_RETR_EXTERNAL;
// bUseApproximation selects CV_CHAIN_APPROX_SIMPLE vs NONE.
// Returns nFingers when bTrackFingers is set, otherwise nBlobs.
int ContourFinder::findContours( ofxCvGrayscaleImage&  input,
									  int minArea,
									  int maxArea,
									  int nConsidered,
									  double hullPress,	
									  bool bFindHoles,
									  bool bUseApproximation) {
	reset();

	// opencv will clobber the image it detects contours on, so we want to
	// copy it into a copy before we detect contours.  That copy is allocated
	// if necessary (necessary = (a) not allocated or (b) wrong size)
	// so be careful if you pass in different sized images to "findContours"
	// there is a performance penalty, but we think there is not a memory leak
	// to worry about better to create mutiple contour finders for different
	// sizes, ie, if you are finding contours in a 640x480 image but also a
	// 320x240 image better to make two ContourFinder objects then to use
	// one, because you will get penalized less.

	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) 
			inputCopy = input;
		else {
			// we are allocated, but to the wrong size --
			// been checked for memory leaks, but a warning:
			// be careful if you call this function with alot of different
			// sized "input" images!, it does allocation every time
			// a new size is passed in....
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;
	// NOTE(review): contour_storage and storage are member pointers; they are
	// created here and released at the bottom of this same call.
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// Walk the sibling chain of top-level contours: each one becomes an
	// object (template match) or a blob (area filter), or is skipped.
	while( (contour_ptr != NULL) ) {
		CvBox2D box = cvMinAreaRect2(contour_ptr);
		int objectId; // If the contour is an object, then objectId is its ID
		objectId = (bTrackObjects)? templates->getTemplateId(box.size.width,box.size.height): -1;
		
		if(objectId != -1 ) { //If the blob is a object
			Blob blob		= Blob();
			blob.id			= objectId;
			blob.isObject	= true;
			float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
			
			cvMoments( contour_ptr, myMoments );
			
			// this is if using non-angle bounding box
			CvRect rect	= cvBoundingRect( contour_ptr, 0 );
			blob.boundingRect.x      = rect.x;
			blob.boundingRect.y      = rect.y;
			blob.boundingRect.width  = rect.width;
			blob.boundingRect.height = rect.height;
			
			//For anglebounding rectangle
			blob.angleBoundingBox=box;
			blob.angleBoundingRect.x	  = box.center.x;
			blob.angleBoundingRect.y	  = box.center.y;
			blob.angleBoundingRect.width  = box.size.height;
			blob.angleBoundingRect.height = box.size.width;
			blob.angle = box.angle;
			
			//TEMPORARY INITIALIZATION TO 0, Will be calculating afterwards.This is to prevent sending wrong data
			blob.D.x = 0;
			blob.D.y = 0;
			blob.maccel = 0;
			
			// assign other parameters
			// NOTE: cvContourArea is signed here; a negative area marks a hole.
			blob.area                = fabs(area);
			blob.hole                = area < 0 ? true : false;
			blob.length 			 = cvArcLength(contour_ptr);
			// AlexP
			// The cast to int causes errors in tracking since centroids are calculated in
			// floats and they migh land between integer pixel values (which is what we really want)
			// This not only makes tracking more accurate but also more fluid
			blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
			blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
			blob.lastCentroid.x 	 = 0;
			blob.lastCentroid.y 	 = 0;
			
			// get the points for the blob:
			CvPoint           pt;
			CvSeqReader       reader;
			cvStartReadSeq( contour_ptr, &reader, 0 );
			
			for( int j=0; j < contour_ptr->total; j++ ) {
				CV_READ_SEQ_ELEM( pt, reader );
				blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
			}
			blob.nPts = blob.pts.size();
			
			objects.push_back(blob);
		}
		else if(bTrackBlobs) { // SEARCH FOR BLOBS
			float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
			if( (area > minArea) && (area < maxArea) ) {
				Blob blob=Blob();
				// inner `area` intentionally shadows the absolute one above:
				// the signed value is needed for the hole flag below
				float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
				cvMoments( contour_ptr, myMoments );
				
				// this is if using non-angle bounding box
				CvRect rect	= cvBoundingRect( contour_ptr, 0 );
				blob.boundingRect.x      = rect.x;
				blob.boundingRect.y      = rect.y;
				blob.boundingRect.width  = rect.width;
				blob.boundingRect.height = rect.height;
				
				//Angle Bounding rectangle
				// NOTE(review): unlike the object branch, angleBoundingBox is
				// not stored here — confirm whether that is intentional.
				blob.angleBoundingRect.x	  = box.center.x;
				blob.angleBoundingRect.y	  = box.center.y;
				blob.angleBoundingRect.width  = box.size.height;
				blob.angleBoundingRect.height = box.size.width;
				blob.angle = box.angle;
				
				// assign other parameters
				blob.area                = fabs(area);
				blob.hole                = area < 0 ? true : false;
				blob.length 			 = cvArcLength(contour_ptr);
				// AlexP
				// The cast to int causes errors in tracking since centroids are calculated in
				// floats and they migh land between integer pixel values (which is what we really want)
				// This not only makes tracking more accurate but also more fluid
				blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
				blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
				blob.lastCentroid.x 	 = 0;
				blob.lastCentroid.y 	 = 0;
				
				// get the points for the blob (capped at TOUCH_MAX_CONTOUR_LENGTH):
				CvPoint           pt;
				CvSeqReader       reader;
				cvStartReadSeq( contour_ptr, &reader, 0 );
				
				for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
					CV_READ_SEQ_ELEM( pt, reader );
					blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
				}
				blob.nPts = blob.pts.size();
				
				blobs.push_back(blob);
			}
		}
		contour_ptr = contour_ptr->h_next;
	}
	
	if(bTrackFingers) {  // SEARCH FOR FINGERS
		CvPoint*			PointArray;
		int*				hull;
		int					hullsize;
		
		// simplify the contours before hull analysis; hullPress is the
		// polygon-approximation accuracy
		if (contour_list)
			contour_list = cvApproxPoly(contour_list, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 );
		
		for( ; contour_list != 0; contour_list = contour_list->h_next ){
			int count = contour_list->total; // This is number point in contour
			CvRect rect = cvContourBoundingRect(contour_list, 1);
			
			if ( (rect.width*rect.height) > 300 ){		// Analize the bigger contour
				CvPoint center;
				center.x = rect.x+rect.width/2;
				center.y = rect.y+rect.height/2;
				
				PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set.
				hull = (int*)malloc(sizeof(int)*count);	// Alloc memory for indices of convex hull vertices.
				
				cvCvtSeqToArray(contour_list, PointArray, CV_WHOLE_SEQ); // Get contour point set.
				
				// Find convex hull for curent contour.
				cvConvexHull( PointArray, count, NULL, CV_COUNTER_CLOCKWISE, hull, &hullsize);
				
				// find the vertical extent of the hull
				// NOTE(review): `upper` starts at 640, which looks like a
				// hard-coded frame dimension — confirm for other input sizes.
				int upper = 640, lower = 0;
				for (int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					if (PointArray[idx].y < upper) upper = PointArray[idx].y;
					if (PointArray[idx].y > lower) lower = PointArray[idx].y;
				}
				// cutoff is currently unused: the region check on it is
				// commented out in the angle test below
				float cutoff = lower - (lower - upper) * 0.1f;
				
				// find interior angles of hull corners
				for (int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					int pdx = idx == 0 ? count - 1 : idx - 1; //  predecessor of idx
					int sdx = idx == count - 1 ? 0 : idx + 1; // successor of idx
					
					cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y);
					cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y);
					
					float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );
					
					// low interior angle + within upper 90% of region -> we got a finger
					if (angle < 1 ){ //&& PointArray[idx].y < cutoff) {
						Blob blob = Blob();
						
						//float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
						//cvMoments( contour_ptr, myMoments );
						
						// this is if using non-angle bounding box
						//CvRect rect	= cvBoundingRect( contour_ptr, 0 );
						// fingers get a fixed 10x10 box centered on the corner
						blob.boundingRect.x      = PointArray[idx].x-5;
						blob.boundingRect.y      = PointArray[idx].y-5;
						blob.boundingRect.width  = 10;
						blob.boundingRect.height = 10;
						
						//Angle Bounding rectangle
						blob.angleBoundingRect.x	  = PointArray[idx].x-5;
						blob.angleBoundingRect.y	  = PointArray[idx].y-5;
						blob.angleBoundingRect.width  = 10;
						blob.angleBoundingRect.height = 10;
						blob.angle = atan2((float) PointArray[idx].x - center.x , (float) PointArray[idx].y - center.y);
						
						// assign other parameters
						//blob.area                = fabs(area);
						//blob.hole                = area < 0 ? true : false;
						//blob.length 			 = cvArcLength(contour_ptr);
						// AlexP
						// The cast to int causes errors in tracking since centroids are calculated in
						// floats and they migh land between integer pixel values (which is what we really want)
						// This not only makes tracking more accurate but also more fluid
						blob.centroid.x			 = PointArray[idx].x;//(myMoments->m10 / myMoments->m00);
						blob.centroid.y 		 = PointArray[idx].y;//(myMoments->m01 / myMoments->m00);
						blob.lastCentroid.x 	 = 0;
						blob.lastCentroid.y 	 = 0;
						
						fingers.push_back(blob);
					}
				}
				
				// Free memory.
				free(PointArray);
				free(hull);
			}
		}
	}
	
	nBlobs = blobs.size();
	nFingers = fingers.size();
	nObjects = objects.size();
	
	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL )
		cvReleaseMemStorage(&contour_storage);
	if( storage != NULL )
		cvReleaseMemStorage(&storage);
	
	return (bTrackFingers)? nFingers:nBlobs;
}
// Locate a Macbeth ColorChecker (or X-Rite Passport) chart in the image file
// `img` and return a new image with the detected chart rendered onto it.
// Returns NULL if the image cannot be loaded or no contours are found early on.
// Caller owns the returned IplImage.
//
// Pipeline: load -> per-channel adaptive threshold -> OR channels -> open ->
// find hole contours -> keep quad-like ones -> (optionally k-means partition
// into two candidate charts) -> score orientations via find_colorchecker.
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img, CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );

    // BUGFIX: the original dereferenced macbeth_img (width/height/depth) before
    // its null check, crashing on a missing or unreadable file. Bail out first.
    if( !macbeth_img ) {
        fprintf(stderr,"Failed to load image: %s\n", img);
        return NULL;
    }

    // Keep an untouched copy: patch colors are sampled from the original while
    // the detection result is drawn onto macbeth_img.
    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);

    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];

    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }

    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);

    if( macbeth_img )
    {
        int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
        int threshold_type = CV_THRESH_BINARY_INV;
        // Block size scales with image size; |1 forces it odd as cvAdaptiveThreshold requires.
        int block_size = cvRound( MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
        fprintf(stderr,"Using %d as block size\n", block_size);

        double offset = 6;

        // do an adaptive threshold on each channel
        for(int i = 0; i < 3; i++) {
            cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
        }

        IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );

        // OR the binary threshold results together
        cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
        cvOr(macbeth_split_thresh[2],adaptive,adaptive);

        for(int i = 0; i < 3; i++) {
            cvReleaseImage( &(macbeth_split[i]) );
            cvReleaseImage( &(macbeth_split_thresh[i]) );
        }

        int element_size = (block_size/10)+2;
        fprintf(stderr,"Using %d as element size\n", element_size);

        // do an opening on the threshold image to remove speckle noise
        IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
        cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
        cvReleaseStructuringElement(&element);

        CvMemStorage* storage = cvCreateMemStorage(0);

        CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
        CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );

        // find contours in the threshold image
        CvSeq * contours = NULL;
        cvFindContours(adaptive,storage,&contours);

        // Minimum plausible patch area, derived from image area and square count.
        int min_size = (macbeth_img->width*macbeth_img->height)/(MACBETH_SQUARES*100);

        if(contours) {
            int count = 0;

            for( CvSeq* c = contours; c != NULL; c = c->h_next) {
                CvRect rect = ((CvContour*)c)->rect;
                // only interested in contours with these restrictions:
                // holes (dark patch interiors) of at least min_size
                if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                    // only interested in quad-like contours
                    CvSeq * quad_contour = find_quad(c, storage, min_size);
                    if(quad_contour) {
                        cvSeqPush( initial_quads, &quad_contour );
                        count++;

                        rect = ((CvContour*)quad_contour)->rect;

                        CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);

                        CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                        cvSeqPush( initial_boxes, &box );

                        // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y);

                        // Nearest reference patch by Lab distance — currently only
                        // used by the (removed) debug rendering below.
                        double min_distance = MAX_RGB_DISTANCE;
                        CvPoint closest_color_idx = cvPoint(-1,-1);
                        for(int y = 0; y < MACBETH_HEIGHT; y++) {
                            for(int x = 0; x < MACBETH_WIDTH; x++) {
                                double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                                if(distance < min_distance) {
                                    closest_color_idx.x = x;
                                    closest_color_idx.y = y;
                                    min_distance = distance;
                                }
                            }
                        }

                        CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x];
                        (void)closest_color; // kept for debugging; silence unused warning

                        // (commented-out debug drawing of contours/centers/colors removed for clarity)
                    }
                }
            }

            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                // More quads than one chart holds: likely a Passport with two
                // charts. Split the quads into two clusters and test each.
                fprintf(stderr," (probably a Passport)\n");

                CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );

                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }

                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }

                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters, cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ) );

                for(int i = 0; i < initial_quads->total; i++) {
                    int cluster_idx = clusters->data.i[i];
                    cvSeqPush( partitioned_quads[cluster_idx], cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx], cvGetSeqElem(initial_boxes, i) );
                }

                ColorChecker partitioned_checkers[2];

                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] = find_colorchecker(partitioned_quads[i], partitioned_boxes[i], storage, macbeth_img, macbeth_original);
                }

                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];

                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            } else {
                // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes, storage, macbeth_img, macbeth_original);
            }

            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);

            // print out the colorchecker info: x,y,B,G,R per patch
            for(int y = 0; y < MACBETH_HEIGHT; y++) {
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);

                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);
        }

        cvReleaseMemStorage( &storage );

        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );

        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );

    return NULL;
}
// Given candidate patch quads and their min-area boxes, fit an oriented
// MACBETH_WIDTH x MACBETH_HEIGHT sampling grid over them, sample the average
// color of each cell from `original_image`, and return the ColorChecker
// (values, points, patch size, error) for the better of the two 180-degree
// orientations.
//
// NOTE(review): the `quads` parameter is not referenced in this body — only
// `boxes` is used. `image` is only read for its `origin` field; colors are
// sampled from `original_image`.
ColorChecker find_colorchecker(CvSeq * quads, CvSeq * boxes, CvMemStorage *storage, IplImage *image, IplImage *original_image)
{
    CvPoint2D32f box_corners[4];
    bool passport_box_flipped = false;
    bool rotated_box = false;

    // Bounding box of all patch centers: the overall chart outline.
    CvMat* points = cvCreateMat( boxes->total , 1, CV_32FC2 );
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));
        cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
    }
    CvBox2D passport_box = cvMinAreaRect2(points,storage);
    fprintf(stderr,"Box:\n\tCenter: %f,%f\n\tSize: %f,%f\n\tAngle: %f\n",passport_box.center.x,passport_box.center.y,passport_box.size.width,passport_box.size.height,passport_box.angle);
    if(passport_box.angle < 0.0) {
        passport_box_flipped = true;
    }

    cvBoxPoints(passport_box, box_corners);
    // for(int i = 0; i < 4; i++)
    // {
    //     fprintf(stderr,"Box corner %d: %d,%d\n",i,cvPointFrom32f(box_corners[i]).x,cvPointFrom32f(box_corners[i]).y);
    // }

    // cvBox(passport_box, image, cvScalarAll(128), 10);

    // If edge 0-1 is shorter than edge 1-2, the chart is portrait-oriented:
    // rotate the corner order so corner 0-1 becomes the long (horizontal) edge.
    if(euclidean_distance(cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1])) <
       euclidean_distance(cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))) {
        fprintf(stderr,"Box is upright, rotating\n");
        rotate_box(box_corners);
        // only treated as "rotated" when the min-area box also had a negative angle
        rotated_box = true && passport_box_flipped;
    }

    // Grid geometry: spacing between patch centers along each axis, plus the
    // slope/magnitude of each edge so steps can be taken along the chart axes.
    double horizontal_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1]))/(double)(MACBETH_WIDTH-1);
    double vertical_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))/(double)(MACBETH_HEIGHT-1);
    double horizontal_slope = (box_corners[1].y - box_corners[0].y)/(box_corners[1].x - box_corners[0].x);
    double horizontal_mag = sqrt(1+pow(horizontal_slope,2));
    double vertical_slope = (box_corners[3].y - box_corners[0].y)/(box_corners[3].x - box_corners[0].x);
    double vertical_mag = sqrt(1+pow(vertical_slope,2));
    // Step direction signs, from corner ordering.
    double horizontal_orientation = box_corners[0].x < box_corners[1].x ? -1 : 1;
    double vertical_orientation = box_corners[0].y < box_corners[3].y ? -1 : 1;

    fprintf(stderr,"Spacing is %f %f\n",horizontal_spacing,vertical_spacing);
    fprintf(stderr,"Slope is %f %f\n", horizontal_slope,vertical_slope);

    // Average patch size = mean of the smaller side of each contained rect.
    int average_size = 0;
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));

        CvRect rect = contained_rectangle(box);
        average_size += MIN(rect.width, rect.height);
    }
    average_size /= boxes->total;

    fprintf(stderr,"Average contained rect size is %d\n", average_size);

    CvMat * this_colorchecker = cvCreateMat(MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC3);
    CvMat * this_colorchecker_points = cvCreateMat( MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC2 );

    // calculate the averages for our oriented colorchecker
    for(int x = 0; x < MACBETH_WIDTH; x++) {
        for(int y = 0; y < MACBETH_HEIGHT; y++) {
            CvPoint2D32f row_start;

            // XOR of bottom-left origin and rotation decides whether rows step
            // down (+) or up (-) from corner 0.
            if ( ((image->origin == IPL_ORIGIN_BL) || !rotated_box) && !((image->origin == IPL_ORIGIN_BL) && rotated_box) ) {
                row_start.x = box_corners[0].x + vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y + vertical_spacing * y * (vertical_slope / vertical_mag);
            } else {
                row_start.x = box_corners[0].x - vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y - vertical_spacing * y * (vertical_slope / vertical_mag);
            }

            // rect.x/rect.y first hold the patch center, then are shifted to
            // the top-left corner of the average_size sampling window.
            CvRect rect = cvRect(0,0,average_size,average_size);

            rect.x = row_start.x - horizontal_spacing * x * ( 1 / horizontal_mag ) * horizontal_orientation;
            rect.y = row_start.y - horizontal_spacing * x * ( horizontal_slope / horizontal_mag ) * vertical_orientation;

            cvSet2D(this_colorchecker_points, y, x, cvScalar(rect.x,rect.y));

            rect.x = rect.x - average_size / 2;
            rect.y = rect.y - average_size / 2;

            // cvRectangle(
            //     image,
            //     cvPoint(rect.x,rect.y),
            //     cvPoint(rect.x+rect.width, rect.y+rect.height),
            //     cvScalarAll(0),
            //     10
            // );

            CvScalar average_color = rect_average(rect, original_image);

            cvSet2D(this_colorchecker,y,x,average_color);
        }
    }

    // Score both 180-degree orientations against the reference chart and keep
    // the values/points flipped to match whichever scores lower.
    double orient_1_error = check_colorchecker(this_colorchecker);
    cvFlip(this_colorchecker,NULL,-1);
    double orient_2_error = check_colorchecker(this_colorchecker);

    fprintf(stderr,"Orientation 1: %f\n",orient_1_error);
    fprintf(stderr,"Orientation 2: %f\n",orient_2_error);

    if(orient_1_error < orient_2_error) {
        // orientation 1 wins: un-flip the values
        cvFlip(this_colorchecker,NULL,-1);
    } else {
        // orientation 2 wins: flip the points to match the flipped values
        cvFlip(this_colorchecker_points,NULL,-1);
    }

    // draw_colorchecker(this_colorchecker,this_colorchecker_points,image,average_size);

    ColorChecker found_colorchecker;

    found_colorchecker.error = MIN(orient_1_error,orient_2_error);
    found_colorchecker.values = this_colorchecker;
    found_colorchecker.points = this_colorchecker_points;
    found_colorchecker.size = average_size;

    return found_colorchecker;
}
int main( int argc, char** argv ) { IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 ); #if !ARRAY CvMemStorage* storage = cvCreateMemStorage(0); #endif cvNamedWindow( "rect & circle", 1 ); for(;;) { char key; int i, count = rand()%100 + 1; CvPoint pt0, pt; CvBox2D box; CvPoint2D32f box_vtx[4]; CvPoint2D32f center; CvPoint icenter; float radius; #if !ARRAY CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage ); for( i = 0; i < count; i++ ) { pt0.x = rand() % (img->width/2) + img->width/4; pt0.y = rand() % (img->height/2) + img->height/4; cvSeqPush( ptseq, &pt0 ); } #ifndef _EiC /* unfortunately, here EiC crashes */ box = cvMinAreaRect2( ptseq, 0 ); #endif cvMinEnclosingCircle( ptseq, ¢er, &radius ); #else CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0])); CvMat pointMat = cvMat( 1, count, CV_32SC2, points ); for( i = 0; i < count; i++ ) { pt0.x = rand() % (img->width/2) + img->width/4; pt0.y = rand() % (img->height/2) + img->height/4; points[i] = pt0; } #ifndef _EiC box = cvMinAreaRect2( &pointMat, 0 ); #endif cvMinEnclosingCircle( &pointMat, ¢er, &radius ); #endif cvBoxPoints( box, box_vtx ); cvZero( img ); for( i = 0; i < count; i++ ) { #if !ARRAY pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i ); #else pt0 = points[i]; #endif cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 ); } #ifndef _EiC pt0.x = cvRound(box_vtx[3].x); pt0.y = cvRound(box_vtx[3].y); for( i = 0; i < 4; i++ ) { pt.x = cvRound(box_vtx[i].x); pt.y = cvRound(box_vtx[i].y); cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0); pt0 = pt; } #endif icenter.x = cvRound(center.x); icenter.y = cvRound(center.y); cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 ); cvShowImage( "rect & circle", img ); key = (char) cvWaitKey(0); if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC' break; #if !ARRAY cvClearMemStorage( storage ); #else free( points ); #endif } cvDestroyWindow( "rect & circle" ); return 0; }
// Scan the contours found in the binary image `img` for ellipse-like shapes
// and append one MvRotatedBox per accepted contour to `ellipse_vector`.
// Returns the number of accepted contours, or -1 if no contours were found.
//
// A contour passes if: it has >= 6 points, its area is large enough, its
// min-area box fits within the image, its height/width ratio lies within
// [min_lw_ratio, max_lw_ratio], and its area and perimeter are each within
// 25% of the inscribed ellipse's area and (Ramanujan-approximated) perimeter.
//
// NOTE(review): `method` is unused in this body; `m_contours`/`m_storage`/
// `bin_calc` are members presumably set by find_contour_and_check_errors —
// confirm against the class definition.
float mvContours::match_ellipse (IplImage* img, MvRBoxVector* ellipse_vector, COLOR_TRIPLE color, float min_lw_ratio, float max_lw_ratio, int method) {
    assert (img != NULL);
    assert (img->nChannels == 1);

    int n_contours = find_contour_and_check_errors(img);
    if (n_contours < 1 || m_contours == NULL)
        return -1;

    bin_calc.start();
    CvSeq* c_contour = m_contours;
    int n_circles = 0;

    // debug
    //mvWindow window("contours");

    // examine each contour, put the passing ones into the circle_vector
    for (int C = 0; C < n_contours; C++, c_contour = c_contour->h_next) {
        // debug
        /*cvZero (img);
        draw_contours (c_contour, img);
        window.showImage (img);
        cvWaitKey(0);*/

        // check that there are at least 6 points
        if (c_contour->total < 6) {
            continue;
        }

        // check the contour's area to make sure it isnt too small
        // (threshold: 0.1% of the image area)
        double area = cvContourArea(c_contour);
        if (area < img->width*img->height/1000) {
            DEBUG_PRINT ("Ellipse Fail: Contour too small!\n");
            continue;
        }

        // get min enclosing circle and radius
        //CvBox2D ellipse = cvFitEllipse2(c_contour);
        CvBox2D ellipse = cvMinAreaRect2(c_contour, m_storage);
        int height = ellipse.size.height;   // semi-axes a,b come from the box sides
        int width = ellipse.size.width;
        int a = height/2;
        int b = width/2;
        float height_to_width = static_cast<float>(height)/width;
        double perimeter = cvArcLength (c_contour, CV_WHOLE_SEQ, 1);

        // reject boxes larger than half the image or with negative sides
        if (height > img->width/2 || height < 0 || width > img->width/2 || width < 0) {
            continue;
        }

        // check length to width
        if (height_to_width < min_lw_ratio || height_to_width > max_lw_ratio) {
            DEBUG_PRINT ("Ellipse Fail: height_to_width = %6.2f\n", height_to_width);
            continue;
        }

        // do checks on area and perimeter:
        // ellipse area = pi*a*b; perimeter via Ramanujan's approximation
        double ellipse_area = (CV_PI*a*b);
        double ellipse_perimeter = CV_PI*(3*(a+b)-sqrt((3*a+b)*(a+3*b)));
        double area_ratio = area / ellipse_area;
        double perimeter_ratio = perimeter / ellipse_perimeter;
        DEBUG_PRINT ("Ellipse: area=%5.2lf/%5.2lf, perimeter=%5.2lf/%5.2lf\n", area, ellipse_area, perimeter, ellipse_perimeter);

        if (area_ratio < 0.75 || area_ratio > 1.25) {
            DEBUG_PRINT ("Ellipse Fail: Area: %6.2lf\n", area_ratio);
            continue;
        }
        if (perimeter_ratio < 0.75 || perimeter_ratio > 1.25) {
            DEBUG_PRINT ("Ellipse Fail: perimeter: %6.2lf\n", perimeter_ratio);
            continue;
        }

        // contour accepted: record it as a rotated box
        MvRotatedBox rbox;
        rbox.center.x = ellipse.center.x;
        rbox.center.y = ellipse.center.y;
        rbox.length = height;
        rbox.width = width;
        rbox.angle = ellipse.angle;
        rbox.m1 = color.m1;
        rbox.m2 = color.m2;
        rbox.m3 = color.m3;
        assign_color_to_shape (color, &rbox);
        rbox.validity = area_ratio;  // area ratio doubles as a confidence score
        ellipse_vector->push_back(rbox);

        //cvEllipse (img, cvPoint(ellipse.center.x,ellipse.center.y), cvSize(b,a), ellipse.angle, 0, 359, CV_RGB(50,50,50), 2);
        //window.showImage (img);
        //cvWaitKey(0);

        n_circles++;
    }

    bin_calc.stop();
    return n_circles;
}
//--------------------------------------------------------------------------------
// Find blobs in `input` whose contour area lies in (minArea, maxArea), keep the
// largest `nConsidered` of them (sorted by area), and fill the member `blobs`
// vector with their stats (bounding rects, min-area box, centroid, outline
// points). Returns the number of blobs found.
//
// bFindHoles selects CV_RETR_LIST (outer contours + holes) vs CV_RETR_EXTERNAL;
// bUseApproximation selects CV_CHAIN_APPROX_SIMPLE vs storing every point.
//
// NOTE(review): `inputCopy`, `teste`, `cvSeqBlobs`, `myMoments`, `track_comp`,
// `track_box`, `blobs`, `nBlobs` are class members — see the class definition.
int ContourFinder::findContours( ofxCvGrayscaleImage&  input,
									  int minArea,
									  int maxArea,
									  int nConsidered,
									  bool bFindHoles,
									  bool bUseApproximation) {

	reset();

	// opencv will clober the image it detects contours on, so we want to
	// copy it into a copy before we detect contours.  That copy is allocated
	// if necessary (necessary = (a) not allocated or (b) wrong size)
	// so be careful if you pass in different sized images to "findContours"
	// there is a performance penalty, but we think there is not a memory leak
	// to worry about better to create mutiple contour finders for different
	// sizes, ie, if you are finding contours in a 640x480 image but also a
	// 320x240 image better to make two ContourFinder objects then to use
	// one, because you will get penalized less.

	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) {
			inputCopy = input;
		} else {
			// we are allocated, but to the wrong size --
			// been checked for memory leaks, but a warning:
			// be careful if you call this function with alot of different
			// sized "input" images!, it does allocation every time
			// a new size is passed in....
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	teste = inputCopy.getCvImage();
	cvFindContours( teste, contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) ) {
		float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
		if( (area > minArea) && (area < maxArea) ) {
			if (nCvSeqsFound < TOUCH_MAX_CONTOUR_LENGTH){
				cvSeqBlobs[nCvSeqsFound] = contour_ptr;	 // copy the pointer
				nCvSeqsFound++;
			}
		}
		contour_ptr = contour_ptr->h_next;
	}

	// sort the pointers based on size
	if( nCvSeqsFound > 0 ) {
		qsort( cvSeqBlobs, nCvSeqsFound, sizeof(CvSeq*), qsort_carea_compare);
	}

	// now, we have nCvSeqsFound contours, sorted by size in the array
	// cvSeqBlobs let's get the data out and into our structures that we like
	for( int i = 0; i < MIN(nConsidered, nCvSeqsFound); i++ ) {
		blobs.push_back( Blob() );
		float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );

		cvMoments( cvSeqBlobs[i], myMoments );

		// this is if using non-angle bounding box
		CvRect rect	= cvBoundingRect( cvSeqBlobs[i], 0 );
		blobs[i].boundingRect.x      = rect.x;
		blobs[i].boundingRect.y      = rect.y;
		blobs[i].boundingRect.width  = rect.width;
		blobs[i].boundingRect.height = rect.height;

		// seeds CamShift tracking from the blob's bounding rect
		cvCamShift(teste, rect, cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ), &track_comp, &track_box);

		// this is for using angle bounding box
		CvBox2D32f box;
		box = cvMinAreaRect2( cvSeqBlobs[i] );

		blobs[i].angleBoundingRect.x	  = box.center.x;
		blobs[i].angleBoundingRect.y	  = box.center.y;
		blobs[i].angleBoundingRect.width  = box.size.height;
		blobs[i].angleBoundingRect.height = box.size.width;
		blobs[i].angle = box.angle;

		// assign other parameters
		blobs[i].area                = fabs(area);
		// cvContourArea is signed: negative area marks a hole
		blobs[i].hole                = area < 0 ? true : false;
		blobs[i].length 			 = cvArcLength(cvSeqBlobs[i]);
		blobs[i].centroid.x 		 = (int) (myMoments->m10 / myMoments->m00);
		blobs[i].centroid.y 		 = (int) (myMoments->m01 / myMoments->m00);
		blobs[i].lastCentroid.x 	 = (int) 0;
		blobs[i].lastCentroid.y 	 = (int) 0;

		// get the points for the blob:
		CvPoint           pt;
		CvSeqReader       reader;
		cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );

		for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, cvSeqBlobs[i]->total); j++ ) {
			CV_READ_SEQ_ELEM( pt, reader );
			blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
		}
		blobs[i].nPts = blobs[i].pts.size();
	}

	nBlobs = blobs.size();

	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
	if( storage != NULL ) { cvReleaseMemStorage(&storage); }

	return nBlobs;
}
// Scan the contours found in the binary image `img` for rectangle-like shapes
// and append one MvRotatedBox per accepted contour to `rbox_vector`.
// Returns the number of accepted contours, or -1 if no contours were found.
//
// `method` selects the strictness profile: 0 enforces a minimum contour area
// and tight area/perimeter ratios; 1 skips the size check and relaxes the
// ratio windows. `length` is always the long side; `angle` is adjusted by +90
// when the box's sides are swapped so it refers to the long axis.
//
// NOTE(review): `m_contours`/`m_storage`/`bin_calc` are members presumably set
// by find_contour_and_check_errors — confirm against the class definition.
float mvContours::match_rectangle (IplImage* img, MvRBoxVector* rbox_vector, COLOR_TRIPLE color, float min_lw_ratio, float max_lw_ratio, int method) {
    assert (img != NULL);
    assert (img->nChannels == 1);

    int n_contours = find_contour_and_check_errors (img);
    if (n_contours <= 0 || m_contours == NULL)
        return -1;

    bin_calc.start();
    CvSeq* c_contour = m_contours;
    int n_boxes = 0;

    // debug
    //mvWindow window("contours");

    // examine each contour, put the passing ones into the circle_vector
    for (int C = 0; C < n_contours; C++, c_contour = c_contour->h_next) {
        // debug
        /*cvZero (img);
        draw_contours (c_contour, img);
        window.showImage (img);
        cvWaitKey(0);
        */
        // check that there are at least 6 points
        if (c_contour->total < 6) {
            DEBUG_PRINT ("Rect Fail: Contour has less than 6 points\n");
            continue;
        }

        // check the contour's area to make sure it isnt too small
        // (method 0 only; threshold is 1/600 of the image area)
        double area = cvContourArea(c_contour);
        if (method == 0) {
            if (area < img->width*img->height/600) {
                DEBUG_PRINT ("Rect Fail: Contour too small!\n");
                continue;
            }
        }

        CvBox2D Rect = cvMinAreaRect2(c_contour, m_storage);
        float angle = Rect.angle;
        float length = Rect.size.height;
        float width = Rect.size.width;

        // depending on which is the long side we assign the sides and angle differently
        if (length < width) {
            length = Rect.size.width;
            width = Rect.size.height;
            angle += 90;
        }

        if (length/width < min_lw_ratio || length/width > max_lw_ratio) {
            DEBUG_PRINT ("Rect Fail: length/width = %6.2f\n", length/width);
            continue;
        }

        // compare the contour's area/perimeter against the fitted box's
        double perimeter = cvArcLength (c_contour, CV_WHOLE_SEQ, 1);
        double perimeter_ratio = perimeter / (2*length+2*width);
        double area_ratio = area / (length*width);

        if (method == 0) {
            // strict profile
            if (area_ratio < 0.75 || perimeter_ratio > 1.2 || perimeter_ratio < 0.85) {
                DEBUG_PRINT ("Rect Fail: Area / Peri: %6.2lf / %6.2lf\n", area_ratio, perimeter_ratio);
                continue;
            }
        }
        else if (method == 1) {
            // relaxed profile
            if (area_ratio < 0.55 || perimeter_ratio > 1.4 || perimeter_ratio < 0.75) {
                DEBUG_PRINT ("Rect Fail: Area / Peri: %6.2lf / %6.2lf\n", area_ratio, perimeter_ratio);
                continue;
            }
        }

        // contour accepted: record it as a rotated box
        MvRotatedBox rbox;
        rbox.center.x = Rect.center.x;
        rbox.center.y = Rect.center.y;
        rbox.length = length;
        rbox.width = width;
        rbox.angle = angle;
        rbox.m1 = color.m1;
        rbox.m2 = color.m2;
        rbox.m3 = color.m3;
        assign_color_to_shape (color, &rbox);
        rbox.validity = area_ratio;  // area ratio doubles as a confidence score
        rbox_vector->push_back(rbox);

        // draw a line to indicate the angle
        /*CvPoint p0, p1;
        int delta_x = length/2 * -sin(angle*CV_PI/180.f);
        int delta_y = length/2 * cos(angle*CV_PI/180.f);
        p0.x = x - delta_x;  p0.y = y - delta_y;
        p1.x = x + delta_x;  p1.y = y + delta_y;
        cvLine (img, p0, p1, CV_RGB(50,50,50), 2);
        */
        n_boxes++;
    }

    bin_calc.stop();
    return n_boxes;
}