// Computes aggregate shape features over all contours in src whose area is
// at least 1000 px^2: total perimeter, total area, and roundness
// P^2 / (4*pi*A) (1.0 for a perfect circle, -1 when no contour qualified).
// If mask == 2 the qualifying-contour count is appended as a fourth feature.
// NOTE: cvFindContours modifies src in place.
vector<float> feature::getPAR(IplImage *src, int mask) {
    float perimeter = 0, area = 0, rc, i = 0;
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;

    cvFindContours(src, storage, &contours, sizeof(CvContour), CV_RETR_LIST);

    for (CvSeq* c = contours; c != NULL; c = c->h_next) {
        if (cvContourArea(c) < 1000)   // skip small noise contours
            continue;
        perimeter += cvArcLength(c);
        area += cvContourArea(c);
        i++;
        //qDebug("\tmask = %d, i = %d, perimeter = %f, area = %f", mask, i, perimeter, area);
    }

    // Roundness. BUG FIX: use CV_PI rather than the 3.14 approximation, which
    // skewed the circularity feature by ~0.05%.
    if (area == 0)
        rc = -1;
    else
        rc = perimeter * perimeter / (4 * CV_PI * area);

    // form feature vector based on mask
    vector<float> PAR({perimeter, area, rc});
    if (mask == 2) {
        PAR.push_back(i);  // contour count as extra feature
    }

    cvReleaseMemStorage(&storage);
    return PAR;
}
// Returns 1 when the two contours have comparable size: both the
// area ratio and the bounding-rect height ratio (always >= 1) must not
// exceed their respective maxima; returns 0 otherwise.
static char ratioCheck(CvSeq* c1, CvSeq* c2, double areaRatioMax, double heightRatioMax){
    double area1 = fabs(cvContourArea(c1, CV_WHOLE_SEQ, 0));
    double area2 = fabs(cvContourArea(c2, CV_WHOLE_SEQ, 0));
    CvRect r1 = ((CvContour *) c1)->rect;
    CvRect r2 = ((CvContour *) c2)->rect;
    double height1 = r1.height;
    double height2 = r2.height;

    // BUG FIX: the height comparison previously assigned its result into
    // areaRatio (which was then overwritten), leaving heightRatio read
    // uninitialized below — undefined behavior.
    double heightRatio = (height1 > height2) ? height1 / height2
                                             : height2 / height1;
    double areaRatio = (area1 > area2) ? area1 / area2
                                       : area2 / area1;

    if (areaRatio <= areaRatioMax && heightRatio <= heightRatioMax) {
        return 1;
    }
    return 0;
}
//-------------------------------------------------------------------------------- bool sort_carea_compare( const CvSeq* a, const CvSeq* b) { // use opencv to calc size, then sort based on size float areaa = fabs(cvContourArea(a, CV_WHOLE_SEQ)); float areab = fabs(cvContourArea(b, CV_WHOLE_SEQ)); //return 0; return (areaa > areab); }
//-------------------------------------------------------------------------------- static int qsort_carea_compare( const void* _a, const void* _b) { int out = 0; // pointers, ugh.... sorry about this CvSeq* a = *((CvSeq **)_a); CvSeq* b = *((CvSeq **)_b); // use opencv to calc size, then sort based on size float areaa = fabs(cvContourArea(a, CV_WHOLE_SEQ)); float areab = fabs(cvContourArea(b, CV_WHOLE_SEQ)); // note, based on the -1 / 1 flip // we sort biggest to smallest, not smallest to biggest if( areaa > areab ) { out = -1; } else { out = 1; } return out; }
// True when the contour's absolute area lies strictly between
// min_area and max_area.
int Contours::areaFilter(double min_area,double max_area){
    const double a = fabs(cvContourArea(this->c, CV_WHOLE_SEQ));
    return (a > min_area) && (a < max_area);
}
//When the user clicks in the window... void onMouse(int event, int x, int y, int flags, void *params){ //params points to the contour sequence. Here we cast the pointer to CvContour instead of CvSeq. //CvContour is in fact an extension of CvSeq and is the structure used by cvFindContours, if we cast //to CvSeq, we won't be able to access the fields specific to CvContour. CvContour *contours = *(CvContour **)params; CvContour *ctr; //This will point to the specific contour the user clicked in. double area; //This will hold the area of the contour the user clicked in. if(!contours)return; //If there are no contours, we don't bother doing anything. //"event" tells us what occured switch(event){ case CV_EVENT_LBUTTONDOWN: //single click case CV_EVENT_LBUTTONDBLCLK: //double click printf("Click: %d %d\n",x,y); //Write out where the user clicked. //Here we retrieve the inner-most contour the user clicked in. If there is no such contour, //the function returns a null pointer, which we need to check against. ctr = contourFromPosition(contours, x, y); if(ctr){ //The user did click inside something //Calculate the area of the contour using cvContourArea. CV_WHOLE_SEQ means we want the area of the whole contour. //Important: we need to calculate the absolute value of the result using fabs because cvContourArea can return negative values. area = fabs(cvContourArea(ctr, CV_WHOLE_SEQ,0)); printf("Area: %f\n",area); //and print the result out. } break; case CV_EVENT_LBUTTONUP: //single click up case 10: //double-click up? (not documented) break; } }
// Counts the contours in the linked list whose area exceeds 10.0 px^2,
// optionally logging each area when debug output is enabled.
size_t catcierge_haar_matcher_count_contours(catcierge_haar_matcher_t *ctx, CvSeq *contours)
{
	size_t count = 0;

	assert(ctx);
	assert(ctx->args);

	if (!contours)
		return 0;

	for (CvSeq *cur = contours; cur; cur = cur->h_next)
	{
		double area = cvContourArea(cur, CV_WHOLE_SEQ, 0);
		int big_enough = (area > 10.0);

		if (ctx->super.debug)
			printf("Area: %f %s\n", area, big_enough ? "" : "(too small)");

		if (big_enough)
			count++;
	}

	return count;
}
// Finds a contour covering roughly 1/6 .. 2/3 of the image area, fits a
// minimal rotated rectangle to it and stores it in *box.
// Returns 0 on success, 1 when no suitable contour exists.
int contorsFindBox(IplImage *src, CvMemStorage* storage, CvBox2D *box)
{
    CvSeq *contours;

    // BUG FIX: the image area used to be computed inside assert(), so under
    // NDEBUG the expression was compiled out and `area` stayed uninitialized.
    double area = (double)src->width * src->height;
    assert(area > 0);

    int ret = cvFindContours(src, storage, &contours, sizeof(CvContour),
                             CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if (ret == 0)
        return 1;   // no contours at all

    for (CvSeq *c = contours; c != NULL; c = c->h_next) {
        // Simplify the contour before measuring it.
        c = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1);
        double contour_area = fabs(cvContourArea(c, CV_WHOLE_SEQ, 0));
        // image/contour ratio in (1.5, 6.0)  <=>  contour covers 1/6 .. 2/3.
        double ratio = area / contour_area;
        if (ratio > 1.5 && ratio < 6.0) {
            CvBox2D b = cvMinAreaRect2(c, NULL);
            memcpy(box, &b, sizeof(CvBox2D));
            return 0;
        }
    }
    return 1;
}
void sharingan() { int lowtherd =120; int hightherd=130; int small_size=500; int contour_num; cvCvtColor(vision,gray_vision,CV_BGR2GRAY); //Gauss smooth cvSmooth( gray_vision,gray_vision,CV_GAUSSIAN,3,3,0,0); //Canny edge detect cvCanny(gray_vision,gray_vision,lowtherd,hightherd,3); //Threshold cvThreshold(gray_vision,gray_vision,0,255,CV_THRESH_BINARY); //picture used to display //find countor CvSeq * fc=NULL; CvSeq * c; cvClearMemStorage(mem); contour_num=cvFindContours(gray_vision,mem,&fc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0)); // printf("find counters:%d\n",contour_num); c=fc; cvCopyImage(blank,road_vision); cvCopyImage(blank,sign_vision); sign_flag=0; line_num=0; corn_num=0; while(c!=NULL) { CvBox2D rect = cvMinAreaRect2(c,mem); double width=rect.size.height>rect.size.width?rect.size.height:rect.size.width; double height=rect.size.height<=rect.size.width?rect.size.height:rect.size.width; if(height*width>small_size) { double s; s=cvContourArea(c,CV_WHOLE_SEQ,0); if(s>500) { sign_flag=1; cvDrawContours(sign_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0)); } else if(s<=500) { if(width>50&&height<15) { line_box[line_num]=rect; line_num++; } else { corn_box[line_num]=rect; corn_num++; } cvDrawContours(road_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0)); } } c=c->h_next; } }
void split_sign() { CvSeq * sc; CvSeq * c; CvSeq * cmax; cvClearMemStorage(mem); cvFindContours(sign_vision,mem,&sc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0)); double smax=0; double s; c=sc; while(c!=NULL) { s=cvContourArea(c,CV_WHOLE_SEQ,0); if(s>smax) { smax=s; cmax=c; } c=c->h_next; } sign_rect=cvBoundingRect(cmax,0); cvSetImageROI(vision,sign_rect); reg_vision= cvCreateImage(cvSize(sign_rect.width,sign_rect.height),8,3); cvCopyImage(vision,reg_vision); cvResetImageROI(vision); }
void find_contour(struct ctx *ctx) { double area, max_area = 0.0; CvSeq *contours, *tmp, *contour = NULL; /* cvFindContours modifies input image, so make a copy */ cvCopy(ctx->thr_image, ctx->temp_image1, NULL); cvFindContours(ctx->temp_image1, ctx->temp_st, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0)); /* Select contour having greatest area */ for (tmp = contours; tmp; tmp = tmp->h_next) { area = fabs(cvContourArea(tmp, CV_WHOLE_SEQ, 0)); if (area > max_area) { max_area = area; contour = tmp; } } /* Approximate contour with poly-line */ if (contour) { contour = cvApproxPoly(contour, sizeof(CvContour), ctx->contour_st, CV_POLY_APPROX_DP, 2, 1); ctx->contour = contour; } }
int main (int argv, char** argc[]) { int ncell = 0, prev_ncontour = 0, same_count = 0; ////while (!worker->CancellationPending) { ////worker->ReportProgress(50, String::Format(rm->GetString("Progress_Analyze_FoundNCell"), title, ncell)); cvConvert(input_morph, tmp8UC1); cvClearMemStorage(storage); int ncontour = cvFindContours(tmp8UC1, storage, &first_con, sizeof(CvContour), CV_RETR_EXTERNAL); if (ncontour == 0) break; // finish extract cell if (ncontour == prev_ncontour) { cvErode(input_morph, input_morph); same_count++; } else same_count = 0; prev_ncontour = ncontour; cur = first_con; while (cur != nullptr) { double area = fabs(cvContourArea(cur)); if ((area < 3000.0) || (same_count > 10)) { int npts = cur->total; CvPoint *p = new CvPoint[npts]; cvCvtSeqToArray(cur, p); cvFillPoly(out_single, &p, &npts, 1, cvScalar(255.0)); // move to single cvFillPoly(input_morph, &p, &npts, 1, cvScalar(0.0)); // remove from input delete[] p; ncell++; } cur = cur->h_next; } ////} }
/* Returns the contour's absolute area, computing it lazily:
 * -1 marks "not yet computed"; subsequent calls hit the cache. */
double Contours::getArea(){
    if(this->area != -1)
        return this->area;  // cached from a previous call
    this->area = fabs(cvContourArea(this->c, CV_WHOLE_SEQ));
    return this->area;
}
// Returns the area of the convex hull of the blob's edge points.
// With a NULL storage argument, cvConvexHull2 allocates the hull in the
// input sequence's own storage (per the OpenCV 1.x C API), so nothing is
// released here.
// NOTE(review): when the blob has no edges this returns the blob's
// *perimeter*, not an area — looks inconsistent; presumably inherited from
// the original blob library. Confirm before relying on the fallback value.
double CBlobGetHullArea::operator()(const CBlob &blob) const
{
	if(blob.Edges() != NULL && blob.Edges()->total > 0)
	{
		CvSeq *hull = cvConvexHull2( blob.Edges(), 0, CV_CLOCKWISE, 1 );
		// fabs: cvContourArea is signed by contour orientation.
		return fabs(cvContourArea(hull));
	}
	return blob.Perimeter();
}
/**
 * @internal
 * @brief Find the blobs in the received image.
 * What it looks for in an image is bright areas, so typically
 * the image result of a background subtraction is a good input.
 *
 * Fills m_blobs with every contour whose area lies within
 * [m_minBlobArea, m_maxBlobArea], then calls extractBlobsInformation().
 *
 * @param[in] inImage image where the blobs will be searched
 */
void BlobFinder::update( const Image& inImage )
{
	// Check valid
	if ( !isValid() )
		THROW_EXCEPTION( "Trying to compute blobs, with the BlobFinder not initialized. Init method should be called" );

	// Check blob area... and if it has not been set, set it to the max and min (no lower than 10, to avoid opencv issues)
	if ( (m_minBlobArea < 0) || (m_maxBlobArea < 0) )
	{
		m_minBlobArea = 10;
		m_maxBlobArea = (float)inImage.getWidth() * (float)inImage.getHeight();
	}

	// Check both images have same size and it is the same than the filter size
	if( (inImage.getNChannels() != 1) && (inImage.getNChannels() != 3) )
		THROW_EXCEPTION( "Trying to compute blobs on images with non supporte format -> only RGB or GRAYSCALE images supported" );

	// Request temp image to work with.
	// NOTE(review): if anything below threw before releaseImage, this temp
	// image would leak; currently no throwing call sits in between.
	IplImage* cvTempImage = ImageResourceManager::getSingleton().getImage( inImage.getWidth(), inImage.getHeight(), 1 );

	// If they have different number of channels -> convert them
	if ( inImage.getNChannels() == 3 )
		cvConvertImage( &inImage.getCVImage(), cvTempImage );
	// just one channel -> Copy the input image
	else
		cvCopy( &inImage.getCVImage(), cvTempImage );

	// Find blobs (openCV contours). cvFindContours modifies cvTempImage.
	int retrivalMode = CV_RETR_EXTERNAL; // CV_RETR_CCOMP
	cvFindContours( cvTempImage, m_findContoursStorage, &m_contour, sizeof(CvContour), retrivalMode, CV_CHAIN_APPROX_SIMPLE );

	// Iterate through found contours and store them..
	m_blobs.clear();
	for( ; m_contour != 0; m_contour = m_contour->h_next )
	{
		// Get contour area (fabs: sign depends on contour orientation)
		double area = fabs( cvContourArea( m_contour, CV_WHOLE_SEQ ) );

		// If it has a good size (between min and max)
		if ( ( area > m_maxBlobArea ) || ( area < m_minBlobArea ) )
			continue;

		// Store new Blob
		m_blobs.push_back( Blob( area, m_contour ) );
	}

	// Release temp image
	ImageResourceManager::getSingleton().releaseImage( cvTempImage );

	// Extract information of found blobs
	extractBlobsInformation();

	// Clear OpenCV contours storage
	cvClearMemStorage( m_findContoursStorage );
}
/* Cleans a foreground mask and keeps only large connected components:
 * the mask is morphologically opened/closed, contours are scanned, small
 * ones are dropped, survivors are smoothed (poly-approx or convex hull,
 * chosen by poly1_hull0) and redrawn filled into the mask.
 *
 * NOTE(review): `contours` is passed by value, so the list assigned at
 * cvEndFindContours never reaches the caller — only the repainted mask
 * escapes this function. Confirm whether a CvSeq** was intended.
 * NOTE(review): `len` is the contour AREA (cvContourArea) although the
 * comments and the (height+width)/perimScale threshold describe a
 * perimeter — looks like cvArcLength was intended; verify against the
 * original "Learning OpenCV" connected-components sample. */
static void find_connected_components (IplImage * mask, int poly1_hull0, float perimScale, CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);

  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
    /* calculate perimeter len threshold: */
    double q = (mask->height + mask->width) / perimScale;
    /* Get rid of blob if its perimeter is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if its large enough */
      CvSeq *c_new;
      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new = cvApproxPoly (c, sizeof (CvContour), mem_storage,
            CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }
      /* Replace the raw contour with the smoothed one in the scanner. */
      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours (&scanner);

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);
  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0, 0));
}
// Zone-dependent area filter: the image zone of the contour's (x, y) point
// selects per-zone min/max area bounds (index 0 is unused padding).
// NOTE: the min_area / max_area parameters are ignored by this overload;
// the bounds come from the per-zone tables instead.
int Contours::areaFilter(double min_area,double max_area){
    const double lowerByZone[] = {0, 200, 150, 65, 50};
    //const double upperByZone[] = {0, 800, 400, 200, 100};
    const double upperByZone[] = {0, 1000, 550, 350, 250};

    const double a = fabs(cvContourArea(this->c, CV_WHOLE_SEQ));
    const int zone = getPointZone(this->x, this->y);
    return (a > lowerByZone[zone]) && (a < upperByZone[zone]);
}
//This boolean function returns whether the (x,y) point is inside the given contour. CvContour * LargestContour(void *contours){ //We know that a point is inside a contour if we can find one point in the contour that is immediately to the left, //one that is immediately on top, one that is immediately to the right and one that is immediately below the (x,y) point. //We will update the boolean variables below when these are found: char found_left=0, found_top=0, found_right=0, found_bottom=0; int count, i; //Variables used for iteration if(!contours)return 0; //Don't bother doing anything if there is no contour. CvContour *output=NULL, *contour = *(CvContour **)contours; float maxArea=0.0f; while(contour){ //We loop through the list of contours... float area=0.0; area = fabs(cvContourArea(contour, CV_WHOLE_SEQ,0)); if(maxArea < area){ maxArea = area; output = contour; } count = contour->total; //The total field holds the number of points in the contour. printf("outside contour : area=%f, points=%d\n",area, count); CvContour *inside=(CvContour *)contour->v_next; while(inside){ //Check to see if the point the user clicked on is inside the current contour, if yes... //output = contour; //Since the point is inside the contour, set the output accordingly. area = fabs(cvContourArea(inside, CV_WHOLE_SEQ,0)); count = inside->total; //The total field holds the number of points in the contour. printf(" inside Area: area=%f, points=%d\n", area, count); //and print the result out. //We now need to check all the contours inside the one we are processing. We go down one level by following the v_next (vertical next) pointer in the //CvSeq/CvContour structure. From then, we recurse into this function to go through all the contours. inside = (CvContour *)inside->v_next; } //Once we're done with this contour, we need to move to the following contour that's not included in the one we just processed. //This is pointed to by the h_next (horizontal next) pointer. 
contour = (CvContour *)(contour->h_next); } printf("max contour %p, area=%f\n",output, maxArea); return output; }
/* Script command handler: computes the absolute area of the picked OpenCV
 * contour sequence and stores it, formatted as text, in the interpreter
 * variable "contour_area". Warns and returns when the sequence is NULL. */
static COMMAND_FUNC( do_ocv_area )
{
    OpenCV_Seq *ocv_seq_p;
    ocv_seq_p=PICK_OCV_SEQ("sequence");
    if( ocv_seq_p == NO_OPENCV_SEQ ) return;
    if( ocv_seq_p->ocv_seq == NULL ) {
        sprintf(ERROR_STRING, "do_ocv_area: sequence is NULL.\n");
        WARN(ERROR_STRING);
        return;
    }
    /* cvContourArea grew a third (orientation) parameter in OpenCV 2.x;
     * pick the matching call at compile time. */
    /* Not sure when this change took effect... */
    //#if CV_VERSION_INT > MAKE_VERSION_INT(1,1,0)
#if CV_VERSION_INT > MAKE_VERSION_INT(2,0,0)
    float area = fabs(cvContourArea(ocv_seq_p->ocv_seq, CV_WHOLE_SEQ, 0 ));
#else
    float area = fabs(cvContourArea(ocv_seq_p->ocv_seq, CV_WHOLE_SEQ ));
#endif
    /* "%f" of a float always fits in 30 chars. */
    char area_string[30];
    sprintf(area_string, "%f", area);
    ASSIGN_VAR("contour_area", area_string);
}
// Walks the contour list and returns the contour with the largest absolute
// area, or NULL when the list is empty.
CvSeq* BallIdentification::findLargerBlob(CvSeq* contours){
	CvSeq* best = NULL;
	double bestArea = 0;
	for (CvSeq* cur = contours; cur != NULL; cur = cur->h_next){
		const double area = fabs(cvContourArea(cur, CV_WHOLE_SEQ, false));
		if (area > bestArea){   // new champion
			bestArea = area;
			best = cur;
		}
	}
	return best;
}
double BlobContour::GetArea() { // is calculated? if (m_area != -1) { return m_area; } if (IsEmpty()) return 0; m_area = fabs(cvContourArea(GetContourPoints())); return m_area; }
// Estimates per-frame volumes of contour index c by integrating contour
// areas across adjacent z-slides with the trapezoid rule (slice thickness
// deltaZ, areas scaled by calibration^2). avgVolume receives the mean
// frame volume.
std::vector<double> CaptureManager::GetVolumes(int c, float &avgVolume)
{
	std::vector<double> v(frameCount, 0.0);
	int goodSteps = 0;
	float totalVolume = 0.0;
	for (int i=0; i<frameCount; i++) {
		float frameVolume = 0.0;
		for (int j=0; j<slideCount-1; j++) {
			// Only integrate between adjacent slides that both track contour c.
			if (Access(i,j, false, true)->contourArray.size() > c && Access(i,j+1, false, true)->contourArray.size() > c) {
				float area1 = calibration*calibration*fabs(cvContourArea(Access(i,j, false, true)->contourArray[c]));
				float area2 = calibration*calibration*fabs(cvContourArea(Access(i,j+1, false, true)->contourArray[c]));
				frameVolume += deltaZ*(area1 + area2)/2.0f;  // trapezoid rule
			}
		}
		// NOTE(review): goodSteps counts every frame, even those where no
		// slide pair matched (frameVolume stays 0) — unlike GetAreas, which
		// only counts frames with data. Confirm whether empty frames should
		// be excluded from the average.
		goodSteps++;
		totalVolume += (v[i] = frameVolume);
	}
	avgVolume = (goodSteps ? totalVolume/goodSteps : 0);
	return v;
}
// Thresholds the input image and returns the contour with the largest
// absolute area, or NULL when the thresholded image has no contours.
// NOTE: the returned sequence lives in a CvMemStorage that must outlive the
// caller's use of the contour, so it is not released on the success path.
static CvSeq *_locate_puzzle_contour(IplImage *in) {
    IplImage *threshold_image = _threshold(in);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contour = 0;
    cvFindContours(threshold_image, storage, &contour, sizeof(CvContour),
                   CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    cvReleaseImage(&threshold_image);

    // BUG FIX: cvContourArea was previously called on `contour` even when
    // cvFindContours found nothing (NULL) — a crash on blank images.
    if (contour == NULL) {
        cvReleaseMemStorage(&storage);
        return NULL;
    }

    double max_area = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
    CvSeq *max_contour = contour;
    for (CvSeq *p = contour; p != 0; p = p->h_next) {
        double area = fabs(cvContourArea(p, CV_WHOLE_SEQ));
        if (area > max_area) {
            max_area = area;
            max_contour = p;
        }
    }
    return max_contour;
}
int Contours::boxAreaFilter(double minAreaRatio){ CvBox2D box; CvMemStorage* mem = cvCreateMemStorage(0); double boxArea,contourArea,dif,ret; box=cvMinAreaRect2(this->c,mem); boxArea=box.size.width*box.size.height; contourArea=fabs(cvContourArea(this->c,CV_WHOLE_SEQ)); dif=boxArea-contourArea; ret=(1- (dif/boxArea)); cvReleaseMemStorage( &mem ); return ret > minAreaRatio; }
// Binarizes the image file in place: loads it grayscale, adaptive-thresholds,
// removes tiny noise contours (area < 2 px^2) in two passes, and saves the
// result back over fileName.
void COpenCVCheck::OpenCVBinary(CString fileName)
{
    CvScalar colors[] = {{255,255,255},{0,0,0}};
    IplImage* pImg; // loaded grayscale image
    if ((pImg = cvLoadImage(fileName, 0)) != 0)
    {
        IplImage* dst = NULL;
        dst = cvCreateImage(cvSize(pImg->width, pImg->height), IPL_DEPTH_8U, 1);
        //cvThreshold(pImg,dst,185,255,CV_THRESH_BINARY);
        // Adaptive binarization.
        cvAdaptiveThreshold(pImg, dst, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 5, 3);
        ReverseColor(dst);
        for (int kk = 0; kk < 2; kk++) // two denoising passes
        {
            CvSeq *contours;
            CvMemStorage* storage = cvCreateMemStorage(0);
            // cvFindContours treats black as the background color.
            cvFindContours(dst, storage, &contours, sizeof(CvContour),
                           CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
            while (contours)
            {
                //approximate contour with accuracy proportional
                CvSeq* result = cvApproxPoly(contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1);
                //to filter the noisy contour
                if (fabs(cvContourArea(result, CV_WHOLE_SEQ)) < 2)
                {
                    if (result->total > 0)
                    {
                        // Paint every point of the tiny contour black.
                        // NOTE(review): elements are CvPoint; the CvRect cast
                        // works only because CvRect also starts with x, y.
                        for (int i = 0; i < (result ? result->total : 0); i++)
                        {
                            CvRect* r = (CvRect*)cvGetSeqElem(result, i);
                            cvSet2D(dst, r->y, r->x, colors[1]);
                        }
                    }
                }
                contours = contours->h_next;
            }
            // BUG FIX: this storage was created on every pass but never
            // released, leaking memory each time the function ran.
            cvReleaseMemStorage(&storage);
        }
        ReverseColor(dst);
        ClearNoise(dst);
        cvSaveImage(fileName, dst);
        cvReleaseImage(&dst);
        cvReleaseImage(&pImg);
    }
}
// Absolute area of the blob's convex hull, or 0 when no hull is available.
double BlobGetHullArea::operator()(Blob &blob)
{
	CvSeq *hull = blob.GetConvexHull();
	if (!hull)
		return 0;
	const double hullArea = fabs(cvContourArea(hull));
	cvClearSeq(hull);  // drop the temporary hull sequence's elements
	return hullArea;
}
// Collects the absolute area of contour c in every frame (0 when the frame
// lacks that contour) and reports the average over frames that have it.
std::vector<double> CaptureManager::GetAreas(int c, float &avgArea)
{
	std::vector<double> areas(frameCount, 0.0);
	int framesWithContour = 0;
	float areaSum = 0.0;
	for (int i = 0; i < frameCount; i++) {
		if (book[i*offset]->contourArray.size() > c) {
			framesWithContour++;
			areas[i] = fabs(cvContourArea(book[i*offset]->contourArray[c]));
			areaSum += areas[i];
		}
	}
	avgArea = (framesWithContour ? areaSum/framesWithContour : 0);
	return areas;
}
/* OpenCV equivalent of Matlab's bwareaopen. image must be 8 bits, 1 channel,
   black and white (objects) with values 0 and 255 respectively.
   Removes white specks and fills black holes whose |area| <= size.
   Returns the number of contours repainted (BUG FIX: the counter was
   declared and documented but never incremented, so 0 was always returned). */
int bwareaopen_(IplImage *image, int size)
{
    CvMemStorage *storage;
    CvSeq *contour = NULL;
    CvScalar white, black;
    IplImage *input = NULL; /* cvFindContours changes the input */
    double area;
    int foundCountours = 0;

    black = CV_RGB( 0, 0, 0 );
    white = CV_RGB( 255, 255, 255 );

    if (image == NULL || size == 0)
        return(foundCountours);

    input = cvCloneImage(image);
    storage = cvCreateMemStorage(0); /* ensure enough room here */

    cvFindContours(input, storage, &contour, sizeof (CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
    while (contour)
    {
        /* Signed area: the sign encodes contour orientation, which here
           separates white dots (negative) from black holes (positive). */
        area = cvContourArea(contour, CV_WHOLE_SEQ );
        if (-size <= area && area <= 0)
        {
            /* removes white dots */
            cvDrawContours( image, contour, black, black, -1, CV_FILLED, 8 );
            foundCountours++;
        }
        else if (0 < area && area <= size)
        {
            /* fills in black holes */
            cvDrawContours( image, contour, white, white, -1, CV_FILLED, 8 );
            foundCountours++;
        }
        contour = contour->h_next;
    }

    cvReleaseMemStorage( &storage ); /* deallocates the CvSeq as well */
    cvReleaseImage(&input);

    return(foundCountours);
}
// Estimates a per-step "deformation" for contour c between consecutive
// frames: contour i is translated by the trajectory displacement so it
// aligns with contour i+1, both are rasterized filled onto a scratch image,
// contours of that union are re-detected, and the deformation is
// 2*unionArea - area_i - area_{i+1} (0 when the shapes coincide exactly).
// avgDef receives the mean over steps where both trajectory points are valid.
std::vector<double> CaptureManager::GetDeformation(int c, float &avgDef)
{
	std::vector<CvPoint> traj = GetTrajectory(c);
	std::vector<double> areas = GetAreas(c,avgDef);  // avgDef used as scratch; overwritten below
	std::vector<double> defs(frameCount-1, 0.0);
	float totalDef = 0;
	int goodSteps = 0;
	CvSeq *h_next;
	ImagePlus *img_ = new ImagePlus(img);
	IplImage *gray = cvCreateImage(cvGetSize(img.orig), IPL_DEPTH_8U, 1);
	IplImage *edge = cvCreateImage(cvGetSize(img.orig), IPL_DEPTH_8U, 1);
	for (int i=0; i<frameCount-1; i++) {
		// Skip steps where either endpoint is the (-1,-1) "no data" sentinel.
		if (!(MyPoint(-1,-1)==traj[i] || MyPoint(-1,-1)==traj[i+1])) {
			// Shift contour i by the trajectory displacement so the two
			// contours overlap before comparing shapes.
			wxPoint *ps = ContourToPointArray(Access(i,0,false, true)->contourArray[c], MyPoint(traj[i+1])-MyPoint(traj[i]).ToWxPoint());
			img_->RemoveAllContours();
			img_->AddContour(ps,Access(i,0,false, true)->contourArray[c]->total);
			delete[] ps;
			CvSeq *seq = Access(i+1,0,false, true)->contourArray[c];
			CvSeq *oseq = img_->contourArray[0];
			//Draw both contours on the temporary image
			cvZero(img_->orig);
			// Temporarily unlink seq from its siblings so cvDrawContours
			// paints only this one contour, then restore the link.
			h_next = seq->h_next;
			seq->h_next = NULL;
			cvDrawContours(img_->orig, seq, CV_RGB(255,255,255), CV_RGB(0,0,0), 1, CV_FILLED, CV_AA, cvPoint(0,0));
			seq->h_next = h_next;
			cvDrawContours(img_->orig, oseq, CV_RGB(255,255,200), CV_RGB(0,0,0), 1, CV_FILLED, CV_AA, cvPoint(0,0));
			//detect contours on the drawn image:
			FindContoursPlugin::ProcessImage_static(img_,gray,edge,150,50,3,1);
			// Sum the areas of every contour found in the union image.
			float unionArea = 0;
			for (int j=0; j<img_->contourArray.size(); j++) {
				unionArea += fabs(cvContourArea(img_->contourArray[j]));
			}
			goodSteps++;
			totalDef += (defs[i] = 2*unionArea - areas[i] - areas[i+1]);
		}
	}
	cvReleaseImage(&gray);
	cvReleaseImage(&edge);
	delete img_;
	avgDef = (goodSteps ? totalDef/goodSteps : 0);
	return defs;
}
//各種輪郭の特徴量の取得 void GetContourFeature(CvSeq *Contour) { //面積 double Area = fabs(cvContourArea(Contour, CV_WHOLE_SEQ)); //周囲長 double Perimeter = cvArcLength(Contour); //円形度 double CircleLevel = 4.0 * CV_PI * Area / (Perimeter * Perimeter); //傾いていない外接四角形領域(フィレ径) CvRect rect = cvBoundingRect(Contour); if(perimeter_max < Perimeter) { perimeter_max = Perimeter; max_perimeter_contor = Contour; } }