int catcierge_haar_matcher_find_prey(catcierge_haar_matcher_t *ctx,
									IplImage *img, IplImage *thr_img,
									match_result_t *result, int save_steps)
{
	catcierge_haar_matcher_args_t *args = NULL;
	IplImage *thr_img2 = NULL;
	CvSeq *contours = NULL;
	size_t contour_count = 0;
	assert(ctx);
	assert(img);
	assert(ctx->args);
	args = ctx->args;

	// thr_img is modified by FindContours so we clone it first.
	thr_img2 = cvCloneImage(thr_img);

	cvFindContours(thr_img, ctx->storage, &contours,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	// If we get more than 1 contour we count it as prey. At least something
	// is intersecting the white area to split up the image.
	contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

	// If we didn't find any prey in the first pass, optionally run a second,
	// more aggressive pass (erode + open) before counting contours again.
	if ((args->prey_steps >= 2) && (contour_count == 1))
	{
		IplImage *erod_img = NULL;
		IplImage *open_img = NULL;
		CvSeq *contours2 = NULL;

		erod_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
		cvErode(thr_img2, erod_img, ctx->kernel3x3, 3);
		if (ctx->super.debug) cvShowImage("haar eroded img", erod_img);

		open_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
		cvMorphologyEx(erod_img, open_img, NULL, ctx->kernel5x1, CV_MOP_OPEN, 1);
		if (ctx->super.debug) cvShowImage("haar opened img", erod_img);

		cvFindContours(open_img, ctx->storage, &contours2,
			sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));
		cvReleaseImage(&erod_img);
		cvReleaseImage(&open_img);

		contour_count = catcierge_haar_matcher_count_contours(ctx, contours2);
	}

	if (ctx->super.debug)
	{
		cvDrawContours(img, contours, cvScalarAll(0), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
		cvShowImage("Haar Contours", img);
	}

	cvReleaseImage(&thr_img2);

	return (contour_count > 1);
}
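catcierge_haar_matcher_count_contours() is part of the same project and is not shown here. As a minimal sketch only, assuming it simply walks the h_next-linked contour list and skips contours that are too small to matter (the min_area threshold and the function name below are made up, and <math.h> plus the OpenCV C headers are assumed to be included), such a counter might look like:

/* Sketch only -- not the real catcierge implementation. */
static size_t count_contours_sketch(CvSeq *contours, double min_area)
{
	size_t count = 0;
	CvSeq *c = NULL;

	for (c = contours; c != NULL; c = c->h_next)
	{
		/* ignore tiny specks so image noise does not get counted as prey */
		if (fabs(cvContourArea(c, CV_WHOLE_SEQ, 0)) >= min_area)
			count++;
	}

	return count;
}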
float MainWindow::matchTwoShapes(IplImage* image1, IplImage* image2)
{
    double matchresult = 100;
    double mincontour = 200;  // minimum contour perimeter worth keeping
    int CVCONTOUR_APPROX_LEVEL;
    IplImage* img1_edge = cvCreateImage(cvGetSize(image1), 8, 1);
    IplImage* img2_edge = cvCreateImage(cvGetSize(image2), 8, 1);

    cvThreshold(image1, img1_edge, 128, 255, CV_THRESH_BINARY);
    cvThreshold(image2, img2_edge, 128, 255, CV_THRESH_BINARY);
    CvMemStorage* storage = cvCreateMemStorage();
    CvMemStorage* storage2 = cvCreateMemStorage();
    CvSeq* premier_contour_img1 = NULL;
    CvSeq* premier_contour_img2 = NULL;
    CvSeq* newseq = NULL;
    CvSeq* newseq2 = NULL;

    //first Border extraction
    cvFindContours(img1_edge, storage, &premier_contour_img1, sizeof(CvContour), CV_RETR_LIST);
    //second border extraction
    cvFindContours(img2_edge, storage2, &premier_contour_img2, sizeof(CvContour), CV_RETR_LIST);

    CVCONTOUR_APPROX_LEVEL = m_ui->tolerance_lvl->value();
    //approximate each contour with a polygon
    for (CvSeq* c = premier_contour_img1; c != NULL; c = c->h_next)
    {
        if (cvContourPerimeter(c) > mincontour)
        {
            newseq = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0); // keeps only the last large contour
        }
    }

    for(CvSeq* c = premier_contour_img2; c != NULL; c = c->h_next)
    {
        if (cvContourPerimeter(c) > mincontour)
        {
            newseq2 = cvApproxPoly(c, sizeof(CvContour), storage2, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0); // keeps only the last large contour
        }
    }

    //match the two contours
    if(newseq && newseq2)
    {
        matchresult = cvMatchShapes(newseq2, newseq, 1, 0.0); // previously cvMatchContours
    }

    cvReleaseImage(&img1_edge);
    cvReleaseImage(&img2_edge);
    cvReleaseMemStorage(&storage);
    cvReleaseMemStorage(&storage2);

    return matchresult;
}
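For context on the score above (not part of the original example): cvMatchShapes compares shapes via their Hu moments and returns a dissimilarity, so 0 means identical shapes and larger values mean less similar; the default of 100 therefore acts as a "no usable contour found" sentinel. A hypothetical call site inside another MainWindow method, with made-up names and an arbitrary acceptance threshold, could look like:

float score = matchTwoShapes(binA, binB);   // binA/binB: pre-binarized single-channel IplImages
if (score < 0.1f)
    qDebug("shapes match, score = %f", score);
else
    qDebug("shapes differ, score = %f", score);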
Example #3
point* GetContourPointRandom(const IplImage* img,int npoints)
{
	point* pts = NULL;
	IplImage* img_bw = cvCloneImage(img);
	// 2. Contour extraction
	CvMemStorage * storage = cvCreateMemStorage();
	CvSeq * contour = NULL;
	/*cvShowImage("song",img_bw);
	cvWaitKey(0);*/
	cvFindContours(img_bw, storage, &contour, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	CvPoint *samplearray = (CvPoint *)malloc(npoints * sizeof(CvPoint));

	if(contoursample(contour,samplearray,npoints))
	{	// random sample points were generated successfully
		pts = (point *)malloc(npoints * sizeof(point));
		for(int i=0;i<npoints;i++)
		{
			pts[i].x = samplearray[i].x;
			pts[i].y = samplearray[i].y;
		}
	}

	cvReleaseImage(&img_bw);
	cvReleaseMemStorage(&storage);
	free(samplearray);

	return pts;
}
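contoursample() and the point struct are defined elsewhere in this codebase. A rough sketch of such a sampler, assuming it just draws npoints random boundary points with replacement (which may differ from the original) and that <stdlib.h> is included for rand(), could be:

/* Sketch only: pick npoints random boundary points from an h_next-linked contour list.
 * Returns 1 on success, 0 if the contours have fewer than npoints points in total. */
static int contoursample_sketch(CvSeq *contour, CvPoint *out, int npoints)
{
	int total = 0;
	CvSeq *c;

	for (c = contour; c != NULL; c = c->h_next)
		total += c->total;
	if (total < npoints)
		return 0;

	for (int i = 0; i < npoints; i++)
	{
		int idx = rand() % total;
		/* find the contour that owns boundary point number idx */
		for (c = contour; idx >= c->total; c = c->h_next)
			idx -= c->total;
		out[i] = *(CvPoint *)cvGetSeqElem(c, idx);
	}
	return 1;
}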
Example #4
// NOTE: this snippet comes from a larger cell-extraction routine; input_morph,
// tmp8UC1, out_single and storage are assumed to have been created and
// initialized by setup code that is not shown here.
int main(int argc, char** argv)
{
	int ncell = 0, prev_ncontour = 0, same_count = 0;
	CvSeq *first_con = NULL, *cur = NULL;

	while (1) { // originally: while (!worker->CancellationPending)
		////worker->ReportProgress(50, String::Format(rm->GetString("Progress_Analyze_FoundNCell"), title, ncell));
		cvConvert(input_morph, tmp8UC1);
		cvClearMemStorage(storage);
		int ncontour = cvFindContours(tmp8UC1, storage, &first_con, sizeof(CvContour), CV_RETR_EXTERNAL);
		if (ncontour == 0)
			break; // finished extracting cells
		if (ncontour == prev_ncontour) {
			cvErode(input_morph, input_morph);
			same_count++;
		} else
			same_count = 0;
		prev_ncontour = ncontour;
		cur = first_con;
		while (cur != nullptr) {
			double area = fabs(cvContourArea(cur));
			if ((area < 3000.0) || (same_count > 10)) {
				int npts = cur->total;
				CvPoint *p = new CvPoint[npts];
				cvCvtSeqToArray(cur, p);
				cvFillPoly(out_single, &p, &npts, 1, cvScalar(255.0)); // move to the single-cell image
				cvFillPoly(input_morph, &p, &npts, 1, cvScalar(0.0)); // remove from the input
				delete[] p;
				ncell++;
			}
			cur = cur->h_next;
		}
	}

	return 0;
}
double rotationTest(IplImage *thresholdImage, IplImage* contourPicture, vector<ContourData> contourData){
    int returnValue = 0;
    
    // create storage for the contours
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq *first_contour = 0;
    
    //define the interesting region (the central part of the image)
    int imageMinX = cvGetSize(thresholdImage).width / 4;
    int imageMaxX = cvGetSize(thresholdImage).width - imageMinX;
    int imageMinY = cvGetSize(thresholdImage).height / 4;
    int imageMaxY = cvGetSize(thresholdImage).height - imageMinY;
    
    //find the contour
    int nContour = cvFindContours(thresholdImage, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);
//    cout<<"nContour = "<< nContour<< "\n";
    
    CvSeq *contour; //for saving the contour
    
    for (contour = first_contour ; contour != 0 ; contour = contour->h_next) {
        int tmp_maxX = 0, tmp_maxY = 0, tmp_minX = 1000000, tmp_minY = 1000000;
        int tmp_width = 0, tmp_height = 0;
        
        // Draw the contour
        cvDrawContours(contourPicture, contour, CV_RGB(0,0,255), CV_RGB(255, 0, 0), 2, 1);
        
        //compute the bounding box of the contour
        for (int i = 0 ; i < contour->total ; i++) {
            // track the extreme coordinates
            CvPoint* ContourPoint = (CvPoint*)cvGetSeqElem(contour, i);
            if (tmp_maxX < ContourPoint->x)
                tmp_maxX = ContourPoint->x;
            if (tmp_maxY < ContourPoint->y)
                tmp_maxY = ContourPoint->y;
            if (tmp_minX > ContourPoint->x)
                tmp_minX = ContourPoint->x;
            if (tmp_minY > ContourPoint->y)
                tmp_minY = ContourPoint->y;
        }
        //calculate width and height
        tmp_width = tmp_maxX - tmp_minX; tmp_height = tmp_maxY - tmp_minY;
        
        //if the aspect ratio and position look right, store the data in the vector
        if((double)tmp_width/(double)tmp_height < 1 && tmp_height > 20 && tmp_maxX < imageMaxX && tmp_minX > imageMinX && tmp_minY > imageMinY && tmp_maxY < imageMaxY ){
        
            //cvRectangle(contourPicture, CvPoint(tmp_maxX,tmp_maxY), CvPoint(tmp_minX,tmp_minY), CV_RGB(0, 255, 0));
            ContourData tmp_contour;
            tmp_contour.setMaxX(tmp_maxX);
            tmp_contour.setMaxY(tmp_maxY);
            tmp_contour.setMinX(tmp_minX);
            tmp_contour.setMinY(tmp_minY);
            tmp_contour.setHeight(tmp_height);
            tmp_contour.setWidth(tmp_width);
            tmp_contour.setCenter();
            contourData.push_back(tmp_contour);
            
            //confirm the value stored at vector
            //            cout<<"max ( "<<tmp_maxX<<","<<tmp_maxY<<")\n";
            //            cout<<"min ( "<<tmp_minX<<","<<tmp_minY<<")\n";
            //            cout<<"width : "<<tmp_width<< " height : "<<tmp_height<<"\n";
        }
    }
    returnValue = arrangeContourData(contourData, contourPicture);
        
    //find the number area.
    cvReleaseMemStorage(&storage);
    
    return returnValue;
}
Example #6
void Segment::updateContour()
{
	contour = 0;

	cvFindContours(iplMask, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
	CvMoments moments;
	
//	CvSeq* c = contour;
//cerr << "-------------------------- " << c << endl;
	/*for( CvSeq* c = contour; c!=NULL; c=c->h_next ){
			for(int i = 0; i < c->total; i++){
				CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
				cerr << p->x << "," << p->y << endl;
			}
		}
*/
	if (!contour)
		return; // no contour found; keep the previous centroid
	cvMoments(contour, &moments);
	double m00, m10, m01;
	
	m00 = cvGetSpatialMoment(&moments, 0,0);
	m10 = cvGetSpatialMoment(&moments, 1,0);
	m01 = cvGetSpatialMoment(&moments, 0,1);

	// guard against a degenerate contour with zero area (m00 == 0)
	if (m00 == 0)
		return;

	float center_x = m10/m00;
	float center_y = m01/m00;
	centroid = cvPoint(center_x, center_y);
}
//----------------------------------------------------------------------------------
void ofxCvBlobFinder::findBlobs(ofxCvGrayscaleImage image, bool find_holes) {
    
    CvMemStorage *stor = cvCreateMemStorage();
    IplImage *img = image.getCvImage();
    CvSeq *contours;
    
    _width = img->width;
    _height = img->height;
    
    // CV_RETR_EXTERNAL to not find holes
    int mode = (find_holes)?CV_RETR_LIST:CV_RETR_EXTERNAL;
    
    cvFindContours(img, stor, &contours, sizeof(CvContour), mode, CV_CHAIN_APPROX_SIMPLE);
    
    blobz.clear();
    while (contours) {
        ofxCvComplexBlob b =  ofxCvComplexBlob(contours);
        b.setApproxFactor(approxFactor);
        b.getApproxPoints();
        b.getHullPoints();
        blobz.push_back( b );
        contours = contours->h_next;
    }
    
    // sort blobs
    sort(blobz.begin(),  blobz.end(), sort_blob_func);
}
Example #8
int main( int argc, char** argv )
{
    IplImage* src;
    // the first command line parameter must be file name of binary
    // (black-n-white) image
    if( argc == 2 && (src=cvLoadImage(argv[1], 0))!= 0)
    {
        IplImage* dst = cvCreateImage( cvGetSize(src), 8, 3 );
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* contour = 0;

        cvThreshold( src, src, 50, 255, CV_THRESH_BINARY );
        cvNamedWindow( "Source", 1 );
        cvShowImage( "Source", src );

        cvFindContours( src, storage, &contour, sizeof(CvContour),
                      CV_RETR_CCOMP, CV_CHAIN_CODE );
        cvZero( dst );

        for( ; contour != 0; contour = contour->h_next )
        {
            CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
            /* use CV_FILLED instead of 1 to fill the components rather than outline them */
            cvDrawContours( dst, contour, color, color, 0, 1, CV_AA );
        }

        cvNamedWindow( "Components", 1 );
        cvShowImage( "Components", dst );
        cvWaitKey(0);
    }

    return 0;
}
int hfBinaryClassify(IplImage *bin, float minRad, float maxRad, CandidatePtrVector& kps ) {

  // Create OpenCV storage block
  CvMemStorage *mem = cvCreateMemStorage(0);
  CvSeq *Contours = NULL, *ptr = NULL;

  // Identify Contours in binary image
  cvFindContours( bin, mem, &Contours, sizeof(CvContour),
    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

  //Process
  int highcount = 0;
  int lowcount = 0;
  for( ptr = Contours; ptr != NULL; ptr = ptr->h_next ) {
    int ret = findStableMatches( ptr, minRad, maxRad, kps, bin );
    if( ret == -1 )
      lowcount++;
    if( ret == 1 )
      highcount++;
  }

  //Create return
  int retval = 0x0;
  if( lowcount > 100000 )
    retval = retval | AT_LOWER;
  if( highcount > 0 )
    retval = retval | AT_RAISE;

  //Deallocate (Contours->storage is the same block as mem, so release mem
  //unconditionally to avoid leaking it when no contours were found)
  cvReleaseMemStorage( &mem );

  return retval;
}
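The value returned above is a bit mask built from AT_LOWER and AT_RAISE, which are assumed to be single-bit flags defined elsewhere in this codebase. A hypothetical caller (binaryImage, minRadius, maxRadius and candidates are illustrative names, and <stdio.h> is assumed) might test it like this:

int flags = hfBinaryClassify(binaryImage, minRadius, maxRadius, candidates);
if (flags & AT_RAISE)
    printf("AT_RAISE set (at least one stable match scored high)\n");
if (flags & AT_LOWER)
    printf("AT_LOWER set (a very large number of low-scoring contours)\n");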
Example #10
void Image::drawContours() {
  IplImage* src = this -> toIplImage();
  IplImage* dst =  this -> cloneEmpty() -> toIplImage();

  CvMemStorage* storage = cvCreateMemStorage(0);
  CvSeq* contour = 0;

  cvThreshold( src, src, 1, 255, CV_THRESH_BINARY );

  cvNamedWindow( "Source", 1 );
  cvShowImage( "Source", src );
  
  cvFindContours( src, storage, &contour, sizeof(CvContour),
                  CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

  cvZero( dst );

  for( ; contour != 0; contour = contour->h_next ) 
    {
      CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
      /* replace CV_FILLED with 1 to see the outlines */
      cvDrawContours( dst, contour, color, color, -1, CV_FILLED, 8 );
    }
  cvNamedWindow( "Components", 1 );
  cvShowImage( "Components", dst );
  cvWaitKey(0);  
}
Example #11
vector<float> feature::getPAR(IplImage *src, int mask)
{
    float perimeter, area, rc, i;
    perimeter = area = i = 0;
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvFindContours(src, storage, &contours, sizeof(CvContour), CV_RETR_LIST);
    if (contours) {
        CvSeq* c = contours;
        for (; c != NULL; c = c->h_next) {
            if (cvContourArea(c) < 1000) continue;
            perimeter += cvArcLength(c);
            area += cvContourArea(c);
//            perimeter = perimeter > cvArcLength(c) ? perimeter : cvArcLength(c);
//            area = area > cvContourArea(c) ? area : cvContourArea(c);
            i++;
            //qDebug("\tmask = %d, i = %d, perimeter = %f, area = %f", mask, i, perimeter, area);
        }
    }
    if (area == 0)
        rc = -1;
    else
        rc = perimeter * perimeter / (4 * CV_PI * area); // compactness: ~1 for a circle

    //form feature based on mask
    vector<float> PAR({perimeter, area, rc});

    if (mask == 2) {
        PAR.push_back(i);
    }

    cvReleaseMemStorage(&storage);

    return PAR;
}
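The rc value above is the standard compactness (circularity) measure, perimeter squared over 4*pi*area. A quick stand-alone sanity check (not part of the original snippet) shows that it evaluates to 1 for an ideal circle and grows for elongated or ragged shapes:

#include <cstdio>

int main()
{
    const double pi = 3.14159265358979;
    double r = 10.0;             // any radius gives the same result
    double P = 2.0 * pi * r;     // circle perimeter
    double A = pi * r * r;       // circle area
    std::printf("circle compactness = %f\n", P * P / (4.0 * pi * A)); // prints 1.000000
    return 0;
}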
Example #12
void reg_sign()
{
    int counter_num=0;
    IplImage * temp;
    CvSeq * sc;
    CvSeq * c;

    temp = cvCreateImage(cvSize(sign_rect.width,sign_rect.height),8,1);
    cvCvtColor(reg_vision,temp,CV_BGR2GRAY);
    //Gauss smooth
    cvSmooth(temp,temp,CV_GAUSSIAN,3,3,0,0);
    //Canny edge detect
    cvCanny(temp,temp,120,150,3);
    //Threshold
    cvThreshold(temp,temp,0,255,CV_THRESH_BINARY);
    counter_num=cvFindContours(temp,mem,&sc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0));

    double rmin=-1;
    double r;
    CvBox2D minrect;
    c=sc;
    while(c!=NULL)
    {

        CvBox2D rect = cvMinAreaRect2(c,mem);
        r=((double)(rect.size.width*rect.size.height))/(sign_rect.width*sign_rect.height);
        if(r>0.1)
        {
            if(r<rmin||rmin<0)
            {
                rmin=r;
                minrect=rect;
            }
        }
        c=c->h_next;
    }
    //printf("counter:%d rate:%f\n",counter_num,rmin);
    //cvShowImage("reg_vision",reg_vision);
    if(rmin<0.2)
    {
       cur_sign=GO_AHEAD; 
        printf("GO_AHEAD!\n");
    }
    else if(rmin<0.5)
    {
       cur_sign=TURN_RIGHT; 
        printf("TURN_RIGHT!\n");
    }
    else if(rmin<0.7)
    {
        cur_sign=STOP;
        printf("STOP!\n");
    }
    else
    {
        cur_sign=NONE;
        printf("NONE!\n");
    }
    
    cvReleaseImage(&reg_vision);
    cvReleaseImage(&temp);
}
Example #13
void sharingan()
{
	int low_thresh = 120;
	int high_thresh = 130;
	int small_size = 500;
	int contour_num;

	cvCvtColor(vision,gray_vision,CV_BGR2GRAY);
	//Gauss smooth
	cvSmooth( gray_vision,gray_vision,CV_GAUSSIAN,3,3,0,0);
	//Canny edge detect
	cvCanny(gray_vision,gray_vision,low_thresh,high_thresh,3);
	//Threshold
	cvThreshold(gray_vision,gray_vision,0,255,CV_THRESH_BINARY);
	//picture used to display
	//find contours
	CvSeq * fc=NULL;
	CvSeq * c;
	cvClearMemStorage(mem);
	contour_num=cvFindContours(gray_vision,mem,&fc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0));
	//    printf("find counters:%d\n",contour_num);

	c=fc;
	cvCopyImage(blank,road_vision);
	cvCopyImage(blank,sign_vision);
	sign_flag=0;
	line_num=0;
	corn_num=0;
	while(c!=NULL)
	{
		CvBox2D rect = cvMinAreaRect2(c,mem);
		double width=rect.size.height>rect.size.width?rect.size.height:rect.size.width;
		double height=rect.size.height<=rect.size.width?rect.size.height:rect.size.width;
		if(height*width>small_size)
		{
			double s;
			s=cvContourArea(c,CV_WHOLE_SEQ,0);
			if(s>500)
			{
				sign_flag=1;
				cvDrawContours(sign_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0));
			}
			else
			{
				if(width>50&&height<15)
				{
					line_box[line_num]=rect;
					line_num++;
				}
				else
				{
					corn_box[corn_num]=rect;
					corn_num++;
				}
				cvDrawContours(road_vision,c,cvScalar(255,255,255,0), cvScalar(255,255,255,0),0, 1,8,cvPoint(0,0));
			}
		}
		c=c->h_next;
	}
}
Example #14
int contorsFindBox(IplImage *src, CvMemStorage* storage, CvBox2D *box)
{
    CvSeq *contours;
    int ret;
    double area = (double)src->width * src->height;
    assert(area > 0);

    ret = cvFindContours(src, storage,
                              &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if (ret == 0) return 1;

    for (CvSeq *c = contours; c != NULL; c = c->h_next) {
        c = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1);
        double contour_area = fabs(cvContourArea(c, CV_WHOLE_SEQ, 0));
        double ratio = area / contour_area;

        if (ratio > 1.5 && ratio < 6.0) {
            CvBox2D b = cvMinAreaRect2(c, NULL);
            memcpy(box, &b, sizeof(CvBox2D));

            return 0;
        }
    }

    return 1;
}
Example #15
void split_sign()
{
    CvSeq * sc;
    CvSeq * c;
    CvSeq * cmax = NULL;
    cvClearMemStorage(mem);
	cvFindContours(sign_vision,mem,&sc,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_NONE,cvPoint(0,0));
    double smax=0;
    double s;
    c=sc;
	while(c!=NULL)
	{
        s=cvContourArea(c,CV_WHOLE_SEQ,0);
        if(s>smax)
        {
            smax=s;
            cmax=c;
        }
        c=c->h_next;
    }
    if(cmax==NULL)
        return; // no sign contour found
    sign_rect=cvBoundingRect(cmax,0);
    cvSetImageROI(vision,sign_rect);
    reg_vision= cvCreateImage(cvSize(sign_rect.width,sign_rect.height),8,3);
	cvCopyImage(vision,reg_vision);
    cvResetImageROI(vision);
}
Example #16
/*
 * Smooths, thresholds and finds the worm's contour.
 * The original image must already be loaded into Worm.ImgOrig
 * The Smoothed image is deposited into Worm.ImgSmooth
 * The thresholded image is deposited into Worm.ImgThresh
 * The Boundary is placed in Worm.Boundary
 *
 */
void FindWormBoundary(WormAnalysisData* Worm, WormAnalysisParam* Params){
	/** This function currently takes around 5-7 ms **/
	/**
	 * Before I forget.. plan to make this faster by:
	 *  a) using region of interest
	 *  b) decimating to make it smaller (maybe?)
	 *  c) resize
	 *  d) not using CV_GAUSSIAN for smoothing
	 */
	TICTOC::timer().tic("cvSmooth");
	cvSmooth(Worm->ImgOrig,Worm->ImgSmooth,CV_GAUSSIAN,Params->GaussSize*2+1);
	//cvSmooth(Worm->ImgOrig,Worm->ImgSmooth,CV_MEDIAN,Params->GaussSize*2+1);
	//cvSmooth(Worm->ImgOrig,Worm->ImgSmooth,CV_BLUR,Params->GaussSize*2+1,Params->GaussSize*2+1);
	TICTOC::timer().toc("cvSmooth");
	TICTOC::timer().tic("cvThreshold");
	cvThreshold(Worm->ImgSmooth,Worm->ImgThresh,Params->BinThresh,255,CV_THRESH_BINARY );
	TICTOC::timer().toc("cvThreshold");
	CvSeq* contours;
	IplImage* TempImage=cvCreateImage(cvGetSize(Worm->ImgThresh),IPL_DEPTH_8U,1);
	cvCopy(Worm->ImgThresh,TempImage);
	TICTOC::timer().tic("cvFindContours");
	cvFindContours(TempImage,Worm->MemStorage, &contours,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE,cvPoint(0,0));
	TICTOC::timer().toc("cvFindContours");
	TICTOC::timer().tic("cvLongestContour");
	if (contours) LongestContour(contours,&(Worm->Boundary));
	TICTOC::timer().toc("cvLongestContour");
	cvReleaseImage(&TempImage);

}
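LongestContour() belongs to the surrounding worm-analysis code and is not shown here. As a sketch only (the name and exact behavior of the real helper may differ), a function that picks the contour with the most points out of the h_next-linked list could look like:

/* Sketch: select the contour with the largest number of points. */
void LongestContourSketch(CvSeq* contours, CvSeq** longest)
{
	CvSeq* c;
	int most = 0;

	*longest = NULL;
	for (c = contours; c != NULL; c = c->h_next) {
		if (c->total > most) {
			most = c->total;
			*longest = c;
		}
	}
}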
void ControlWidget::ContourImage()
{
    if(this->m_storage == NULL) {
        this->contour_image = cvCreateImage(cvGetSize(this->imagerd), IPL_DEPTH_8U, 1);
        this->m_storage = cvCreateMemStorage(0);
    }
    else {
        this->contour_image = cvCreateImage(cvGetSize(this->imagerd), IPL_DEPTH_8U, 1);
        cvClearMemStorage(this->m_storage);
    }

    CvSeq* contours = 0;

    cvCvtColor(this->imagerd, this->contour_image, CV_BGR2GRAY);
    cvCanny(this->contour_image, this->contour_image, this->contour_Low, this->contour_High);
    cvFindContours(this->contour_image, this->m_storage, &contours, sizeof(CvContour), CV_RETR_TREE);

    cvZero(this->contour_image);

    if(contours) {
        cvDrawContours(this->contour_image, contours, cvScalarAll(255), cvScalarAll(128), 5);

        QImage ContourImage = QImage((const unsigned char*)(this->contour_image->imageData),
                                     this->contour_image->width, this->contour_image->height,
                                     QImage::Format_Indexed8).rgbSwapped();

         this->bufferContourImage = new QPixmap();
        *bufferContourImage = QPixmap::fromImage(ContourImage);
        *bufferContourImage = bufferContourImage->scaled(250, 200);
    }

    if(contours)
        cvClearSeq(contours);
    cvZero(this->contour_image);
}
Example #18
void find_contour(struct ctx *ctx)
{
	double area, max_area = 0.0;
	CvSeq *contours, *tmp, *contour = NULL;

	/* cvFindContours modifies input image, so make a copy */
	cvCopy(ctx->thr_image, ctx->temp_image1, NULL);
	cvFindContours(ctx->temp_image1, ctx->temp_st, &contours,
		       sizeof(CvContour), CV_RETR_EXTERNAL,
		       CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	/* Select contour having greatest area */
	for (tmp = contours; tmp; tmp = tmp->h_next) {
		area = fabs(cvContourArea(tmp, CV_WHOLE_SEQ, 0));
		if (area > max_area) {
			max_area = area;
			contour = tmp;
		}
	}

	/* Approximate contour with poly-line */
	if (contour) {
		contour = cvApproxPoly(contour, sizeof(CvContour),
				       ctx->contour_st, CV_POLY_APPROX_DP, 2,
				       1);
		ctx->contour = contour;
	}
}
Example #19
void findContours( IplImage* img, CvMemStorage* storage, CvSeq **contours)
{
    //for findContour function
    IplImage* timg  =NULL;
    IplImage* gray  =NULL;
    IplImage* tgray =NULL;

    CvSize sz = cvSize( img->width, img->height );

    // single-channel working copies of the input image
    gray = cvCreateImage( sz, img->depth, 1 );
    timg = cvCreateImage( sz, img->depth, 1 );
    tgray = cvCreateImage( sz, img->depth, 1 );

    // copy just the first channel of the input (channel of interest)
    cvSetImageCOI(img,1);
    cvCopy( img, timg, NULL );
    cvSetImageCOI(img,0);

    cvCopy( timg, tgray, 0 );

    cvCanny( tgray, gray, ct1, ct2, 5 );
    // dilate to close small gaps between edge segments
    cvDilate( gray, gray, 0, 2 );

    cvFindContours( gray, storage, contours,
                    sizeof(CvContour),CV_RETR_LIST,
                    CV_CHAIN_APPROX_NONE, cvPoint(0,0) );

    //release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

}
/*	The function will return the connected components in 'comp', 
	as well as the number of connected components 'nc'.
	At this point, we have to determine whether the components are eye pair or not.
	We'll use experimentally derived heuristics for this, based on the width, 
	height, vertical distance, and horizontal distance of the components. 
	To make things simple, we only proceed if the number of the connected components is 2.*/
int get_connected_components(IplImage* img, IplImage* prev, CvRect window, CvSeq** comp)
{
		IplImage* _diff;
 
		cvZero(diff);
 
    /* apply search window to images */
		cvSetImageROI(img, window);
		cvSetImageROI(prev, window);
		cvSetImageROI(diff, window);
 
    /* motion analysis */
		cvSub(img, prev, diff, NULL);
		cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
		cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
 
    /* reset search window */
		cvResetImageROI(img);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
 
		_diff = (IplImage*)cvClone(diff);
 
    /* get connected components */
		int nc = cvFindContours(_diff, storage, comp, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
 
		cvClearMemStorage(storage);		
		cvReleaseImage(&_diff);
	
		return nc;
}
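As the comment block above explains, the caller only proceeds when exactly two connected components come back. A hypothetical call site (frame, prev_frame and search_window are illustrative names) might look like:

CvSeq* comp = NULL;
int nc = get_connected_components(frame, prev_frame, search_window, &comp);
if (nc == 2)
{
    /* likely an eye pair: comp and comp->h_next are the two components */
}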
void FaceDetection::FindContours(IplImage* imgGray)
{
    ReallocImage(&m_imgThresh, cvGetSize(imgGray), 1);
    if (NULL == m_imgThresh)
        return;
    //
    int iNumLayers = m_iNumLayers;
    int iMinLevel = 0, iMaxLevel = 255, iStep = 255 / iNumLayers;
    ThresholdingParam(imgGray, iNumLayers, iMinLevel, iMaxLevel, iStep);
    // init
    cvReleaseMemStorage(&m_mstgContours);
    m_mstgContours = cvCreateMemStorage();
    if (NULL == m_mstgContours)
        return;
    memset(m_seqContours, 0, sizeof(CvSeq*) * MAX_LAYERS);

    cvReleaseMemStorage(&m_mstgRects);
    m_mstgRects = cvCreateMemStorage();
    if (NULL == m_mstgRects)
        return;
    m_seqRects = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvContourRect), m_mstgRects); 
    if (NULL == m_seqRects)
        return;
    // find contours
    for (int l = iMinLevel, i = 0; l < iMaxLevel; l += iStep, i++)
    {
        cvThreshold(imgGray, m_imgThresh, (double)l, (double)255, CV_THRESH_BINARY);
        if (cvFindContours(m_imgThresh, m_mstgContours, &m_seqContours[i], sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE))
            AddContours2Rect(m_seqContours[i], l, i);
    }
    // sort rects
    cvSeqSort(m_seqRects, CompareContourRect, NULL);
}// void FaceDetection::FindContours(IplImage* imgGray)
Example #22
int main()
{
  IplImage* newImg = NULL;
  IplImage* grayImg = NULL;
  IplImage* contourImg = NULL;
  //parameters for the contour detection
  CvMemStorage * storage = cvCreateMemStorage(0);
  CvSeq * contour = 0;
  int mode = CV_RETR_EXTERNAL;
  mode = CV_RETR_CCOMP; //detect both outside and inside contour
  cvNamedWindow("src", 1);
  cvNamedWindow("contour",1);
  //load original image
  newImg = cvLoadImage("AlignedHiResStack_2_6_2011_00.tif",1);
  //create a single channel 1 byte image (i.e. gray-level image)
  grayImg = cvCreateImage( cvSize(newImg->width, newImg->height), IPL_DEPTH_8U, 1 );
  //convert original color image (3 channel rgb color image) to gray-level image
  cvCvtColor( newImg, grayImg, CV_BGR2GRAY );
  cvShowImage( "src", newImg );
  //make a copy of the original image to draw the detected contour
  contourImg = cvCloneImage( newImg );
  //find the contour
  cvFindContours(grayImg, storage, &contour, sizeof(CvContour), mode, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
  //draw the contour
  cvDrawContours(contourImg, contour, CV_RGB(0, 255, 0), CV_RGB(255, 0, 0), 2, 2, 8);
  cvShowImage( "contour", contourImg );
  cvWaitKey(0);
  cvDestroyWindow( "src" );  cvDestroyWindow( "contour" );
  cvReleaseImage( &newImg );   cvReleaseImage( &grayImg );  cvReleaseImage( &contourImg );
  cvReleaseMemStorage(&storage);
  return 0;
}
Example #23
FkInt32S FkSilhDetector_PaintedFlies::denoiseChangeMask() 
{
    cvErode(m_rawChangeMask,m_denoisedChangeMask,NULL,1);
    cvDilate(m_denoisedChangeMask,m_denoisedChangeMask,NULL,1);
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* first_contour = NULL;

//    cvMorphologyEx( m_rawChangeMask, m_denoisedChangeMask, 0, 0, CV_MOP_CLOSE, 2);//CVCLOSE_ITR );

    int Nc = cvFindContours(m_denoisedChangeMask,storage,&first_contour,sizeof(CvContour),CV_RETR_EXTERNAL);//CV_RETR_LIST);

    cvZero(m_denoisedChangeMask);

    for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) 
    {
        double len = cvContourPerimeter( c );
        if (len>10) //to make sure we get rid of noise
        {
            cvDrawContours(m_denoisedChangeMask, c, cvScalar(255), cvScalar(255), -1, CV_FILLED, 8);
        }
    }

    cvReleaseMemStorage(&storage);
    return(FK_OK);

}
Example #24
//create lists of segments of all contours from image
CvSeq* cvExtractSingleEdges(IplImage* image,  //bw image - it's content will be destroyed by cvFindContours
                            CvMemStorage* storage) {
    CvMemStorage* tmp_storage = cvCreateChildMemStorage(storage);
    CvSeq* contours = 0;
    cvFindContours(image, tmp_storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    cvZero(image);

    //iterate through contours
    //iterate through tree
    CvSeq* current = contours;
    int number = 0;
    int level = 1;

    CvSeq* output = 0;
    CvSeq* tail_seq = 0;

    //this loop can actually iterate through a contour tree,
    //but since we use CV_RETR_LIST the tree traversal is not needed
    while (current) {
        number++;

        //get vertical list of segments for one contour
        CvSeq* new_seq = icvCutContourRaster(current, storage,  image);

        //add this vertical list to the horizontal list
        if (new_seq) {
            if (tail_seq) {
                tail_seq->h_next = new_seq;
                new_seq->h_prev = tail_seq;
                tail_seq = new_seq;
            } else {
                output = tail_seq = new_seq;
            }
        }

        //iteration through tree
        if (current->v_next) {
            //goto child
            current = current->v_next;
            level++;
        } else {
            //go up to the parent
            while (!current->h_next) {
                current = current->v_prev;
                level--;
                if (!level) { break; }
            }

            if (current) {   //go to the next sibling
                current = current->h_next;
            }
        }
    }

    //free temporary memstorage with initial contours
    cvReleaseMemStorage(&tmp_storage);

    return output;
}
Example #25
// Define the trackbar callback function. It finds contours,
// draws them and approximates them with ellipses.
void process_image(int h)
{
    CvMemStorage* storage;
    CvSeq* contour;

    // Create dynamic structure and sequence.
    storage = cvCreateMemStorage(0);
    contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint) , storage);

    // Threshold the source image. This is needed for cvFindContours().
    cvThreshold( image03, image02, slider_pos, 255, CV_THRESH_BINARY );

    // Find all contours.
    cvFindContours( image02, storage, &contour, sizeof(CvContour),
                    CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

    // Clear images. IPL use.
    cvZero(image02);
    cvZero(image04);

    // This loop draws all contours and approximates them with ellipses.
    for(;contour;contour = contour->h_next)
    {
        int count = contour->total; // number of points in the contour
        CvPoint center;
        CvSize size;
        CvBox2D box;

        // cvFitEllipse2 needs at least 6 points.
        if( count < 6 )
            continue;

        CvMat* points_f = cvCreateMat( 1, count, CV_32FC2 );
        CvMat points_i = cvMat( 1, count, CV_32SC2, points_f->data.ptr );
        cvCvtSeqToArray( contour, points_f->data.ptr, CV_WHOLE_SEQ );
        cvConvert( &points_i, points_f );

        // Fits ellipse to current contour.
        box = cvFitEllipse2( points_f );

        // Draw current contour.
        cvDrawContours(image04,contour,CV_RGB(255,255,255),CV_RGB(255,255,255),0,1,8,cvPoint(0,0));

        // Convert ellipse data from float to integer representation.
        center = cvPointFrom32f(box.center);
        size.width = cvRound(box.size.width*0.5);
        size.height = cvRound(box.size.height*0.5);

        // Draw ellipse.
        cvEllipse(image04, center, size,
                  -box.angle, 0, 360,
                  CV_RGB(0,0,255), 1, CV_AA, 0);

        cvReleaseMat(&points_f);
    }

    // Show image. HighGUI use.
    cvShowImage( "Result", image04 );

    // Release the contour storage (this callback may run every time the slider moves).
    cvReleaseMemStorage(&storage);
}
int main( int argc, char** argv )
{
    int i, j;
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 );

    cvZero( img );

    for( i=0; i < 6; i++ )
    {
        int dx = (i%2)*250 - 30;
        int dy = (i/2)*150;
        CvScalar white = cvRealScalar(255);
        CvScalar black = cvRealScalar(0);

        if( i == 0 )
        {
            for( j = 0; j <= 10; j++ )
            {
                double angle = (j+5)*CV_PI/21;
                cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)),
                    cvRound(dy+100-90*sin(angle))),
                    cvPoint(cvRound(dx+100+j*10-30*cos(angle)),
                    cvRound(dy+100-30*sin(angle))), white, 1, 8, 0);
            }
        }

        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
    }

    cvNamedWindow( "image", 1 );
    cvShowImage( "image", img );

    cvFindContours( img, storage, &contours, sizeof(CvContour),
                    CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

    // comment this out if you do not want approximation
    contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );

    cvNamedWindow( "contours", 1 );
    cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );
    
    on_trackbar(0);
    cvWaitKey(0);
    cvReleaseMemStorage( &storage );
    cvReleaseImage( &img );

    return 0;
}
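This snippet relies on globals (w, levels, contours) and a trackbar callback on_trackbar() that are defined outside of what is shown. A minimal compatible sketch, with the window size and drawing parameters chosen purely for illustration, could be:

#define w 500
int levels = 3;
CvSeq* contours = 0;

void on_trackbar(int pos)
{
    IplImage* cnt_img = cvCreateImage( cvSize(w, w), 8, 3 );
    cvZero( cnt_img );
    if( contours )
        cvDrawContours( cnt_img, contours, CV_RGB(255,0,0), CV_RGB(0,255,0),
                        levels - 3, 2, CV_AA, cvPoint(0,0) );
    cvShowImage( "contours", cnt_img );
    cvReleaseImage( &cnt_img );
}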
Example #27
/* compute_ContourTree: Compute the contours of the filtered camera image.
						Returns a 3-channel RGB image with the contours and their
						convex hulls drawn on it (or NULL if no contours were found).
	@img_8uc1: An 8-bit single channel image
*/
IplImage * compute_ContourTree(IplImage *img_8uc1)
{
	IplImage *img_edge = cvCreateImage(cvGetSize(img_8uc1), 8, 1);
	IplImage *img_8uc3 = cvCreateImage(cvGetSize(img_8uc1), 8, 3);

	CvSeq *ptSeq = NULL; //point sequence
	CvMemStorage *storage = cvCreateMemStorage(); //storage for contours creation
	
	cvThreshold(img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY);

	CvSeq *c, *first_contour = NULL;
	CvSeq *biggestContour = NULL;
	int numContours = cvFindContours(img_edge,storage,&first_contour, sizeof(CvContour), CV_RETR_LIST);

	if(numContours == 0)
	{
		cvReleaseImage(&img_edge);
		cvReleaseImage(&img_8uc3);
		cvReleaseMemStorage(&storage);
		return NULL;
	}
	/*
	double result1, result2;
	result1 = result2 = 0;

	// find the largest contour 
	  // this is the whole hand
	
	for(c = first_contour; c != NULL; c=c->h_next){
		result1 = cvContourArea(c,CV_WHOLE_SEQ);
		
		if(result1 > result2){
			result2 = result1;
			biggestContour = c;
		}
	}
	*/
	for(c = first_contour; c != NULL; c = c->h_next){
		cvCvtColor(img_8uc1, img_8uc3, CV_GRAY2BGR);
		cvDrawContours(img_8uc3, c, CVX_RED, CVX_BLUE, 1, 1, 8); //note define (CVX...) if not including ocv.h

		CvSeq *hull;
		hull = cvConvexHull2(c, 0, CV_CLOCKWISE, 0);
		CvPoint pt0;

		// with return_points == 0 the hull holds pointers to the contour's points,
		// so each element needs a double dereference; start from the last point so
		// the drawn polygon closes on itself
		pt0 = **(CvPoint **)cvGetSeqElem(hull, hull->total - 1);
		for(int i = 0; i < hull->total; ++i){
			CvPoint pt = **(CvPoint **)cvGetSeqElem(hull, i);
			cvLine(img_8uc3, pt0, pt, CV_RGB( 0, 255, 0 ));
			pt0 = pt;
		}
		cvShowImage("CONVEX WINDOW", img_8uc3);
	}
	
	/*Convexctx_t *retCtx = (Convexctx_t *)malloc(sizeof(*retCtx));
	retCtx->image = img_8uc3;
	retCtx->contour = c;
	retCtx->destroy = allocationCleanup; //destructor
	cvReleaseImage(&img_edge);
	return retCtx;
	*/
	cvReleaseImage(&img_edge);
	cvReleaseMemStorage(&storage);

	return img_8uc3;

}
Example #28
/**
 * @internal
 * @brief Find the blobs in the received image.
 * What it looks for in an image is bright areas, so typically 
 * the image result of a background subtraction is a good input.
 * 
 * @param[in] inImage image where the blobs will be searched
 */
void BlobFinder::update( const Image& inImage )
{
	// Check valid
	if ( !isValid() )
	THROW_EXCEPTION( "Trying to compute blobs, with the BlobFinder not initialized. Init method should be called" );

	// Check blob area... and if it has not been set, set it to the max and min (no lower than 10, to avoid opencv issues)
	if ( (m_minBlobArea < 0) || (m_maxBlobArea < 0) )
	{
		m_minBlobArea = 10;
		m_maxBlobArea = (float)inImage.getWidth() * (float)inImage.getHeight();
	}

	// Check that the input image has a supported number of channels
	if( (inImage.getNChannels() != 1) && (inImage.getNChannels() != 3) )
	THROW_EXCEPTION( "Trying to compute blobs on an image with unsupported format -> only RGB or GRAYSCALE images supported" );

	// Request temp image to work with
	IplImage* cvTempImage = ImageResourceManager::getSingleton().getImage( inImage.getWidth(), inImage.getHeight(), 1 );

	// If they have different number of channels -> convert them
	if ( inImage.getNChannels() == 3 )
		cvConvertImage( &inImage.getCVImage(), cvTempImage );
	// just one channel -> Copy the input image
	else 
		cvCopy( &inImage.getCVImage(), cvTempImage );

	// Find blobs (openCV contours)	
	int retrievalMode = CV_RETR_EXTERNAL; // CV_RETR_CCOMP
	cvFindContours( cvTempImage, m_findContoursStorage, &m_contour, sizeof(CvContour), retrievalMode, CV_CHAIN_APPROX_SIMPLE );

	// Iterate through the found contours and store them
	m_blobs.clear();
	for( ; m_contour != 0; m_contour = m_contour->h_next )
	{
		// Get contour area
		double area = fabs( cvContourArea( m_contour, CV_WHOLE_SEQ ) );

		// If it has a good size (between min and max)
		if ( ( area > m_maxBlobArea ) || ( area < m_minBlobArea ) )
		  continue;

		// Store new Blob
		m_blobs.push_back( Blob( area, m_contour ) );
	}

	// Release temp image
	ImageResourceManager::getSingleton().releaseImage( cvTempImage );

	// Extract information of found blobs
	extractBlobsInformation();

	// Clear OpenCV contours storage 
	cvClearMemStorage( m_findContoursStorage );
}
Example #29
uint16_t findcontour(IplImage* img) {
	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	uint16_t count = 0;
	cvFindContours(img, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST);
	for(CvSeq* c = first_contour; c != NULL; c = c->h_next)
		if((c->total) > 80)
			count++;
	cvReleaseMemStorage(&storage);
	return count;
}
////////////////////////////////////////////////////////////////
//  Labelling process
////////////////////////////////////////////////////////////////
void cv_Labelling(
    IplImage *src_img,       // input image (8-bit grayscale)
    IplImage *dst_img        // output image (8-bit, 3-channel color)
) {

    if (src_img == NULL)
        return;

    CvMemStorage *storage = cvCreateMemStorage (0);
    CvSeq *contours = NULL;

    // Binarize the image (Otsu's method / discriminant analysis)
    cvThreshold (src_img, src_img, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

    // Detect contours (the return value is the total number of contours found)
    int find_contour_num = cvFindContours (
                               src_img,                 // input image
                               storage,                 // storage area for the extracted contours
                               &contours,               // pointer to a pointer to the outermost contour
                               sizeof (CvContour),      // size of the sequence header
                               CV_RETR_EXTERNAL,        // retrieval mode
                               // CV_RETR_EXTERNAL - extract only the outermost contours
                               // CV_RETR_LIST - extract all contours and put them in a list
                               // CV_RETR_CCOMP - extract all contours and organize them into a
                               // two-level hierarchy: level 1 holds the outer boundaries of the
                               // connected components, level 2 holds the boundaries of the holes
                               // inside them.
                               // CV_RETR_TREE - extract all contours and build the full
                               // hierarchy of nested contours.
                               CV_CHAIN_APPROX_NONE     // CV_CHAIN_APPROX_SIMPLE: keep only the end points of straight segments
                               // CV_CHAIN_APPROX_NONE: keep every contour point
                               // or apply one of the Teh-Chin chain approximation algorithms:
                               // CV_CHAIN_APPROX_TC89_L1
                               // CV_CHAIN_APPROX_TC89_KCOS
                           );

    if (contours != NULL) {
        // initialize the output image to 0 (black)
        cvSet(dst_img, CV_RGB( 0, 0, 0 ));
        // draw the contours
        DrawNextContour(dst_img, contours, 1);
    }

    // draw the contour with the largest perimeter in red
    if (max_perimeter_contor != NULL) {
        cvDrawContours( dst_img, max_perimeter_contor, CV_RGB( 255, 0, 0 ), CV_RGB( 255, 0, 0 ), 0, 2);
        for ( int i = 0; i < max_perimeter_contor->total; i++) {
            CvPoint *point = CV_GET_SEQ_ELEM (CvPoint, max_perimeter_contor, i);
            //std::cout << "x:" << point->x << ", y:" << point->y  << std::endl;
        }
    }
    // Get the coordinates of the vertices that make up a contour:
    /*for ( int i = 0; i < Contour->total; i++) {
        CvPoint *point = CV_GET_SEQ_ELEM (CvPoint, Contour, i);
        std::cout << "x:" << point->x << ", y:" << point->y  << std::endl;

    }*/
    // release the memory storage
    cvReleaseMemStorage (&storage);
}
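DrawNextContour() and the max_perimeter_contor global are defined elsewhere in this project. As a rough, assumption-laden sketch (the real function may draw and recurse differently), a walker that draws every contour while remembering the one with the largest perimeter might look like this:

// Sketch only -- not the project's actual DrawNextContour().
static CvSeq *max_perimeter_contor = NULL;

static void DrawNextContourSketch(IplImage *dst_img, CvSeq *contour, int level)
{
    for (CvSeq *c = contour; c != NULL; c = c->h_next) {
        // draw this contour
        cvDrawContours(dst_img, c, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), 0, 1);

        // remember the contour with the largest perimeter
        if (max_perimeter_contor == NULL ||
            cvContourPerimeter(c) > cvContourPerimeter(max_perimeter_contor))
            max_perimeter_contor = c;

        // recurse into any nested contours
        if (c->v_next)
            DrawNextContourSketch(dst_img, c->v_next, level + 1);
    }
}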