Example #1
float MainWindow::matchTwoShapes(IplImage* image1, IplImage* image2)
{
    double matchresult = 100;
    double mincontour = 200;  // minimum perimeter a contour must have to be considered
    int CVCONTOUR_APPROX_LEVEL;
    IplImage* img1_edge = cvCreateImage(cvGetSize(image1), 8, 1);
    IplImage* img2_edge = cvCreateImage(cvGetSize(image2), 8, 1);

    // assumes single-channel inputs; threshold both images to binary masks
    cvThreshold(image1, img1_edge, 128, 255, CV_THRESH_BINARY);
    cvThreshold(image2, img2_edge, 128, 255, CV_THRESH_BINARY);
    CvMemStorage* storage = cvCreateMemStorage();
    CvMemStorage* storage2 = cvCreateMemStorage();
    CvSeq* premier_contour_img1 = NULL;
    CvSeq* premier_contour_img2 = NULL;
    CvSeq* newseq = NULL;
    CvSeq* newseq2 = NULL;

    // first border extraction
    cvFindContours(img1_edge, storage, &premier_contour_img1, sizeof(CvContour), CV_RETR_LIST);
    //second border extraction
    cvFindContours(img2_edge, storage2, &premier_contour_img2, sizeof(CvContour), CV_RETR_LIST);

    CVCONTOUR_APPROX_LEVEL = m_ui->tolerance_lvl->value();
    // approximate each sufficiently large contour with a polygon
    // (note: only the last qualifying contour of each image is kept)
    for (CvSeq* c = premier_contour_img1; c != NULL; c = c->h_next)
    {
        if (cvContourPerimeter(c) > mincontour)
        {
            newseq = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0); // approx
        }
    }

    for(CvSeq* c = premier_contour_img2; c != NULL; c = c->h_next)
    {
        if (cvContourPerimeter(c) > mincontour)
        {
            newseq2 = cvApproxPoly(c, sizeof(CvContour), storage2, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0); // approx
        }
    }

    //match the two contours
    if(newseq && newseq2)
    {
        matchresult = cvMatchShapes(newseq2, newseq, 1, 0.0); // formerly cvMatchContours
    }

    cvReleaseImage(&img1_edge);
    cvReleaseImage(&img2_edge);
    cvReleaseMemStorage(&storage);
    cvReleaseMemStorage(&storage2);

    return matchresult;
}
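A minimal caller sketch (the image files and the MainWindow pointer `win` are assumptions, not part of the example); cvMatchShapes returns a dissimilarity measure, so values near 0 mean the two shapes match closely:

IplImage* a = cvLoadImage("shape_a.png", CV_LOAD_IMAGE_GRAYSCALE); /* hypothetical files */
IplImage* b = cvLoadImage("shape_b.png", CV_LOAD_IMAGE_GRAYSCALE);
if (a && b) {
    float score = win->matchTwoShapes(a, b);
    printf("dissimilarity: %.4f (lower is more similar)\n", score);
}
cvReleaseImage(&a);
cvReleaseImage(&b);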
Example #2
int contorsFindBox(IplImage *src, CvMemStorage* storage, CvBox2D *box)
{
    CvSeq *contours;
    int ret;
    double area = src->width * src->height;
    assert(area > 0);  // side effects inside assert() disappear under NDEBUG

    ret = cvFindContours(src, storage,
                              &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if (ret == 0) return 1;

    for (CvSeq *c = contours; c != NULL; c = c->h_next) {
        c = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1);
        double contour_area = fabs(cvContourArea(c, CV_WHOLE_SEQ, 0));
        double ratio = area / contour_area;

        if (ratio > 1.5 && ratio < 6.0) {
            CvBox2D b = cvMinAreaRect2(c, NULL);
            memcpy(box, &b, sizeof(CvBox2D));

            return 0;
        }
    }

    return 1;
}
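A usage sketch for the function above (the `binary` mask and `canvas` image are assumptions); cvBoxPoints expands the CvBox2D center/size/angle into four corners for drawing:

CvBox2D box;
CvMemStorage* storage = cvCreateMemStorage(0);
if (contorsFindBox(binary, storage, &box) == 0) {   /* binary: 8-bit single-channel mask */
    CvPoint2D32f corners[4];
    cvBoxPoints(box, corners);
    for (int k = 0; k < 4; k++)
        cvLine(canvas, cvPointFrom32f(corners[k]),
               cvPointFrom32f(corners[(k + 1) % 4]), CV_RGB(0, 255, 0), 2, 8, 0);
}
cvReleaseMemStorage(&storage);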
Example #3
void find_contour(struct ctx *ctx)
{
	double area, max_area = 0.0;
	CvSeq *contours, *tmp, *contour = NULL;

	/* cvFindContours modifies input image, so make a copy */
	cvCopy(ctx->thr_image, ctx->temp_image1, NULL);
	cvFindContours(ctx->temp_image1, ctx->temp_st, &contours,
		       sizeof(CvContour), CV_RETR_EXTERNAL,
		       CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	/* Select contour having greatest area */
	for (tmp = contours; tmp; tmp = tmp->h_next) {
		area = fabs(cvContourArea(tmp, CV_WHOLE_SEQ, 0));
		if (area > max_area) {
			max_area = area;
			contour = tmp;
		}
	}

	/* Approximate contour with poly-line */
	if (contour) {
		contour = cvApproxPoly(contour, sizeof(CvContour),
				       ctx->contour_st, CV_POLY_APPROX_DP, 2,
				       1);
		ctx->contour = contour;
	}
}
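find_contour() relies on a ctx struct defined elsewhere in its project; a hypothetical minimal layout covering just the fields this function touches:

struct ctx {
	IplImage     *thr_image;    /* thresholded binary input */
	IplImage     *temp_image1;  /* scratch copy (cvFindContours overwrites its input) */
	CvMemStorage *temp_st;      /* storage for the raw contour list */
	CvMemStorage *contour_st;   /* storage for the approximated contour */
	CvSeq        *contour;      /* result: largest contour, poly-approximated */
};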
Example #4
int main( int argc, char** argv )
{
    int i, j;
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 );

    cvZero( img );

    for( i=0; i < 6; i++ )
    {
        int dx = (i%2)*250 - 30;
        int dy = (i/2)*150;
        CvScalar white = cvRealScalar(255);
        CvScalar black = cvRealScalar(0);

        if( i == 0 )
        {
            for( j = 0; j <= 10; j++ )
            {
                double angle = (j+5)*CV_PI/21;
                cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)),
                    cvRound(dy+100-90*sin(angle))),
                    cvPoint(cvRound(dx+100+j*10-30*cos(angle)),
                    cvRound(dy+100-30*sin(angle))), white, 1, 8, 0);
            }
        }

        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
    }

    cvNamedWindow( "image", 1 );
    cvShowImage( "image", img );

    cvFindContours( img, storage, &contours, sizeof(CvContour),
                    CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

    // comment this out if you do not want approximation
    contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );

    cvNamedWindow( "contours", 1 );
    cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );
    
    on_trackbar(0);
    cvWaitKey(0);
    cvReleaseMemStorage( &storage );
    cvReleaseImage( &img );

    return 0;
}
Example #5
static void
find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
    CvMemStorage * mem_storage, CvSeq * contours)
{
  CvContourScanner scanner;
  CvSeq *c;
  int numCont = 0;
  /* Just some convenience variables */
  const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);

  /* CLEAN UP RAW MASK */
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
  /* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
  if (mem_storage == NULL) {
    mem_storage = cvCreateMemStorage (0);
  } else {
    cvClearMemStorage (mem_storage);
  }

  scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
      CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));

  while ((c = cvFindNextContour (scanner)) != NULL) {
    double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
    /* size threshold derived from the image perimeter (note: 'len' here is
     * the contour area, unlike the book version, which compared perimeter) */
    double q = (mask->height + mask->width) / perimScale;
    /* Get rid of blob if it is too small: */
    if (len < q) {
      cvSubstituteContour (scanner, NULL);
    } else {
      /* Smooth its edges if it is large enough */
      CvSeq *c_new;
      if (poly1_hull0) {
        /* Polygonal approximation */
        c_new =
            cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
            CVCONTOUR_APPROX_LEVEL, 0);
      } else {
        /* Convex Hull of the segmentation */
        c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
      }
      cvSubstituteContour (scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours (&scanner);
  /* note: 'contours' is a by-value parameter, so this list never reaches the
   * caller; only the repainted mask below does */

  /* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
  cvZero (mask);
  /* DRAW PROCESSED CONTOURS INTO THE MASK */
  for (c = contours; c != NULL; c = c->h_next)
    cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
            0));
}
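A usage sketch under stated assumptions: CVCLOSE_ITR and CVCONTOUR_APPROX_LEVEL are macros defined elsewhere (the book code this derives from uses 1 and 2); since the contour list stays local, the caller only gets the cleaned mask back:

CvMemStorage* store = cvCreateMemStorage(0);
/* fgmask: an 8-bit single-channel foreground mask, e.g. from background subtraction */
find_connected_components(fgmask, 1 /* polygonal approx */, 4.0f, store, NULL);
/* fgmask now holds only the filled, smoothed large regions */
cvReleaseMemStorage(&store);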
Example #6
void ShapeClassifier::StartTraining(TrainingSet *sampleSet) {
	// Make a copy of the set used for training (we'll want to save it later)
	sampleSet->CopyTo(&trainSet);

	cvClearMemStorage(templateStorage);
    templateContours = NULL;

    // TODO: call into trainingset class to do this instead of accessing samplemap
    for (map<UINT, TrainingSample*>::iterator i = sampleSet->sampleMap.begin(); i != sampleSet->sampleMap.end(); i++) {
        TrainingSample *sample = (*i).second;
        if (sample->iGroupId == GROUPID_POSSAMPLES) { // positive sample

            IplImage *grayscale = cvCreateImage( cvSize(sample->fullImageCopy->width, sample->fullImageCopy->height), IPL_DEPTH_8U, 1);
            cvCvtColor(sample->fullImageCopy, grayscale, CV_BGR2GRAY);
            cvCanny(grayscale, grayscale, SHAPE_CANNY_EDGE_LINK, SHAPE_CANNY_EDGE_FIND, SHAPE_CANNY_APERTURE);
			cvDilate(grayscale, grayscale, 0, 2);

            CvMemStorage *storage = cvCreateMemStorage(0);
            CvSeq *sampleContours = NULL;

            cvFindContours(grayscale, storage, &sampleContours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);
			if (sampleContours != NULL) {
			    sampleContours = cvApproxPoly(sampleContours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 0.2, 1 );
				for (CvSeq *contour = sampleContours; contour != NULL; contour = contour->h_next)
				{
					if ((contour->total > SHAPE_MIN_CONTOUR_POINTS) && (contour->flags & CV_SEQ_FLAG_CLOSED)){
						if (!templateContours) {
							templateContours = cvCloneSeq(contour, templateStorage);
						} else {
							CvSeq *newContour = cvCloneSeq(contour, templateStorage);
							newContour->h_next = templateContours->h_next;
							templateContours->h_next = newContour;
						}
					}
				}
			}
            cvReleaseMemStorage(&storage);
            cvReleaseImage(&grayscale);

		} else if (sample->iGroupId == GROUPID_NEGSAMPLES) { // negative sample
            // do nothing for now
            // TODO: we could compare guesses against these as well and remove them if they match
        }
    }

    UpdateContourImage();

    if (isOnDisk) { // this classifier has been saved so we'll update the files
        Save();        
    }

    // update member variables
	isTrained = true;
}
Example #7
CV_IMPL CvSeq*
cvSegmentFGMask( CvArr* _mask, int poly1Hull0, float perimScale,
                 CvMemStorage* storage, CvPoint offset )
{
    CvMat mstub, *mask = cvGetMat( _mask, &mstub );
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;
    
    // clean up raw mask
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );

    // find contours around only bigger regions
    scanner = cvStartFindContours( mask, tempStorage,
        sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );
    
    while( (c = cvFindNextContour( scanner )) != 0 ) 
    {
        double len = cvContourPerimeter( c );
        double q = (mask->rows + mask->cols)/perimScale; // calculate perimeter len threshold
        if( len < q ) // get rid of blob if its perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else // smooth its edges if it is large enough
        {
            CvSeq* newC;
            if( poly1Hull0 ) //Polygonal approximation of the segmentation 
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 ); 
            else //Convex Hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // paint the found regions back into the image
    cvZero( mask );
    for( c=contours; c != 0; c = c->h_next ) 
        cvDrawContours( mask, c, cvScalarAll(255), cvScalarAll(0), -1, CV_FILLED, 8,
            cvPoint(-offset.x,-offset.y));

    if( tempStorage != storage )
    {
        cvReleaseMemStorage( &tempStorage );
        contours = 0;
    }

    return contours;
}
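Usage sketch: pass your own storage to keep the contour list; as the last block above shows, with storage == NULL the temporary storage is released and the function returns 0, leaving only the repainted mask (fgmask is an assumed 8-bit single-channel image):

CvMemStorage* store = cvCreateMemStorage(0);
CvSeq* regions = cvSegmentFGMask(fgmask, 1, 4.0f, store, cvPoint(0,0));
for (CvSeq* r = regions; r != 0; r = r->h_next)
    printf("region with %d points\n", r->total);
cvReleaseMemStorage(&store);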
Example #8
void COpenCVCheck::OpenCVBinary(CString fileName)
{
	CvScalar colors[] = {{255,255,255},{0,0,0}};
	IplImage* pImg; // declare the IplImage pointer
	if((pImg = cvLoadImage(fileName, 0)) != 0)
	{
		IplImage* dst = NULL;
		dst=cvCreateImage(cvSize(pImg->width,pImg->height),IPL_DEPTH_8U,1);
		//cvThreshold(pImg,dst,185,255,CV_THRESH_BINARY);
		cvAdaptiveThreshold(pImg,dst,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,5,3); // adaptive binarization

		ReverseColor(dst);
		for (int kk = 0;kk<2;kk++)   // two denoising passes
		{
			CvSeq *contours;
			CvMemStorage* storage = cvCreateMemStorage(0);

			cvFindContours( dst, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
			// cvFindContours treats black as the background color
			while(contours)
			{
				//approximate contour with accuracy proportional
				CvSeq* result = cvApproxPoly( contours, sizeof(CvContour),
					storage,CV_POLY_APPROX_DP, 3, 1);
				//to filter the noisy contour
				if(fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 2)
				{
					if (result->total > 0)
					{
						for(int i = 0; i < result->total; i++)
						{
							// the sequence holds CvPoint elements, not CvRect
							CvPoint* p = (CvPoint*)cvGetSeqElem(result,i);
							cvSet2D(dst,p->y,p->x,colors[1]);
						}
					}
				}
				contours = contours->h_next;
			}
			cvReleaseMemStorage(&storage);  // avoid leaking one storage per pass
		}
		ReverseColor(dst);
		ClearNoise(dst);
		cvSaveImage(fileName,dst);
		cvReleaseImage(&dst);
		cvReleaseImage(&pImg);
	}
}
Example #9
CvSeq *
getPolygon(CvSeq * aContour)
{
	// note: this storage is never released, so the approximated contour
	// (and the storage itself) leak unless the caller tracks them
	CvMemStorage* storage = cvCreateMemStorage(0);
	double contourPerimeter;
	CvSeq* aPolyContour;

	contourPerimeter = cvContourPerimeter(aContour);
	aPolyContour = cvApproxPoly (aContour, sizeof(CvContour),
			storage, CV_POLY_APPROX_DP, contourPerimeter/PER_TOLERANCE,
			0);

	return aPolyContour;
}
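A leak-free variant is possible by letting the caller own the storage; a sketch (PER_TOLERANCE is assumed to be a project-wide constant, as above; the name getPolygonInStorage is hypothetical):

CvSeq *
getPolygonInStorage(CvSeq * aContour, CvMemStorage * storage)
{
	double perimeter = cvContourPerimeter(aContour);
	return cvApproxPoly(aContour, sizeof(CvContour), storage,
			CV_POLY_APPROX_DP, perimeter / PER_TOLERANCE, 0);
}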
Example #10
CvPoint CTools::QuartzPostion(IplImage* src, IplImage* dst)
{
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq * contour = 0;
	int mode = CV_RETR_EXTERNAL;
	double length;
	CvPoint2D32f center;
	float r;
	CvPoint pt; 

	pt.y = 1000;
	pt.x = 0;

	CalibrateData m_CalDat;

	GetCalirateParam(&m_CalDat);

	IplImage* temp = cvCreateImage(cvGetSize(src), 8, 1);
	cvCanny(src, temp, 50, 100);

	cvFindContours(temp, storage, &contour, sizeof(CvContour), mode);

	for( CvSeq* c = contour; c != NULL; c = c->h_next)
	{
		c = cvApproxPoly( c, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 5, 1 );
		length = cvArcLength(c, CV_WHOLE_SEQ, -1);
		if ((length > m_CalDat.WaferPxLow) && (length < m_CalDat.WaferPxHigh))
		{
			cvDrawContours(dst, c, CV_RGB(0,0,255), CV_RGB(255, 0, 0), -1, 2, 8);
			cvMinEnclosingCircle(c, &center, &r);
			if ((center.y > 336) && (center.y < pt.y))
			{
				pt = cvPointFrom32f(center);
			}
			//pt[num] = cvPointFrom32f(center);
			//cvCircle(pContoursImg, pt[num], 3, CV_RGB(0,0,255), -1);
			//cvCircle(pContoursImg, pt[num], r, CV_RGB(0,0,255), 2);
		}
	}
	cvCircle(dst, pt, 10, CV_RGB(255,0, 0), -1);
	cvReleaseImage(&temp);
	cvReleaseMemStorage( &storage );  // release alone suffices; clearing first is redundant

	return pt;

}
Example #11
/*
 * Approximates polygonal curves with desired precision
 * @overload approx_poly(options)
 *   @param options [Hash] Parameters
 *   @option options [Symbol] :method Approximation method (default :dp)
 *     * :dp - Douglas-Peucker algorithm.
 *   @option options [Number] :accuracy Parameter specifying the approximation accuracy.
 *     This is the maximum distance between the original curve and its approximation.
 *   @option options [Boolean] :recursive Recursion flag. If true, the function approximates
 *     all the contours accessible from curve by h_next and v_next links.
 * @return [CvContour] Result of the approximation
 * @return [nil] Approximation failed
 * @opencv_func cvApproxPoly
 */
VALUE
rb_approx_poly(int argc, VALUE *argv, VALUE self)
{
  VALUE approx_poly_option;
  rb_scan_args(argc, argv, "01", &approx_poly_option);  
  approx_poly_option = APPROX_POLY_OPTION(approx_poly_option);
  VALUE storage = cCvMemStorage::new_object();
  CvSeq *contour = cvApproxPoly(CVCONTOUR(self), sizeof(CvContour), CVMEMSTORAGE(storage),
				APPROX_POLY_METHOD(approx_poly_option),
				APPROX_POLY_ACCURACY(approx_poly_option),
				APPROX_POLY_RECURSIVE(approx_poly_option));

  if (contour && contour->total > 0) {
    return cCvSeq::new_sequence(cCvContour::rb_class(), contour, cCvPoint::rb_class(), storage);
  }
  return Qnil;
}
Example #12
void EyeCorners::detectEyeCorners(IplImage* img){

	CvSeq* polygon;
	CvMemStorage* storage1 = cvCreateMemStorage(0); // was 0: cvApproxPoly needs a valid storage
	CvMemStorage* storage = cvCreateMemStorage(0);  // was uninitialized: cvFindContours needs a valid storage

	IplImage* edge = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); // note: leaked; detectEdge() replaces it below
	CannyEdge cannyEdge;

	edge = cannyEdge.detectEdge(img);

	CvSeq* first_contour = NULL;
	if(edge){
		int Nc = cvFindContours(edge, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST);
		cout<<"Total contours detected:"<<Nc<<endl;
		cvShowImage("Canny",edge);
		//cvDrawContours(img,first_contour,cvScalarAll(255),	cvScalarAll(255), 100 );
		//cvFindDominantPoints(first_contour,storage,CV_DOMINANT_IPAN,1,2,3,4);
		cout<<"Length="<<cvContourPerimeter(first_contour)<<endl;
		cout<<first_contour->first;

	}
	CvRect eyeRect=cvBoundingRect(first_contour,1);
	cvRectangle(img,cvPoint(eyeRect.x,eyeRect.y),cvPoint(eyeRect.x+eyeRect.width,eyeRect.y+eyeRect.height),cvScalar(255,255,0),1);
    polygon = cvApproxPoly(first_contour, sizeof(CvContour), storage1,CV_POLY_APPROX_DP, 100);

    if(polygon){
    	cout<<"draw polygon pts:"<<polygon->total<<endl;
		for( int i = 0; i < polygon->total; i++ ){
			// elements are CvPoint, not float pairs
			CvPoint temppt = *(CvPoint*)cvGetSeqElem( polygon, i );
			cvCircle( img, temppt, 2, CV_RGB(255,0,0), 2, 8, 0 );
		}
    }
}
Example #13
void bContourFinder::smoothApproxChains(){ //int smoothMod, float value){
  CvMemStorage *stor = cvCreateMemStorage(); 
  CvSeq * ptseq = cvCreateSeq( CV_SEQ_KIND_CURVE  | CV_SEQ_ELTYPE_POINT,
                                    sizeof(CvSeq),
                                    sizeof(CvPoint),
                                    stor );
  CvSeq * hull; 
  CvPoint pt; 
  this->convexBlobs.clear();
  for(int i = 0 ; i < this->blobs.size(); i++){
    this->convexBlobs.push_back(ofxCvBlob());
    this->convexBlobs[i] = this->blobs[i];
    this->convexBlobs[i].pts.clear(); 
    cvClearSeq( ptseq );  // start each blob from an empty point sequence
    // get blob i
    for(int j = 0; j < this->blobs[i].pts.size(); j++){
      // fill in blob points
      pt.x = this->blobs[i].pts[j].x;
      pt.y = this->blobs[i].pts[j].y;
      cvSeqPush( ptseq, &pt);
    }
  

    hull = cvApproxPoly(ptseq, sizeof(CvContour), stor,
        CV_POLY_APPROX_DP, cvContourPerimeter(ptseq)*0.004, 0);

    // copy out the approximated points; cvApproxPoly returns a sequence of
    // CvPoint values (not pointers), so dereference only once
    for( int j = 0; j < hull->total; j++ ) {
      CvPoint hp = *CV_GET_SEQ_ELEM( CvPoint, hull, j );
      convexBlobs[i].pts.push_back( ofPoint((float)hp.x, (float)hp.y) );
    }
    convexBlobs[i].nPts = convexBlobs[i].pts.size();
  }
  cvReleaseMemStorage( &stor );  // release, not just clear, or this storage leaks on every call
  
}
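ofxCvBlob comes from openFrameworks' ofxOpenCv addon; the fields this method touches look roughly like this (a simplified sketch for context, not the full class):

class ofxCvBlob {
public:
    std::vector<ofPoint> pts;   // contour points
    int                  nPts;  // number of points
    // the real addon also carries area, length, boundingRect, centroid, hole
};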
Example #14
CvSeq* EyeTracker::findSquares()
{
	CvSeq* contours;
	int i, N = 5;
	CvSize sz = cvSize( sceneImagePts->width, sceneImagePts->height);
	IplImage* gray = cvCreateImage( sz, 8, 1 );
	IplImage* tgray = cvCreateImage( sz, 8, 1 );
	CvSeq* result;
	double s, t;

	CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), squareStorage);

	cvCvtColor(sceneImagePts, tgray, CV_RGB2GRAY);

	// binarize with a fixed member threshold (this version does not sweep
	// multiple levels the way the original squares.c sample does)
	cvThreshold(tgray, gray, squareThreshold, 255, CV_THRESH_BINARY );

	// find contours and store them all as a list
	cvFindContours(gray, squareStorage, &contours, sizeof(CvContour),
				   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

	// test each contour
	while(contours)
	{
		// approximate contour with accuracy proportional
		// to the contour perimeter
		result = cvApproxPoly(contours, 
							  sizeof(CvContour), 
							  squareStorage,
							  CV_POLY_APPROX_DP, 
							  cvContourPerimeter(contours) * 0.02, 
							  0);

		// square contours should have 4 vertices after approximation
		// relatively large area (to filter out noisy contours)
		// and be convex.
		// Note: absolute value of an area is used because
		// area may be positive or negative - in accordance with the
		// contour orientation
		if(result->total == 4 &&
		   fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 1000 &&
		   cvCheckContourConvexity(result))
		{
			s = 0;

			for(i = 0; i < 5; ++i)
			{
				// find minimum angle between joint
				// edges (maximum of cosine)
				if(i >= 2)
				{
					t = fabs(angle((CvPoint*)cvGetSeqElem(result, i),
								   (CvPoint*)cvGetSeqElem(result, i-2),
								   (CvPoint*)cvGetSeqElem(result, i-1)));
					s = s > t ? s : t;
				}
			}

			// if cosines of all angles are small
			// (all angles are ~90 degrees) then write the quadrangle
			// vertices to the resultant sequence
			if(s < 0.2)
			{
				CvSeqReader reader;

				// initialize reader of the sequence
				cvStartReadSeq(result, &reader, 0);
				CvPoint pt[4];

				CV_READ_SEQ_ELEM( pt[0], reader );
				CV_READ_SEQ_ELEM( pt[1], reader );
				CV_READ_SEQ_ELEM( pt[2], reader );
				CV_READ_SEQ_ELEM( pt[3], reader );

				for(int i = 1; i < 4; ++i)
				{
					for(int j = 0; j < 4 - i; ++j)
					{
						if(pt[j].x > pt[j+1].x)
						{
							CvPoint temp = pt[j+1];
							pt[j+1] = pt[j];
							pt[j] = temp;
						}
					}
				}

				if(pt[0].y > pt[1].y)
				{
					CvPoint temp = pt[1];
					pt[1] = pt[0];
					pt[0] = temp;
				}

				if(pt[2].y > pt[3].y)
				{
					CvPoint temp = pt[3];
					pt[3] = pt[2];
					pt[2] = temp;
				}

				if(abs(pt[0].y - pt[1].y) > 240)
				{
					sceneCorner[0] = pt[0];
					sceneCorner[1] = pt[1];
					sceneCorner[2] = pt[2];
					sceneCorner[3] = pt[3];

					for(int i = 0; i < 4; ++i)
						cvSeqPush(squares, (CvPoint*) cvGetSeqElem(result, i));

					break;
				}
			}
		}

		// take the next contour
		contours = contours->h_next;
	}

	// release all the temporary images
	cvReleaseImage(&gray);
	cvReleaseImage(&tgray);
	
	return squares;
}
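findSquares() relies on an angle() helper that is not shown here; the classic OpenCV squares.c sample defines it as the cosine of the angle between the vectors pt0->pt1 and pt0->pt2, roughly:

static double angle(CvPoint* pt1, CvPoint* pt2, CvPoint* pt0)
{
	double dx1 = pt1->x - pt0->x;
	double dy1 = pt1->y - pt0->y;
	double dx2 = pt2->x - pt0->x;
	double dy2 = pt2->y - pt0->y;
	// tiny epsilon keeps degenerate (zero-length) edges from dividing by zero
	return (dx1*dx2 + dy1*dy2) /
	       sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10);
}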
Example #15
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;
    
    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;
	
	// polyline approximation storage
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;
	
    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);
 
	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// set the capture frame size
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	// set the capture fps
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 106;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 135;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 105;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);
	
	// declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	
	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	// flip vertically and horizontally; in production, mat_frame1 is the one to flip
	cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the concatenated image
	// image dilation/erosion (disabled)
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the concatenated image

	// hand the brightness-adjusted result back as an IplImage (Mat -> IplImage*), then release
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 54, 77, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	// image dilation/erosion (disabled)
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
//		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	// determine the frame coordinates
	// upper left, wall side:   upper_left_f
	// upper left, goal side:   upper_left_g
	// upper right, wall side:  upper_right_f
	// upper right, goal side:  upper_right_g
	// lower left, wall side:   lower_left_f
	// lower left, goal side:   lower_left_g
	// lower right, wall side:  lower_right_f
	// lower right, goal side:  lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1; // to be driven by the signal from the other Raspberry Pi
	
	while(1){
		// start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			// flip vertically and horizontally; in production, mat_frame1 is the one to flip
			cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the concatenated image
			// hand the brightness-adjusted result back as an IplImage (Mat -> IplImage*), then release
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();
	
			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			// compute centroids from the moments
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; // target rotation: clockwise = 1, counterclockwise = 0
			// circles drawn at 1/10 of the image height (disabled)
//			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
//			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; // fixed Y coordinate: the defense line
			// the puck travels in straight lines, so predict its trajectory with a linear function
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 0; // counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; // clockwise
			}
			else{
				//pwm output for rotate
				// keep a margin to absorb table vibration
				if(abs(gX_after - gX_before) <= 1 && abs(gY_after - gY_before) <= 1){ // pause while the puck is not moving
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
					a_inclination = 0;
					b_intercept=0;
				}
				else{
					a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
					b_intercept = gY_after - a_inclination * gX_after;
					// compute the target X coordinate from the linear function
					if(a_inclination){
						target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
					}
					else{
						target_coordinateX = center_line;
					}

					origin_coordinateY = a_inclination * left_frame + b_intercept;

					int rebound_max = 5;
					int rebound_num = 0;

					while(target_coordinateX < left_frame || right_frame < target_coordinateX){
						if(target_coordinateX < left_frame){ // trajectory after bouncing off the left wall
							target_coordinateX = 2 * left_frame - target_coordinateX;
							b_intercept -= 2 * ((-a_inclination) * left_frame);
							a_inclination = -a_inclination;
							origin_coordinateY = a_inclination * left_frame + b_intercept;
							if(target_coordinateX < right_frame){
							}
							else{
								// Y coordinate where the path runs from the left wall to the right wall
								target_coordinateY = a_inclination * right_frame + b_intercept;
							}
						}
						else if(right_frame < target_coordinateX){ // trajectory after bouncing off the right wall
							target_coordinateX = 2 * right_frame - target_coordinateX;
							b_intercept += 2 * (a_inclination * right_frame);
							a_inclination= -a_inclination;
							//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
							origin_coordinateY = a_inclination * right_frame + b_intercept;
							if(left_frame < target_coordinateX){
							}
							else{
								// Y coordinate where the path runs from the right wall to the left wall
								target_coordinateY = a_inclination * left_frame + b_intercept;
							}
						}
						rebound_num++;
						if(rebound_max < rebound_num){
							// too many rebounds: aim for the center
							target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
							break;
						}
					}
					if(target_coordinateX < center_line && center_line < gX_now_mallet){
						target_direction = 0;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else if(center_line < target_coordinateX && gX_now_mallet < center_line){
						target_direction = 1;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else{
						gpioPWM(25, 0);
						closest_frequency = gpioSetPWMfrequency(25, 0);
					}
				}
				printf("a_inclination: %f\n",a_inclination);
				printf("b_intercept: %f\n",b_intercept);
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}

			//pwm output for rotate
			//台の揺れを想定してマージンをとる
			/*if(abs(gX_after - gX_before) <= 1){ // pause while the puck is not moving
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
				a_inclination = 0;
				b_intercept=0;
			}
			else if(gY_after-1 < gY_before ){	// when the puck is moving away, return to the table center
				a_inclination = 0;
				b_intercept=0;
				// the target is the center, computed from the four robot-side frame points
				double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
				if(center_line + 3 < gX_now_mallet){ // + margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 0; // counterclockwise
				}
				else if(gX_now_mallet < center_line-3){  // - margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 1; // clockwise
				}
				else{
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
				}
			}
			else{
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
				b_intercept = gY_after - a_inclination * gX_after;
				// compute the target X coordinate from the linear function
				if(a_inclination){
					target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
				}
				else{
					target_coordinateX = 0;
				}
			}

			printf("a_inclination: %f\n",a_inclination);
			printf("b_intercept: %f\n",b_intercept);

			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;
			origin_coordinateY = a_inclination * left_frame + b_intercept;
			if(target_coordinateX < left_frame){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(left_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else if(right_frame < target_coordinateX){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(right_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else{
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
			}

			int rebound_max = 5;
			int rebound_num = 0;

			while(target_coordinateX < left_frame || right_frame < target_coordinateX){
				if(target_coordinateX < left_frame){ // trajectory after bouncing off the left wall
					target_coordinateX = 2 * left_frame - target_coordinateX;
					b_intercept -= 2 * ((-a_inclination) * left_frame);
					a_inclination = -a_inclination;
					origin_coordinateY = a_inclination * left_frame + b_intercept;
					if(target_coordinateX < right_frame){
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						// Y coordinate where the path runs from the left wall to the right wall
						target_coordinateY = a_inclination * right_frame + b_intercept;
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint(right_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				else if(right_frame < target_coordinateX){ // trajectory after bouncing off the right wall
					target_coordinateX = 2 * right_frame - target_coordinateX;
					b_intercept += 2 * (a_inclination * right_frame);
					a_inclination= -a_inclination;
					//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
					origin_coordinateY = a_inclination * right_frame + b_intercept;
					if(left_frame < target_coordinateX){
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						// Y coordinate where the path runs from the right wall to the left wall
						target_coordinateY = a_inclination * left_frame + b_intercept;
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint(left_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				rebound_num++;
				if(rebound_max < rebound_num){
					// too many rebounds: aim for the center
					target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
					break;
				}
			}

			printf("target_coordinateX: %d\n",target_coordinateX);
			// draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			// draw the mallet's planned motion
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
			//cvPutText (show_img, to_c_char((int)gX_now_mallet), cvPoint(460,30), &font, cvScalar(220,50,50));
			//cvPutText (show_img, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));

			int amount_movement = target_coordinateX - gX_now_mallet;

			//reacted limit-switch and target_direction rotate
//			if(gpioRead(6) == 1){ // X axis, right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0; // counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){ // X axis, left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; // clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){ // Y axis, upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){ // Y axis, lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){ // Y axis, lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){ // Y axis, upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				// hold the Y axis in place
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1; // clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0; // counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// display the images in their windows (disabled)
			//cvShowImage("Previous Image", img_all_round2);
//			cvShowImage("Now Image", show_img);
//			cvShowImage("pack", dst_img_pack);
//			cvShowImage("mallet", dst_img_mallet);
//			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ // a reset signal arrived
			is_pushed_decision_button = 0;
		}
    }
    
    gpioTerminate();
    
    cvDestroyAllWindows();
	
	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);

    return 0;
}
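cv_ColorExtraction() is an external helper not shown in this example; judging from its call sites (src, dst, conversion code, then min/max per channel), it converts to the target color space and keeps the pixels whose channels fall inside the ranges. A plausible reconstruction, offered as an assumption rather than the original:

void cv_ColorExtraction(IplImage* src, IplImage* dst, int code,
                        int ch1_min, int ch1_max,
                        int ch2_min, int ch2_max,
                        int ch3_min, int ch3_max)
{
	/* hypothetical reconstruction of the helper, not the author's code */
	IplImage* conv = cvCreateImage(cvGetSize(src), 8, 3);
	IplImage* mask = cvCreateImage(cvGetSize(src), 8, 1);
	cvCvtColor(src, conv, code);
	// cvInRangeS excludes the upper bound, hence the +1
	cvInRangeS(conv, cvScalar(ch1_min, ch2_min, ch3_min, 0),
	                 cvScalar(ch1_max + 1, ch2_max + 1, ch3_max + 1, 0), mask);
	cvZero(dst);
	cvCopy(src, dst, mask);  // keep only the in-range pixels
	cvReleaseImage(&conv);
	cvReleaseImage(&mask);
}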
Example #16
CvSeq* CSquareDetection::FindSquares( IplImage* tgray )
{
	CvSeq* contours;
	int i, l, N = 11;
	double imgArea = tgray->width*tgray->height;
	CvSize sz = cvSize( tgray->width & -2, tgray->height & -2 );
	IplImage* gray = cvCreateImage( sz, 8, 1 ); 
	IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 1 );
	CvSeq* result;
	// create empty sequence that will contain points -
	// 4 points per square (the square's vertices)
	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

	// select the maximum ROI in the image
	// with the width and height divisible by 2
	cvSetImageROI( tgray, cvRect( 0, 0, sz.width, sz.height ));

	// down-scale and upscale the image to filter out the noise
	//cvPyrDown( tgray, pyr, 7 );
	//cvPyrUp( pyr, tgray, 7 );

	// try several threshold levels
	cvCanny( tgray, gray, 0, _CannyThresh, 5 );
	cvDilate( gray, gray, 0, 1 );

	for( l = 1; l < N-4; l++ )
	{
		cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
		// find contours and store them all as a list
		cvFindContours( gray, storage, &contours, sizeof(CvContour),
			CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

		// test each contour
		while( contours )
		{
			// approximate contour with accuracy proportional
			// to the contour perimeter
			result = cvApproxPoly( contours, sizeof(CvContour), storage,
				CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
			// square contours should have 4 vertices after approximation
			// relatively large area (to filter out noisy contours)
			// and be convex.
			// Note: absolute value of an area is used because
			// area may be positive or negative - in accordance with the
			// contour orientation
			double area = fabs(cvContourArea(result,CV_WHOLE_SEQ));
			if( result->total == 4 &&
				area < _maxPropotionArea*imgArea &&
				area > _minPropotionArea*imgArea &&
				cvCheckContourConvexity(result) )
			{
				// check and re-order the vertex positions
				if (Check4Vertexes(result, _CosineThresh, _EdgeThresh))
				{
					// push into the result array
					for( i = 0; i < 4; i++ )
						cvSeqPush( squares,(CvPoint*)cvGetSeqElem( result, i ));
				}
			}

			// take the next contour
			contours = contours->h_next;
		}
	}
	// second pass: merge overlapping detections (keep one square per cluster)
	int delta_thres = 30;
	int* flags = new int[squares->total/4];
	for (int i = 0; i < squares->total/4; i++)
		flags[i] = 0;

	CvSeq* sqrSeq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
	CvPoint* V[4], *Vp[4];

	for (int i = 0; i < squares->total; i+=4)
	{
		if (!flags[i/4])
		{
			V[0] = (CvPoint*)cvGetSeqElem( squares, i );
			V[1] = (CvPoint*)cvGetSeqElem( squares, i+1 );
			V[2] = (CvPoint*)cvGetSeqElem( squares, i+2 );
			V[3] = (CvPoint*)cvGetSeqElem( squares, i+3 );

			for (int j = i+4; j < squares->total; j+= 4)
			{
				if (!flags[j/4])
				{
					Vp[0] = (CvPoint*)cvGetSeqElem( squares, j );
					Vp[1] = (CvPoint*)cvGetSeqElem( squares, j+1 );
					Vp[2] = (CvPoint*)cvGetSeqElem( squares, j+2 );
					Vp[3] = (CvPoint*)cvGetSeqElem( squares, j+3 );
					// midpoint of the candidate square's diagonal
					CvPoint M;
					M.x = (Vp[0]->x+Vp[2]->x)/2;
					M.y = (Vp[0]->y+Vp[2]->y)/2;

					if (MathHelper.ktNamTrong(V, 4, &M))
					{
						int d1 = max(MathHelper.sqrDistance(V[0], V[1]), MathHelper.sqrDistance(V[1], V[2]));
						int d2 = max(MathHelper.sqrDistance(Vp[0], Vp[1]), MathHelper.sqrDistance(Vp[1], Vp[2]));
						
						if ( d1 > d2)
						{
							V[0]->x = Vp[0]->x; V[0]->y = Vp[0]->y;
							V[1]->x = Vp[1]->x; V[1]->y = Vp[1]->y;
							V[2]->x = Vp[2]->x; V[2]->y = Vp[2]->y;
							V[3]->x = Vp[3]->x; V[3]->y = Vp[3]->y;
						}
						flags[j/4] = 1;
					}
					
				}
			}
		}
	}

	for (int i = 0; i < squares->total; i+=4)
	{
		if (!flags[i/4])
		{
			V[0] = (CvPoint*)cvGetSeqElem( squares, i );
			V[1] = (CvPoint*)cvGetSeqElem( squares, i+1 );
			V[2] = (CvPoint*)cvGetSeqElem( squares, i+2 );
			V[3] = (CvPoint*)cvGetSeqElem( squares, i+3 );

			// check whether the vertices run counterclockwise;
			// if not, swap V[1] and V[3] to fix the orientation
			// of the detected card
			Line* l = MathHelper.ptDuongThang(V[0], V[1]);
			if (MathHelper.thePointLenLine(l, V[3]) > 0)
			{
				int temp = V[1]->x; V[1]->x = V[3]->x; V[3]->x  = temp;
					temp = V[1]->y; V[1]->y = V[3]->y; V[3]->y  = temp;
			}
			//MathHelper.SapDongHo(V);
			cvSeqPush(sqrSeq, V[0]);
			cvSeqPush(sqrSeq, V[1]);
			cvSeqPush(sqrSeq, V[2]);
			cvSeqPush(sqrSeq, V[3]);
		}
	}

	//cvClearSeq(squares);
	// release all the temporary images
	cvReleaseImage( &gray );
	cvReleaseImage( &pyr );
	//cvReleaseImage( &tgray );

	delete[] flags;  // was never freed
	// note: clearing 'storage' here would invalidate sqrSeq, which lives in it
	return sqrSeq;
}
Example #17
CvSeq * find_quad( CvSeq * src_contour, CvMemStorage *storage, int min_size)
{
    // stolen from icvGenerateQuads
    // the child storage is deliberately not released: dst_contour lives in it,
    // and its blocks belong to the parent 'storage'
    CvMemStorage * temp_storage = cvCreateChildMemStorage( storage );
    
    int flags = CV_CALIB_CB_FILTER_QUADS;
    CvSeq *dst_contour = 0;
    
    const int min_approx_level = 2, max_approx_level = MAX_CONTOUR_APPROX;
    int approx_level;
    for( approx_level = min_approx_level; approx_level <= max_approx_level; approx_level++ )
    {
        dst_contour = cvApproxPoly( src_contour, sizeof(CvContour), temp_storage,
                                    CV_POLY_APPROX_DP, (float)approx_level );
        // we call this again on its own output, because sometimes
        // cvApproxPoly() does not simplify as much as it should.
        dst_contour = cvApproxPoly( dst_contour, sizeof(CvContour), temp_storage,
                                    CV_POLY_APPROX_DP, (float)approx_level );

        if( dst_contour->total == 4 )
            break;
    }

    // reject non-quadrangles
    if( dst_contour->total == 4 && cvCheckContourConvexity(dst_contour) )
    {
        CvPoint pt[4];
        double d1, d2, p = cvContourPerimeter(dst_contour);
        double area = fabs(cvContourArea(dst_contour, CV_WHOLE_SEQ));
        double dx, dy;

        for( int i = 0; i < 4; i++ )
            pt[i] = *(CvPoint*)cvGetSeqElem(dst_contour, i);

        dx = pt[0].x - pt[2].x;
        dy = pt[0].y - pt[2].y;
        d1 = sqrt(dx*dx + dy*dy);

        dx = pt[1].x - pt[3].x;
        dy = pt[1].y - pt[3].y;
        d2 = sqrt(dx*dx + dy*dy);

        // philipg.  Only accept those quadrangles which are more square
        // than rectangular and which are big enough
        double d3, d4;
        dx = pt[0].x - pt[1].x;
        dy = pt[0].y - pt[1].y;
        d3 = sqrt(dx*dx + dy*dy);
        dx = pt[1].x - pt[2].x;
        dy = pt[1].y - pt[2].y;
        d4 = sqrt(dx*dx + dy*dy);
        if( !(flags & CV_CALIB_CB_FILTER_QUADS) ||
            (d3*1.1 > d4 && d4*1.1 > d3 && d3*d4 < area*1.5 && area > min_size &&
            d1 >= 0.15 * p && d2 >= 0.15 * p) )
        {
            // CvContourEx* parent = (CvContourEx*)(src_contour->v_prev);
            // parent->counter++;
            // if( !board || board->counter < parent->counter )
            //     board = parent;
            // dst_contour->v_prev = (CvSeq*)parent;
            //for( i = 0; i < 4; i++ ) cvLine( debug_img, pt[i], pt[(i+1)&3], cvScalar(200,255,255), 1, CV_AA, 0 );
            // cvSeqPush( root, &dst_contour );
            return dst_contour;
        }
    }
    
    return NULL;
}
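A usage sketch: MAX_CONTOUR_APPROX is assumed to be a small constant (OpenCV's chessboard detector uses 7). The returned quad is allocated from a child of 'storage', so keep that storage alive while using the points (some_contour is an assumed input contour):

CvSeq* quad = find_quad(some_contour, storage, 25 /* min area in pixels */);
if (quad) {
    for (int i = 0; i < 4; i++) {
        CvPoint p = *(CvPoint*)cvGetSeqElem(quad, i);
        printf("corner %d: (%d, %d)\n", i, p.x, p.y);
    }
}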
Example #18
CvSeq* findSquares4(IplImage *img, CvMemStorage* storage)
{
	CvSeq* contours;
	int i, c, l, N = 11;
	int thresh = 50;
	CvSize sz = cvSize(img->width & -2, img->height & -2);

	IplImage* timg = cvCloneImage(img);
	IplImage* gray = cvCreateImage(sz, 8, 1);
	IplImage* pyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
	IplImage* tgray;

	CvSeq* result;

	// create an empty sequence to store the detected corner points
	CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

	cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));

	// noise filtering (pyramid down/up disabled)
	//cvPyrDown(timg, pyr, 7);
	tgray = cvCreateImage(sz, 8, 1);
	//cvPyrUp(pyr, timg, 7);

        // process the red, green and blue channels in turn
	for (int c = 0; c < 3; c++) {
		cvSetImageCOI(timg, c + 1);
		cvCopy(timg, tgray, 0);

                // try several threshold levels
		for (int l = 0; l < N; l++) {
			if (l == 0) {
				cvCanny(tgray, gray, 0, thresh, 5);
				cvDilate(gray, gray, 0, 1);
			} else {
				cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
				                CV_THRESH_BINARY);
			}

                        // find contours and store them all as a list
			cvFindContours(gray, storage, &contours,
			                sizeof(CvContour), CV_RETR_LIST,
			                CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));


                        // walk every contour
			while (contours) {
				// approximate the contour with accuracy proportional to its perimeter
				result = cvApproxPoly(contours, sizeof(CvContour), storage,
				                CV_POLY_APPROX_DP,
				                cvContourPerimeter(contours) * 0.02, 0);
				double area = fabs(cvContourArea(result, CV_WHOLE_SEQ));
				if (result->total == 4
				                && area > 500
				                && area < 100000
				                && cvCheckContourConvexity(result))
				{

					double s = 0, t;
					for (int i = 0; i < 5; i++) {
						if (i >= 2) {
							t = fabs(angle(
							                (CvPoint*) cvGetSeqElem(result, i),
							                (CvPoint*) cvGetSeqElem(result, i - 2),
							                (CvPoint*) cvGetSeqElem(result, i - 1)));
							s = s > t ? s : t;
						}
					}

                                        // 如果余弦值足够小, 可以认定角度为90度, 是直角
					if (s < 0.08) {
						for (int i = 0; i < 4; i++) {
							cvSeqPush(squares,
							                (CvPoint *) cvGetSeqElem(
							                                result,
							                                i));
						}
					}
				}
				contours = contours->h_next;
			}

		}
	}

	cvReleaseImage(&gray);
	cvReleaseImage (&pyr);
	cvReleaseImage(&tgray);
	cvReleaseImage(&timg);

	return squares;
}
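Note that this function (like the later square finders) calls an angle() helper that never appears in these listings. In the OpenCV squares sample they derive from, it returns the cosine of the angle at pt0 between the edges to pt1 and pt2, which is why a small s (e.g. s < 0.08 above) means every corner is close to 90 degrees:

#include <opencv/cxcore.h>
#include <math.h>

// Cosine of the angle at pt0 between the vectors pt0->pt1 and pt0->pt2
// (as in OpenCV's squares sample); the 1e-10 guards against division by zero.
static double angle(CvPoint* pt1, CvPoint* pt2, CvPoint* pt0)
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2) /
           sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10);
}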
Ejemplo n.º 19
0
bool findBlueNYelContour(IplImage* img, CvMemStorage* &storage, CvPoint &centre, int color){  // color: blue==0, yellow==1
	CvSeq* contours;
	IplImage* timg = cvCloneImage( img ); // make a copy of input image
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );
	CvSeq* result;

	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
	cvNamedWindow("rgbContour", 0);

	IplImage* hsv = cvCreateImage( cvGetSize(timg), 8, 3 );
	cvSmooth(timg, timg, CV_GAUSSIAN, 3); // smooth the input before colour segmentation
	if (color == 0) {
		findLP_HSV_BLUE(timg, hsv);
		cvNamedWindow("hsv_license_blue", 0);
	}
	else {
		findLP_HSV_YEL(timg, hsv);
		cvNamedWindow("hsv_license_yel", 0);
	}

	cvNamedWindow("before erosion", 0);
	cvShowImage("before erosion", hsv);
	cvErode(hsv, hsv, 0, 1);
	cvNamedWindow("after erosion", 0);
	cvShowImage("after erosion", hsv);
	cvDilate(hsv, hsv, 0, 4);
	cvNamedWindow("after dilation", 0);
	cvShowImage("after dilation", hsv);
	cvCvtColor(hsv, hsv, CV_HSV2RGB);

	cvCvtColor(hsv, gray, CV_RGB2GRAY);
	cvThreshold(gray, gray, 100, 255, 0);
	// walk the contours one by one with a scanner so rejected candidates can
	// be dropped in place (an alternative: collect them all with cvFindContours)
	CvContourScanner scanner = cvStartFindContours(gray, storage, sizeof(CvContour),
		CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
	// test each contour
	int t = 0;
	while ((contours = cvFindNextContour(scanner)) != NULL)
	{
		// approximate the contour with accuracy proportional
		// to the contour perimeter
		result = cvApproxPoly( contours, sizeof(CvContour), storage,
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.04, 0 );
		double tempArea = fabs(cvContourArea(result, CV_WHOLE_SEQ));
		double peri = cvContourPerimeter(result);
		CvRect rct = cvBoundingRect(result, 1);
		// licence-plate candidates must fall inside empirically chosen bounds
		// on area, vertex count, perimeter, aspect ratio and width.
		// Note: the absolute value of the area is used because the area may
		// be positive or negative, in accordance with the contour orientation.
		if (tempArea < 3500 || tempArea > 10000 ||
			result->total < 4 || result->total > 10 ||
			peri < 340 || peri > 500 ||
			rct.width/(1.0*rct.height) > 3.85 || rct.width/(1.0*rct.height) < 2.47 ||
			rct.width < 135 || rct.width > 175)
		{
			cvSubstituteContour(scanner, NULL); // reject this candidate
		}
		else {
			// accepted: compute the centroid of the contour from its moments
			CvScalar drawColor = CV_RGB( rand()&255, rand()&255, rand()&255 );
			// cvDrawContours( timg, result, drawColor, drawColor, -1, 3, 8 );
			// cvDrawContours( hsv, result, drawColor, drawColor, -1, 3, 8 );
			t++;
			CvMoments moments;
			cvMoments( result, &moments, 0 );
			int xc = moments.m10/moments.m00, yc = moments.m01/moments.m00;
			centre = cvPoint(xc, yc);
			// cvCircle( hsv, centre, 3, drawColor, 3, 8, 0 );
			// cvCircle( timg, centre, 3, drawColor, 3, 8, 0 );
		}
		// the scanner advances to the next contour by itself
	}
	result = cvEndFindContours(&scanner);
	cvShowImage("rgbContour", timg);
	if (color == 0)
		cvShowImage("hsv_license_blue", hsv);
	else
		cvShowImage("hsv_license_yel", hsv);
	// release all the temporary images
	cvReleaseImage( &timg );
	cvReleaseImage( &hsv );
	cvReleaseImage( &gray );
	return t > 0;
}
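findLP_HSV_BLUE and findLP_HSV_YEL are not shown anywhere in these listings. A minimal sketch of what the blue variant might look like, assuming it masks the input down to blue-ish pixels in HSV space; the hue/saturation bounds below are illustrative guesses, not the original values:

#include <opencv/cv.h>

// Hypothetical sketch of findLP_HSV_BLUE: convert src (8-bit BGR) to HSV in
// dst and zero every pixel outside an assumed blue range, so the caller can
// erode/dilate the surviving plate-coloured blobs as above.
void findLP_HSV_BLUE(IplImage* src, IplImage* dst)
{
    IplImage* mask = cvCreateImage(cvGetSize(src), 8, 1);
    cvCvtColor(src, dst, CV_BGR2HSV);
    // illustrative blue range: hue ~100-130, moderately saturated and bright
    cvInRangeS(dst, cvScalar(100, 80, 60, 0), cvScalar(130, 255, 255, 0), mask);
    cvNot(mask, mask);                 // invert: select the non-blue pixels
    cvSet(dst, cvScalarAll(0), mask);  // and zero them out
    cvReleaseImage(&mask);
}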
Ejemplo n.º 20
0
void MouthContours::execute(IplImage* img, IplImage* drw, CvRect mouthSearch){

    CvSeq* contours;
    if(CV_IS_IMAGE(imgGrey)){
        cvReleaseImage(&imgGrey);
    }
    if(CV_IS_IMAGE(imgTempl)){
        cvReleaseImage(&imgTempl);
    }
    allocateOnDemand( &storageTeeth );
    allocateOnDemand( &imgTempl, cvSize( img->width, img->height ), IPL_DEPTH_8U, 3 );
    cvCopy( img,  imgTempl, 0 );
    allocateOnDemand( &imgGrey, cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );

    if(CV_IS_STORAGE((storageTeeth))){
        contours = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storageTeeth );
        cvCvtColor( imgTempl, imgGrey, CV_BGR2GRAY );
        int sigma = 1;
        int ksize = (sigma*5)|1;
        cvSetImageROI(imgGrey, mouthSearch);
        cvSetImageROI(drw, mouthSearch);

        cvSmooth( imgGrey , imgGrey, CV_GAUSSIAN, ksize, ksize, sigma, sigma);
        //cvEqualizeHist( small_img_grey, small_img_grey );
        cvCanny( imgGrey, imgGrey, 70, 70, 3 );

        cvDilate( imgGrey, imgGrey, NULL, 1 );
        cvErode( imgGrey, imgGrey, NULL, 1 );

        cvFindContours( imgGrey, storageTeeth, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
        if(CV_IS_SEQ(contours)){
            contours = cvApproxPoly( contours, sizeof(CvContour), storageTeeth, CV_POLY_APPROX_DP, 5, 1 ); 
            if( contours->total > 0 ){ 
                for( ;contours; contours = contours->h_next ){
                    if( contours->total <  4 )  
                        continue;     
                                        
                    cvDrawContours( drw, contours, CV_RGB(255,0,0), CV_RGB(0,255,0), 5, 1, CV_AA, cvPoint(0,0) );
                    MouthContours::TeethArcLength = cvArcLength(  contours, CV_WHOLE_SEQ, -1);
                    MouthContours::TeethAreaContour = cvContourArea( contours, CV_WHOLE_SEQ); 
                    time_t ltime;
                    struct tm *Tm;     
                    ltime=time(NULL);
                    Tm=localtime(&ltime); 
                    MouthContours::MouthHH = Tm->tm_hour;
                    MouthContours::MouthMM = Tm->tm_min;
                    MouthContours::MouthSS = Tm->tm_sec; 
                    
                }
            }else{
                MouthContours::MouthHH = 0;
                MouthContours::MouthMM = 0;
                MouthContours::MouthSS = 0;
                MouthContours::TeethArcLength = 0;
                MouthContours::TeethAreaContour = 0;
            }

        }else{
            MouthContours::MouthHH = 0;
            MouthContours::MouthMM = 0;
            MouthContours::MouthSS = 0;
            MouthContours::TeethArcLength = 0;
            MouthContours::TeethAreaContour = 0;
        }
        
        cvClearMemStorage( storageTeeth );
         
    }
    cvResetImageROI(imgGrey);
    cvResetImageROI(drw);
    
}
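allocateOnDemand is used above without being defined. A common shape for such a helper (a sketch of the usual pattern, not necessarily the original) creates the resource only when the caller's pointer is still NULL, so repeated frames reuse the same buffers:

#include <opencv/cxcore.h>

// Sketch of the usual allocate-on-demand pattern: create the image only if
// the pointer is still NULL, so per-frame calls reuse the existing buffer.
void allocateOnDemand(IplImage** img, CvSize size, int depth, int channels)
{
    if (*img == NULL)
        *img = cvCreateImage(size, depth, channels);
}

// Overload matching the allocateOnDemand(&storageTeeth) call above.
void allocateOnDemand(CvMemStorage** storage)
{
    if (*storage == NULL)
        *storage = cvCreateMemStorage(0);
}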
Ejemplo n.º 21
0
/** Returns a CvSeq (an OpenCV sequence) of Tetris pieces detected in an image.
   Based on the OpenCV example of identifying a square.  Modified to detect
   L-shaped Tetris pieces.  Effectiveness depends on the edge-detection
   thresholds and on the camera being positioned orthogonal to the Tetris piece.
 */
CvSeq* Camera::findTetris( IplImage* img, CvMemStorage* storage )
{
	thresh = 50;
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );

	/// Copy of image so that the detection is non-destructive
    IplImage* timg = cvCloneImage( img );

	/// Gray scale needed
	IplImage* gray = cvCreateImage( sz, 8, 1 );

	/// Smaller version to do scaling
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    // create empty sequence that will contain points -
    /// 6 points per tetris piece (the vertices)
    CvSeq* tetrisPieces = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum region of interest (ROI) in the image
    // with the width and height divisible by 2.  What is the biggest
    // size of the object.
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
	// I get the filter, but why down and upscale?
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    /// find pieces in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        /// extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        /// try several threshold levels
        for( l = 0; l < N; l++ )
        {
            /// hack: use Canny instead of zero threshold level.
            /// Canny helps to catch tetrisPieces with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 50, 120, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );

				/* Tetris pieces have 6 vertices. Bounds on the area are used
				 * to filter out noisy contours.
				 * Note: the absolute value of the area is used because the
				 * area may be positive or negative, in accordance with the
				 * contour orientation. */
                if( result->total == 6 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 )
                {
                    s = 0;

                    for( i = 0; i < 7; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                            (CvPoint*)cvGetSeqElem( result, i ),
                            (CvPoint*)cvGetSeqElem( result, i-2 ),
                            (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees) then write the piece's
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 6; i++ )
                            cvSeqPush( tetrisPieces,
                                (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return tetrisPieces;
}
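Since findTetris pushes six vertices per accepted contour, a caller has to read the returned sequence in strides of six. A usage sketch; the camera object, frame and window name are assumptions, not part of the original code:

#include <opencv/cv.h>
#include <opencv/highgui.h>

// Usage sketch: outline each detected piece on a copy of the frame.
// Six consecutive points in the returned sequence form one Tetris piece.
void showTetrisPieces(Camera& cam, IplImage* frame)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* pieces = cam.findTetris(frame, storage);
    IplImage* copy = cvCloneImage(frame);
    for (int i = 0; i + 5 < pieces->total; i += 6) {
        CvPoint pts[6];
        for (int j = 0; j < 6; j++)
            pts[j] = *(CvPoint*)cvGetSeqElem(pieces, i + j);
        CvPoint* poly = pts;
        int npts = 6;
        // draw the piece as a closed polyline
        cvPolyLine(copy, &poly, &npts, 1, 1, CV_RGB(0, 255, 0), 2, CV_AA, 0);
    }
    cvShowImage("tetris pieces", copy);  // window name assumed
    cvReleaseImage(&copy);
    cvReleaseMemStorage(&storage);
}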
Ejemplo n.º 22
0
void connectComponent(IplImage* src, const int poly_hull0, const float perimScale, int *num,
		vector<CvRect> &rects, vector<CvPoint> &centers) {

	/*
	 * Pre : "src"        :is the input image
	 *       "poly_hull0" :is usually set to 1
	 *       "perimScale" :defines how big connected component will be retained, bigger
	 *                     the number, more components are retained (100)
	 *
	 * Post: "num"        :defines how many connected component was found
	 *       "rects"      :the bounding box of each connected component
	 *       "centers"    :the center of each bounding box
	 */

	rects.clear();
	centers.clear();

	CvMemStorage* mem_storage = NULL;
	CvSeq* contours = NULL;

	// Clean up
	cvMorphologyEx(src, src, 0, 0, CV_MOP_OPEN, 1);
	cvMorphologyEx(src, src, 0, 0, CV_MOP_CLOSE, 1);

	// Find contours around only bigger regions
	mem_storage = cvCreateMemStorage(0);

	CvContourScanner scanner = cvStartFindContours(src, mem_storage, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	CvSeq* c;
	int numCont = 0;

	while ((c = cvFindNextContour(scanner)) != NULL) {

		double len = cvContourPerimeter(c);

		// calculate perimeter len threshold
		double q = (double) (src->height + src->width) / perimScale;

		// get rid of blob if its perimeter is too small
		if (len < q) {

			cvSubstituteContour(scanner, NULL);

		} else {

			// smooth its edges if it's large enough
			CvSeq* c_new;
			if (poly_hull0) {

				// polygonal approximation
				c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, 2, 0);

			} else {

				// convex hull of the segmentation
				c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);

			}

			cvSubstituteContour(scanner, c_new);

			numCont++;
		}
	}

	contours = cvEndFindContours(&scanner);

	// Calc center of mass and/or bounding rectangles
	if (num != NULL) {

		// user wants to collect statistics
		int numFilled = 0, i = 0;

		for (i = 0, c = contours; c != NULL; c = c->h_next, i++) {

			if (i < *num) {

				// bounding rectangles around blobs

				rects.push_back(cvBoundingRect(c));

				CvPoint center = cvPoint(rects[i].x + rects[i].width / 2, rects[i].y
						+ rects[i].height / 2);
				centers.push_back(center);

				numFilled++;
			}
		}

		*num = numFilled;

	}

	cvReleaseMemStorage(&mem_storage);

}
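A usage sketch for the routine above; the binary mask is assumed to come from some earlier segmentation step. Note that num is in/out: it caps how many components are reported and comes back as how many were actually filled:

#include <cstdio>
#include <vector>

// Usage sketch: report the retained components of an 8-bit binary mask.
void reportComponents(IplImage* mask)
{
    int num = 50;  // upper bound on components to report; updated on return
    std::vector<CvRect> rects;
    std::vector<CvPoint> centers;
    connectComponent(mask, 1, 100.0f, &num, rects, centers);
    for (int i = 0; i < num; i++)
        std::printf("component %d: center (%d,%d), %dx%d\n", i,
                    centers[i].x, centers[i].y,
                    rects[i].width, rects[i].height);
}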
Ejemplo n.º 23
0
void ObjectTracker::findConnectedComponents( IplImage* mask, int poly1_hull2 /* = 0 */, double perimScale /* = 0.25 */, int* num /* = NULL */, CvRect* bbs /* = NULL */, CvPoint* centers /* = NULL */ ) {
    int cvContourApproxLevel = 2;

    static CvMemStorage *mem_storage = NULL;
    static CvSeq *contours = NULL;

    if (mem_storage == NULL) {
        mem_storage = cvCreateMemStorage(0);
    } else {
        cvClearMemStorage(mem_storage);
    }

    CvContourScanner scanner = cvStartFindContours(mask, mem_storage, sizeof(CvContour),
                               CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    CvSeq *c;
    int numCont = 0;
    while ((c = cvFindNextContour(scanner)) != NULL) {
        double len = cvContourPerimeter(c);

        double q = (mask->height + mask->width) * perimScale;

        if (len < q) {
            cvSubstituteContour(scanner, NULL);
        } else {
            if (poly1_hull2) {
                CvSeq *c_new = NULL;
                if (poly1_hull2 == 1) {
                    // polygonal approximation of the contour
                    c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP,
                                         cvContourApproxLevel, 0);
                } else if (poly1_hull2 == 2) {
                    // convex hull of the contour
                    c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);
                }
                // guard against other poly1_hull2 values leaving c_new unset
                if (c_new != NULL)
                    cvSubstituteContour(scanner, c_new);
            }
            numCont++;
        }
    }

    contours = cvEndFindContours(&scanner);

    const CvScalar CVX_WHITE = CV_RGB(0xff,0xff,0xff);
    const CvScalar CVX_BLACK = CV_RGB(0x00,0x00,0x00);

    cvZero(mask);
    IplImage *maskTemp;

    // CALC CENTER OF MASS AND/OR BOUNDING RECTANGLES
    //
    if(num != NULL) {
        //User wants to collect statistics
        //
        int N = *num, numFilled = 0, i=0;
        CvMoments moments;
        double M00, M01, M10;
        maskTemp = cvCloneImage(mask);
        for(i=0, c=contours; c != NULL; c = c->h_next,i++ ) {
            if(i < N) {
                // Only process up to *num of them
                //
                cvDrawContours(
                    maskTemp,
                    c,
                    CVX_WHITE,
                    CVX_WHITE,
                    -1,
                    CV_FILLED,
                    8
                );
                // Find the center of each contour
                //
                if(centers != NULL) {
                    cvMoments(maskTemp,&moments,1);
                    M00 = cvGetSpatialMoment(&moments,0,0);
                    M10 = cvGetSpatialMoment(&moments,1,0);
                    M01 = cvGetSpatialMoment(&moments,0,1);
                    centers[i].x = (int)(M10/M00);
                    centers[i].y = (int)(M01/M00);
                }
                //Bounding rectangles around blobs
                //
                if(bbs != NULL) {
                    bbs[i] = cvBoundingRect(c);
                }
                cvZero(maskTemp);
                numFilled++;
            }
            // Draw filled contours into mask
            //
            cvDrawContours(
                mask,
                c,
                CVX_WHITE,
                CVX_WHITE,
                -1,
                CV_FILLED,
                8
            );
        } //end looping over contours
        *num = numFilled;
        cvReleaseImage( &maskTemp);
    }
    // ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
    //
    else {
        // The user doesn't want statistics, just draw the contours
        //
        for( c=contours; c != NULL; c = c->h_next ) {
            cvDrawContours(
                mask,
                c,
                CVX_WHITE,
                CVX_BLACK,
                -1,
                CV_FILLED,
                8
            );
        }
    }
}
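Unlike the previous example, this version writes into caller-supplied arrays and also repaints the mask with the retained, filled contours. Note the threshold direction, too: here a contour is kept when its perimeter exceeds (width + height) * perimScale, whereas the previous example divided by perimScale. A usage sketch, with tracker and mask assumed:

// Usage sketch: up to MAX_BLOBS bounding boxes and centers come back in the
// arrays, and *num returns how many entries were actually filled.
void trackBlobs(ObjectTracker& tracker, IplImage* mask)
{
    const int MAX_BLOBS = 10;
    int num = MAX_BLOBS;
    CvRect bbs[MAX_BLOBS];
    CvPoint centers[MAX_BLOBS];
    tracker.findConnectedComponents(mask, 1, 0.25, &num, bbs, centers);
    // num now holds the number of components that were reported
}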
Ejemplo n.º 24
0
//--------------------------------------------------------------------------------
int ContourFinder::findContours(	ofxCvGrayscaleImage&  input,
									int minArea,
									int maxArea,
									int nConsidered,
									double hullPress,	
									bool bFindHoles,
									bool bUseApproximation) {
	reset();

	// opencv will clobber the image it detects contours on, so we want to
	// copy it into another image before we detect contours. That copy is
	// allocated if necessary (necessary = (a) not allocated or (b) wrong size),
	// so be careful if you pass in different sized images to "findContours":
	// there is a performance penalty, but we think there is no memory leak
	// to worry about. Better to create multiple contour finders for different
	// sizes, i.e. if you are finding contours in a 640x480 image but also a
	// 320x240 image, better to make two ContourFinder objects than to use
	// one, because you will get penalized less.

	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) {
			inputCopy = input;
		} else {
			// we are allocated, but to the wrong size --
			// this has been checked for memory leaks, but a warning:
			// be careful if you call this function with a lot of different
			// sized "input" images! It does an allocation every time
			// a new size is passed in...
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) )  {
		CvBox2D box = cvMinAreaRect2(contour_ptr);
		int objectId; // If the contour is an object, then objectId is its ID
		objectId = (bTrackObjects)? templates->getTemplateId(box.size.width,box.size.height): -1;
		
		if(objectId != -1 ) { //If the blob is a object
			Blob blob		= Blob();
			blob.id			= objectId;
			blob.isObject	= true;
			float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );

			cvMoments( contour_ptr, myMoments );
		
			// this is if using non-angle bounding box
			CvRect rect	= cvBoundingRect( contour_ptr, 0 );
			blob.boundingRect.x      = rect.x;
			blob.boundingRect.y      = rect.y;
			blob.boundingRect.width  = rect.width;
			blob.boundingRect.height = rect.height;

			//For the angle bounding rectangle
			blob.angleBoundingBox=box;
			blob.angleBoundingRect.x	  = box.center.x;
			blob.angleBoundingRect.y	  = box.center.y;
			blob.angleBoundingRect.width  = box.size.height;
			blob.angleBoundingRect.height = box.size.width;
			blob.angle = box.angle;

			// Temporary initialization to 0; these are calculated afterwards. This prevents sending wrong data
			blob.D.x = 0;
			blob.D.y = 0;
			blob.maccel = 0;

			// assign other parameters
			blob.area                = fabs(area);
			blob.hole                = area < 0 ? true : false;
			blob.length 			 = cvArcLength(contour_ptr);
		
			blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
			blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
			blob.lastCentroid.x 	 = 0;
			blob.lastCentroid.y 	 = 0;

			// get the points for the blob:
			CvPoint           pt;
			CvSeqReader       reader;
			cvStartReadSeq( contour_ptr, &reader, 0 );
	
    		for( int j=0; j < contour_ptr->total; j++ ) {
				CV_READ_SEQ_ELEM( pt, reader );
				blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
			}
			blob.nPts = blob.pts.size();

			objects.push_back(blob);
			
		} else if(bTrackBlobs) { // SEARCH FOR BLOBS
			float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
			if( (area > minArea) && (area < maxArea) ) {
				Blob blob=Blob();
				float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
				cvMoments( contour_ptr, myMoments );
				
				// this is if using non-angle bounding box
				CvRect rect	= cvBoundingRect( contour_ptr, 0 );
				blob.boundingRect.x      = rect.x;
				blob.boundingRect.y      = rect.y;
				blob.boundingRect.width  = rect.width;
				blob.boundingRect.height = rect.height;
				
				//Angle Bounding rectangle
				blob.angleBoundingRect.x	  = box.center.x;
				blob.angleBoundingRect.y	  = box.center.y;
				blob.angleBoundingRect.width  = box.size.height;
				blob.angleBoundingRect.height = box.size.width;
				blob.angle = box.angle;
				
				// assign other parameters
				blob.area                = fabs(area);
				blob.hole                = area < 0 ? true : false;
				blob.length 			 = cvArcLength(contour_ptr);
				// AlexP
			// The cast to int causes errors in tracking since centroids are calculated in
			// floats and they might land between integer pixel values (which is what we really want).
				// This not only makes tracking more accurate but also more fluid
				blob.centroid.x			 = (myMoments->m10 / myMoments->m00);
				blob.centroid.y 		 = (myMoments->m01 / myMoments->m00);
				blob.lastCentroid.x 	 = 0;
				blob.lastCentroid.y 	 = 0;
				
				// get the points for the blob:
				CvPoint           pt;
				CvSeqReader       reader;
				cvStartReadSeq( contour_ptr, &reader, 0 );
				
    			for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
					CV_READ_SEQ_ELEM( pt, reader );
					blob.pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
				}
				blob.nPts = blob.pts.size();
				
				blobs.push_back(blob);
			}
		} 
		contour_ptr = contour_ptr->h_next;
	}
		
	if(bTrackFingers) {  // SEARCH FOR FINGERS
		CvPoint*		PointArray;
		int*			hull;
		int				hullsize;
		
		if (contour_list)
			contour_list = cvApproxPoly(contour_list, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 );
			
		for( ; contour_list != 0; contour_list = contour_list->h_next ){
			int count = contour_list->total; // This is number point in contour
				
			CvRect rect = cvContourBoundingRect(contour_list, 1);
			
			if ( (rect.width*rect.height) > 300 ){		// analyze only the bigger contours
				CvPoint center;
				center.x = rect.x+rect.width/2;
				center.y = rect.y+rect.height/2;
				
				PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // Alloc memory for contour point set.
				hull = (int*)malloc(sizeof(int)*count);	// Alloc memory for indices of convex hull vertices.
					
				cvCvtSeqToArray(contour_list, PointArray, CV_WHOLE_SEQ); // Get contour point set.
					
				// Find convex hull for the current contour.
				cvConvexHull(	PointArray,
								count,
								NULL,
								CV_COUNTER_CLOCKWISE,
								hull,
								&hullsize);
					
				int upper = 640, lower = 0;
				for	(int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					if (PointArray[idx].y < upper) 
						upper = PointArray[idx].y;
					if (PointArray[idx].y > lower) 
						lower = PointArray[idx].y;
				}
				
				float cutoff = lower - (lower - upper) * 0.1f;
				// find interior angles of hull corners
				for (int j=0; j<hullsize; j++) {
					int idx = hull[j]; // corner index
					int pdx = idx == 0 ? count - 1 : idx - 1; //  predecessor of idx
					int sdx = idx == count - 1 ? 0 : idx + 1; // successor of idx
						
					cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y);
					cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y);
						
					float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );
						
					// low interior angle + within upper 90% of region -> we got a finger
					if (angle < 1 ){ //&& PointArray[idx].y < cutoff) {
						Blob blob = Blob();
						
						//float area = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
						//cvMoments( contour_ptr, myMoments );
						
						// this is if using non-angle bounding box
						//CvRect rect	= cvBoundingRect( contour_ptr, 0 );
						blob.boundingRect.x      = PointArray[idx].x-5;
						blob.boundingRect.y      = PointArray[idx].y-5;
						blob.boundingRect.width  = 10;
						blob.boundingRect.height = 10;
						
						//Angle Bounding rectangle
						blob.angleBoundingRect.x	  = PointArray[idx].x-5;
						blob.angleBoundingRect.y	  = PointArray[idx].y-5;
						blob.angleBoundingRect.width  = 10;
						blob.angleBoundingRect.height = 10;
						blob.angle = atan2((float) PointArray[idx].x - center.x , (float) PointArray[idx].y - center.y);
						
						// assign other parameters
						//blob.area                = fabs(area);
						//blob.hole                = area < 0 ? true : false;
						//blob.length 			 = cvArcLength(contour_ptr);
						// AlexP
						// The cast to int causes errors in tracking since centroids are calculated in
						// floats and they might land between integer pixel values (which is what we really want).
						// This not only makes tracking more accurate but also more fluid
						blob.centroid.x			 = PointArray[idx].x;//(myMoments->m10 / myMoments->m00);
						blob.centroid.y 		 = PointArray[idx].y;//(myMoments->m01 / myMoments->m00);
						blob.lastCentroid.x 	 = 0;
						blob.lastCentroid.y 	 = 0;
						
						fingers.push_back(blob);
					}
				}
				// Free memory.
				free(PointArray);
				free(hull);
			}
		}
	}
	
	nBlobs = blobs.size();
	nFingers = fingers.size();
	nObjects = objects.size();
	
	// Free the storage memory.
	// Warning: do this inside this function, otherwise there is a strange memory leak
	if( contour_storage != NULL )
		cvReleaseMemStorage(&contour_storage);
	
	if( storage != NULL )
		cvReleaseMemStorage(&storage);

	return (bTrackFingers)? nFingers:nBlobs;
}
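The finger search above keys on the interior angle at each convex-hull corner. A standalone restatement of that test (the helper name is ours; the 1-radian threshold is the same one used above):

#include <opencv/cxcore.h>
#include <cmath>

// Interior angle at hull corner idx, measured between the vectors to its
// neighbouring contour points; a small angle (< 1 radian, ~57 degrees)
// marks a sharp protrusion such as a fingertip.
static bool isFingerCandidate(const CvPoint* pts, int count, int idx)
{
    int pdx = (idx == 0) ? count - 1 : idx - 1;   // predecessor of idx
    int sdx = (idx == count - 1) ? 0 : idx + 1;   // successor of idx
    double v1x = pts[sdx].x - pts[idx].x, v1y = pts[sdx].y - pts[idx].y;
    double v2x = pts[pdx].x - pts[idx].x, v2y = pts[pdx].y - pts[idx].y;
    double cosine = (v1x * v2x + v1y * v2y) /
                    (std::sqrt(v1x * v1x + v1y * v1y) *
                     std::sqrt(v2x * v2x + v2y * v2y));
    return std::acos(cosine) < 1.0;  // same 1-radian threshold as above
}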
Ejemplo n.º 25
0
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( result->total == 4 &&
                    cvContourArea(result,CV_WHOLE_SEQ,0) > 500 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;

                    for( i = 0; i < 5; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                            (CvPoint*)cvGetSeqElem( result, i ),
                            (CvPoint*)cvGetSeqElem( result, i-2 ),
                            (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle's
                    // vertices to resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares,
                                (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return squares;
}
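In the OpenCV sample this function comes from, the result is consumed by a companion drawSquares routine that reads the sequence four points at a time; a close paraphrase, with the window name assumed:

#include <opencv/cv.h>
#include <opencv/highgui.h>

// Companion to findSquares4, adapted from the same OpenCV sample: read the
// shared point sequence in groups of four and outline each square.
void drawSquares(IplImage* img, CvSeq* squares)
{
    CvSeqReader reader;
    IplImage* cpy = cvCloneImage(img);
    cvStartReadSeq(squares, &reader, 0);
    for (int i = 0; i < squares->total; i += 4) {
        CvPoint pt[4];
        CvPoint* rect = pt;
        int count = 4;
        CV_READ_SEQ_ELEM(pt[0], reader);
        CV_READ_SEQ_ELEM(pt[1], reader);
        CV_READ_SEQ_ELEM(pt[2], reader);
        CV_READ_SEQ_ELEM(pt[3], reader);
        // draw the square as a closed polyline
        cvPolyLine(cpy, &rect, &count, 1, 1, CV_RGB(0, 255, 0), 3, CV_AA, 0);
    }
    cvShowImage("squares", cpy);  // window name assumed
    cvReleaseImage(&cpy);
}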
Ejemplo n.º 26
0
vector<float> AverageColorFeatureExtractor::extractFeatures(IplImage * image, IplImage * segmented) {
	g_image = image;
	
	/* For Debugging Purposes: Show the window with the images in them */
	cvNamedWindow( "Images", 2);	
	//cvShowImage("Images", g_image);
	//cvShowImage("Segmentation", segmented);

	/* We'll create some storage structures to store the contours we get later */
	
	IplImage * sixforty = cvCreateImage( cvGetSize(image), 8 , 1);
	cvResize(segmented, sixforty);
	
	CvSeq * first_contour = NULL;
	CvMemStorage * g_storage = cvCreateMemStorage();
	
	/* Perform the contour finding */
	cvFindContours( sixforty, g_storage, &first_contour, sizeof(CvContour), CV_RETR_LIST );
	
	/* Find the contour with the largest area.
	   This contour has the highest likelihood of surrounding the object we care about */
	CvSeq * largest = 0;
	int l_area = 0;

	for(CvSeq * c=first_contour; c!=NULL; c=c->h_next ){
		CvRect rect = cvBoundingRect( c );
		if(rect.width*rect.height > l_area) {
			l_area = rect.width*rect.height;
			largest = c;
		}
	}

	/* For Debugging purposes: create image to see resulting contour */
	IplImage * view = cvCreateImage( cvGetSize(sixforty), 8, 3);	
	cvZero(view);
	
	vector<float> features;
	
	if(largest) {
		cvDrawContours(view, largest, cvScalarAll(255), cvScalarAll(255), 0, 2, 8);
		cvShowImage( "View", view);
	
		/* Polygonal Approximation */
		CvSeq * result; // Will hold approx
		CvMemStorage * storage = cvCreateMemStorage();
		result = cvApproxPoly( 	largest, 
				sizeof(CvContour),
				storage,
				CV_POLY_APPROX_DP,
				cvContourPerimeter(largest)*0.015 
				);
	
	/*
	   The parameter value above (set to perimeter * 0.015) was found by
	   experimentation. It is smaller than the one used for the L-shape or the
	   square finder because we want some element of noisiness here.
	   (It determines when the algorithm stops adding points.)
	*/

	/* For Debugging purposes: create image to see resulting contour */
		IplImage * mask = cvCreateImage( cvGetSize(sixforty), IPL_DEPTH_8U, 1);	
		cvZero(mask);
		cvDrawContours(mask, result, cvScalarAll(255), cvScalarAll(255), 0, -1, 8);
		IplImage * sendMask = cvCreateImage (cvGetSize(image), IPL_DEPTH_8U, 1);
		cvResize(mask, sendMask);
		//cvShowImage( "Result", sendMask );

		cout << image->nChannels << " " << image->imageSize << " " << sendMask->imageSize << " " << sendMask->depth << endl;	
	
		CvScalar avg = cvAvg( image, sendMask );	
	
		//cvWaitKey();

		/* Export the average colour of the masked region, quantized to 20 bins per channel */
	
	
		//for(int i=0; i<bins; i++)
		//	features.push_back( histogram[i] );
	
		features.push_back(floor((19*avg.val[0])/255));
		features.push_back(floor((19*avg.val[1])/255));
		features.push_back(floor((19*avg.val[2])/255));

		// Cleanup the temp files
		cvReleaseImage( &mask );
		cvReleaseImage( &sendMask );
		cvReleaseMemStorage( &storage );
	}
	
	cvReleaseImage( &view );
	cvReleaseImage( &sixforty );
	cvReleaseMemStorage( &g_storage );
	return features;
}
Ejemplo n.º 27
0
Archivo: map.cpp Proyecto: Metalab/CGSG
const PolygonList& ViennaMap::loadFragment(int fragX, int fragY) {
  //TODO: keep track of the images that are being loaded so we don't issue
  //two load requests for the same picture. this will be important if 
  //we are used in a multi-threaded environment
  IplImage *img = getImage(fragX, fragY);

  if (SDL_mutexP(cvRessourcesGuard) == -1)
    throw "could not acquire cvRessourcesGuard";

  //get one color channel and set white to zero
  cvSplit(img, tempBinarizedImage, NULL, NULL, NULL);
  cvThreshold(tempBinarizedImage, tempBinarizedImage, 250, 255, CV_THRESH_TOZERO_INV);
  
  //find polygons
  CvSeq *contours, *polys;
  cvFindContours(tempBinarizedImage, cvMemStorage, &contours, sizeof(CvContour),
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
  polys = cvApproxPoly(contours, sizeof(CvContour), cvMemStorage, CV_POLY_APPROX_DP, 1, 1);

  //create MapFragment
  MapFragment *frag = new MapFragment();

  //read polygons
  for (; polys; polys = polys->h_next) {
    if (polys->total < 3) continue;
    Polygon* polygon = new Polygon(polys->total); 
    bool incomplete = false;

    CvPoint *point;

    for (int i=0; i < polys->total; i++) {
      point = (CvPoint*)cvGetSeqElem(polys, i);
      int x, y;
      x = point->x + fragX*fragmentImageWidth;
      y = point->y + fragY*fragmentImageHeight;
      (*polygon)[i].x = x;
      (*polygon)[i].y = y;

      if (x == 1 || y == 1 || x == fragmentImageWidth-2 || y == fragmentImageHeight-2)
        incomplete = true;
    }

    if (!incomplete)
      frag->polygons.push_back(polygon);
    else
      frag->incompletePolygons.push_back(polygon);
  }

  //clean up
  cvClearMemStorage(cvMemStorage);
  cvReleaseImage(&img);

  if (SDL_mutexV(cvRessourcesGuard) == -1)
    throw "could not release cvRessourcesGuard";

  if (SDL_mutexP(fragmentGuard) == -1)
    throw "could not acquire fragmentGuard";

  //TODO: tryCompletePolygons
  //throw "not implemented";
  
  //add map fragment to list
  fragments.push_back(frag);

  if (SDL_mutexV(fragmentGuard) == -1)
    throw "could not release fragmentGuard";

  return frag->polygons;
}
Ejemplo n.º 28
0
int main(int argc, char* argv[])
{
    int i, j;
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 );
    IplImage* img32f = cvCreateImage( cvSize(w,w), IPL_DEPTH_32F, 1 );
    IplImage* img32s = cvCreateImage( cvSize(w,w), IPL_DEPTH_32S, 1 );
    IplImage* img3 = cvCreateImage( cvSize(w,w), 8, 3 );
    (void)argc; (void)argv;

    help();
    cvZero( img );

    for( i=0; i < 6; i++ )
    {
        int dx = (i%2)*250 - 30;
        int dy = (i/2)*150;
        CvScalar white = cvRealScalar(255);
        CvScalar black = cvRealScalar(0);

        if( i == 0 )
        {
            for( j = 0; j <= 10; j++ )
            {
                double angle = (j+5)*CV_PI/21;
                cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)),
                    cvRound(dy+100-90*sin(angle))),
                    cvPoint(cvRound(dx+100+j*10-30*cos(angle)),
                    cvRound(dy+100-30*sin(angle))), white, 3, 8, 0);
            }
        }

        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
        cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 );
    }

    cvNamedWindow( "image", 1 );
    cvShowImage( "image", img );
    cvConvert( img, img32f );
    findCComp( img32f );
    cvConvert( img32f, img32s );

    cvFindContours( img32s, storage, &contours, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

    //cvFindContours( img, storage, &contours, sizeof(CvContour),
    //                CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );


    {
    const char* attrs[] = {"recursive", "1", 0};
    cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0));
    contours = (CvSeq*)cvLoad("contours.xml", storage, 0, 0);
    }

    // comment this out if you do not want approximation
    contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );

    cvNamedWindow( "contours", 1 );
    cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );

    {
        CvRNG rng = cvRNG(-1);

        CvSeq* tcontours = contours;
        cvCvtColor( img, img3, CV_GRAY2BGR );
        while( tcontours->h_next )
            tcontours = tcontours->h_next;

        for( ; tcontours != 0; tcontours = tcontours->h_prev )
        {
            CvScalar color;
            color.val[0] = cvRandInt(&rng) % 256;
            color.val[1] = cvRandInt(&rng) % 256;
            color.val[2] = cvRandInt(&rng) % 256;
            color.val[3] = cvRandInt(&rng) % 256;
            cvDrawContours(img3, tcontours, color, color, 0, -1, 8, cvPoint(0,0));
            if( tcontours->v_next )
            {
                color.val[0] = cvRandInt(&rng) % 256;
                color.val[1] = cvRandInt(&rng) % 256;
                color.val[2] = cvRandInt(&rng) % 256;
                color.val[3] = cvRandInt(&rng) % 256;
                cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0));
            }
        }

    }

    cvShowImage( "colored", img3 );
    on_trackbar(0);
    cvWaitKey(0);
    cvReleaseMemStorage( &storage );
    cvReleaseImage( &img );
    cvReleaseImage( &img32f );
    cvReleaseImage( &img32s );
    cvReleaseImage( &img3 );

    return 0;
}
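This listing references the globals w, levels and contours and the callbacks help, findCComp and on_trackbar without defining them. In the OpenCV contours.c demo it is adapted from, the trackbar callback redraws the contour tree at the depth chosen on the slider, roughly like this (reconstructed from that demo, so details may differ):

#include <opencv/cv.h>
#include <opencv/highgui.h>

// Globals used by main() above (default values as in the original demo).
const int w = 500;
int levels = 3;
CvSeq* contours = 0;

// Redraw the contour hierarchy at the depth selected on the "levels+3"
// trackbar; non-positive depths first step into the tree so the drawing
// stays centered on a nested contour.
void on_trackbar(int pos)
{
    (void)pos;
    IplImage* cnt_img = cvCreateImage(cvSize(w, w), 8, 3);
    CvSeq* _contours = contours;
    int _levels = levels - 3;
    if (_levels <= 0)  // step into the tree to a nested contour
        _contours = _contours->h_next->h_next->h_next;
    cvZero(cnt_img);
    cvDrawContours(cnt_img, _contours, CV_RGB(255, 0, 0), CV_RGB(0, 255, 0),
                   _levels, 3, CV_AA, cvPoint(0, 0));
    cvShowImage("contours", cnt_img);
    cvReleaseImage(&cnt_img);
}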
Ejemplo n.º 29
0
int cam() // main processing routine (invoked in place of main)
{
    int hdims = 16;
    printf("I am main");
    CvCapture* capture = cvCreateCameraCapture(1); // open the USB camera (device index 1)
    CvHistogram *hist = 0;
    CvMemStorage* g_storage = NULL;
    Display *display=construct_display();
    int x,y, tmpx=0, tmpy=0, chk=0;
    IplImage* image=0;
    IplImage* lastimage1=0;
    IplImage* lastimage=0;
    IplImage* diffimage;
    IplImage* bitimage;
    IplImage* src=0,*hsv=0,*hue=0,*backproject=0;
    IplImage* hsv1=0,*hue1=0,*histimg=0,*frame=0,*edge=0;
    float* hranges;
    cvNamedWindow( "CA", CV_WINDOW_AUTOSIZE ); //display window 3
    //Calculation of Histogram//
    cvReleaseImage(&src);
    src= cvLoadImage("images/skin.jpg"); //taking patch
    while(1)
    {
        frame = cvQueryFrame( capture ); // grab frames one by one for processing
        int j=0;
        float avgx=0;
        float avgy=0;
        if( !frame ) break;
        //######################### Background Subtraction #########################//
        if(!image)
        {
            image=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            bitimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            diffimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            lastimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
        }
        cvCvtColor(frame,image,CV_BGR2GRAY);
        if(!lastimage1)
        {
            lastimage1=cvLoadImage("images/img.jpg");
        }
        cvCvtColor(lastimage1,lastimage,CV_BGR2GRAY);
        cvAbsDiff(image,lastimage,diffimage);
        cvThreshold(diffimage,bitimage,65,225,CV_THRESH_BINARY);
        cvInRangeS(bitimage,cvScalar(0),cvScalar(30),bitimage);
        cvSet(frame,cvScalar(0,0,0),bitimage);
        cvReleaseImage(&hsv);
        hsv= cvCreateImage( cvGetSize(src), 8, 3 );
        cvReleaseImage(&hue);
        hue= cvCreateImage( cvGetSize(src), 8, 1);
        cvCvtColor(src,hsv,CV_BGR2HSV);
        cvSplit(hsv,hue,0,0,0);
        float hranges_arr[] = {0,180};
        hranges = hranges_arr;
        hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
        cvCalcHist(&hue, hist, 0, 0 );
        cvThreshHist( hist, 100 );
        //#############################Display histogram##############################//
        cvReleaseImage(&histimg);
        histimg = cvCreateImage( cvSize(320,200), 8, 3 );
        cvZero( histimg );
        int bin_w = histimg->width / hdims;
        //#### Calculating the probability of finding the skin with the in-built method ####//
        cvReleaseImage(&backproject);
        backproject= cvCreateImage( cvGetSize(frame), 8, 1 );
        cvReleaseImage(&hsv1);
        hsv1 = cvCreateImage( cvGetSize(frame), 8, 3);
        cvReleaseImage(&hue1);
        hue1 = cvCreateImage( cvGetSize(frame), 8, 1);
        cvCvtColor(frame,hsv1,CV_BGR2HSV);
        cvSplit(hsv1,hue1,0,0,0);
        cvCalcBackProject( &hue1, backproject, hist );
        cvSmooth(backproject,backproject,CV_GAUSSIAN);
        cvSmooth(backproject,backproject,CV_MEDIAN);
        if( g_storage == NULL )
        g_storage = cvCreateMemStorage(0);
        else
        cvClearMemStorage( g_storage );
        CvSeq* contours=0;
        CvSeq* result =0;
        cvFindContours(backproject, g_storage, &contours );
        if(contours)
        {
            result=cvApproxPoly(contours, sizeof(CvContour), g_storage,
            CV_POLY_APPROX_DP, 7, 1);
        }
        cvZero( backproject);
        for( ; result != 0; result = result->h_next )
        {
            double area = cvContourArea( result );
            cvDrawContours( backproject,result, CV_RGB(255,255, 255), CV_RGB(255,0, 255)
            , -1,CV_FILLED, 8 );
            for( int i=1; i<=result->total; i++ )
            {
                if(i>=1 and fabs(area)>300)
                {
                    CvPoint* p2 = CV_GET_SEQ_ELEM( CvPoint, result, i );
                    if(1)
                    {
                        avgx=avgx+p2->x;
                        avgy=avgy+p2->y;
                        j=j+1;
                        cvCircle(backproject,cvPoint(p2->x,p2->y ),10,
                        cvScalar(255,255,255));
                    }
                }
            }
        }
        cvCircle( backproject, cvPoint(avgx/j, avgy/j ), 40, cvScalar(255,255,255) );
        x = ( avgx/j );
        y = ( avgy/j );
        x=( (x*1240)/640 )-20;
        y=( (y*840)/480 )-20;
        if ( (abs(tmpx-x)>6 or abs(tmpy-y)>6 ) and j )
        {
            tmpx = x;
            tmpy = y;
            chk=0;
        }
        else chk++;
        mouse_move1( tmpx, tmpy, display );
        if ( chk==10 )
        {
            mouse_click( 5, 2, display );
            mouse_click( 5, 3, display );
        }
        cvSaveImage( "final.jpg", frame );
        cvSaveImage( "final1.jpg", backproject );
        cvShowImage( "CA", backproject );
        char c = cvWaitKey(33);
        if( c == 27 )
            break; // exit the loop and tear down the windows when <Esc> is pressed
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "CA" );
    return 0;
}
Ejemplo n.º 30
0
/**************************************
 * Definition: Finds squares in an image with the given minimum size
 *
 * (Taken from the API and modified slightly)
 * Doesn't require exactly 4 sides, convexity or near 90 deg angles either ('findBlobs')
 *
 * Parameters: the image to find squares in and the minimum area for a square
 *
 * Returns:    a squares_t linked list
 **************************************/
squares_t* Camera::findSquares(IplImage *img, int areaThreshold) {
    CvSeq* contours;
    CvMemStorage *storage;
    int i, j, area;
    CvPoint ul, lr, pt, centroid;
    CvSize sz = cvSize( img->width, img->height);
    IplImage * canny = cvCreateImage(sz, 8, 1);
    squares_t *sq_head, *sq, *sq_last;
    CvSeqReader reader;
    
    // Create storage
    storage = cvCreateMemStorage(0);
    
    // Pyramid images for blurring the result
    IplImage* pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);

    CvSeq* result;
    double s, t;

    // Create an empty sequence that will contain the square's vertices
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);
    
    // Select the maximum ROI in the image with the width and height divisible by 2
    cvSetImageROI(img, cvRect(0, 0, sz.width, sz.height));
    
    // Down and up scale the image to reduce noise
    cvPyrDown( img, pyr, CV_GAUSSIAN_5x5 );
    cvPyrUp( pyr, img, CV_GAUSSIAN_5x5 );

    // Apply the canny edge detector and set the lower to 0 (which forces edges merging) 
    cvCanny(img, canny, 0, 50, 3);
        
    // Dilate canny output to remove potential holes between edge segments 
    cvDilate(canny, canny, 0, 2);
        
    // Find the contours and store them all as a list
    // was CV_RETR_EXTERNAL
    cvFindContours(canny, storage, &contours, sizeof(CvContour), 
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
            
    // Test each contour to find squares
    while (contours) {
        // Approximate a contour with accuracy proportional to the contour perimeter
        result = cvApproxPoly(contours, sizeof(CvContour), storage, 
                              CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.1, 0 );
        // Note: absolute value of an area is used because
        // area may be positive or negative - in accordance with the
        // contour orientation
        if (result->total >= 4 && 
            fabs(cvContourArea(result,CV_WHOLE_SEQ,0)) > areaThreshold) {
            s=0;
            for(i=0; i<5; i++) {
                // Find the minimum angle between joint edges (maximum of cosine).
                // Note: s is computed but deliberately not tested here -- see
                // the header comment ("doesn't require near 90 deg angles").
                if(i >= 2) {
                    t = fabs(ri_angle((CvPoint*)cvGetSeqElem(result, i),
                                      (CvPoint*)cvGetSeqElem(result, i-2),
                                      (CvPoint*)cvGetSeqElem( result, i-1 )));
                    s = s > t ? s : t;
                }
            }

            for( i = 0; i < 4; i++ ) {
                cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
            }
        }    
        // Get the next contour
        contours = contours->h_next;
    }

    // initialize reader of the sequence
    cvStartReadSeq(squares, &reader, 0);
    sq_head = NULL; sq_last = NULL; sq = NULL;
    // Now, we have a list of contours that are squares, find the centroids and area
    for(i=0; i<squares->total; i+=4) {
        // Find the upper left and lower right coordinates
        ul.x = 1000; ul.y = 1000; lr.x = 0; lr.y = 0;
        for(j=0; j<4; j++) {
            CV_READ_SEQ_ELEM(pt, reader);
            // Upper Left
            if(pt.x < ul.x)
                ul.x = pt.x;
            if(pt.y < ul.y)
                ul.y = pt.y;
            // Lower right
            if(pt.x > lr.x)
                lr.x = pt.x;
            if(pt.y > lr.y)
                lr.y = pt.y;
        }

        // Find the centroid
        centroid.x = ((lr.x - ul.x) / 2) + ul.x;
        centroid.y = ((lr.y - ul.y) / 2) + ul.y;

        // Find the area
        area = (lr.x - ul.x) * (lr.y - ul.y);

        // Add it to the storage
        sq = new squares_t;
        // Fill in the data
        sq->area = area;
        sq->center.x = centroid.x;
        sq->center.y = centroid.y;
        sq->next = NULL;
        if(sq_last == NULL) 
            sq_head = sq;   
        else 
            sq_last->next = sq;
        sq_last = sq;
    }
    
    // Release the temporary images and data
    cvReleaseImage(&canny);
    cvReleaseImage(&pyr);
    cvReleaseMemStorage(&storage);
    return sq_head;
}
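Since findSquares returns a heap-allocated linked list built with new, the caller is responsible for walking and freeing it. A minimal sketch, assuming the squares_t layout implied above ({ int area; CvPoint center; squares_t* next; }):

// Free a squares_t list returned by Camera::findSquares.
void freeSquares(squares_t* head)
{
    while (head != NULL) {
        squares_t* next = head->next;
        delete head;   // each node was allocated with "new squares_t" above
        head = next;
    }
}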