Example #1
static gboolean
defects_are_horizontal (CvSeq *defects)
{
  /* Gets the angle between the start point
     of the first defect and the end point of
     the second and checks whether it is between
     45 and 90 degrees, which means that the
     defects are horizontal (their opening is
     pointing sideways). */

  gfloat x, angle;
  CvPoint *p1, *p2;
  CvConvexityDefect *d1, *d2;

  d1 = CV_GET_SEQ_ELEM (CvConvexityDefect, defects, 0);
  d2 = CV_GET_SEQ_ELEM (CvConvexityDefect, defects, 1);
  p1 = d1->start;
  p2 = d2->end;

  x = (gfloat) (p1->x - p2->x);
  angle = ABS (atan (((gfloat) (p1->y - p2->y)) / x));

  if (angle < M_PI_4 || angle > M_PI_2)
    {
      return FALSE;
    }

  return TRUE;
}
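The helper above reads elements 0 and 1 of a CvConvexityDefect sequence, so it assumes the caller has already run the convex-hull/defect step. A minimal sketch of that step with the legacy C API (the names contour and storage are placeholders, not part of the original example):

  CvSeq *hull, *defects;

  /* hull as pointers into the contour (return_points = 0), as cvConvexityDefects expects */
  hull    = cvConvexHull2 (contour, storage, CV_CLOCKWISE, 0);
  defects = cvConvexityDefects (contour, hull, storage);

  if (defects != NULL && defects->total >= 2 &&
      defects_are_horizontal (defects))
    {
      /* the openings of the first two defects point sideways */
    }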
Example #2
QString Selection::serialisation()
{
    QString datas;

    CvSeq *tmp = contour;
    CvPoint *point;

    datas = "{" + Zone::sub_serialisation();
    datas += ",contour=[";

    do {
        datas += "{flags=" + QString::number(tmp->flags) + ",seq=[";
        for(int j = 0; j < tmp->total; ++j) {
            point = CV_GET_SEQ_ELEM(CvPoint, tmp, j);
            datas += "{x=" + QString::number(point->x) + "," + "y=" + QString::number(point->y) + "},";
        }

        datas.remove(datas.size() -1, 1);
        datas += "]},";
    } while((tmp = tmp->h_next) != NULL);

    datas.remove(datas.size() -1, 1);
    datas += "]}";
    return datas;
}
Example #3
//This boolean function returns whether the (x,y) point is inside the given contour.
char pointIsInsideContour(CvContour *contour, int x, int y){
	//We assume that a point is inside a contour if we can find one point of the contour that is to its left,
	//one above it, one to its right and one below it (a rough heuristic, not a strict point-in-polygon test).
	//We will update the boolean variables below when these are found:
	char found_left=0, found_top=0, found_right=0, found_bottom=0;
	int count, i; //Variables used for iteration
	CvPoint *contourPoint; //A pointer to a single contour point.
	
	if(!contour)return 0; //Don't bother doing anything if there is no contour.
	
	count = contour->total; //The total field holds the number of points in the contour.

	for(i=0;i<count;i++){ //So, for every point in the contour...
		//We retrieve a pointer to the point at index i using the useful macro CV_GET_SEQ_ELEM
		contourPoint = (CvPoint *)CV_GET_SEQ_ELEM(CvPoint,contour,i);
		
		if(contourPoint->x == x){ //If the point has the same x coordinate as (x,y)...
			if(contourPoint->y < y)found_top = 1; //and is above (x,y), we found the top.
			else found_bottom = 1; //Otherwise, it's the bottom.
		}
		if(contourPoint->y == y){ //Do the same thing for the horizontal axis...
			if(contourPoint->x < x)found_left = 1;
			else found_right = 1;
		}
	}
	
	return found_left && found_top && found_right && found_bottom; //Did we find all four points?
}
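The routine above is only a heuristic; when an exact answer is needed, the legacy API already provides one. A hedged sketch using cvPointPolygonTest on the same contour (return value > 0 means inside, < 0 outside, 0 on the edge):

	double r = cvPointPolygonTest((CvArr *)contour, cvPoint2D32f(x, y), 0);
	char inside = (r > 0);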
Example #4
Contours::Contours(CvSeq * contour){
	this->c = contour;
	CvPoint * p=CV_GET_SEQ_ELEM(CvPoint, contour, 0);
	this->x=p->x;
	this->y=p->y;
	this->per=-1;
	this->area=-1;
}
////////////////////////////////////////////////////////////////
//  Labeling
////////////////////////////////////////////////////////////////
void cv_Labelling(
    IplImage *src_img,       // input image (8-bit monochrome)
    IplImage *dst_img        // output image (8-bit, 3-channel color)
) {

    CvMemStorage *storage = cvCreateMemStorage (0);
    CvSeq *contours = NULL;

    if (src_img == NULL)
        return;

    // Binarize the image (discriminant analysis / Otsu's method)
    cvThreshold (src_img, src_img, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

    // Detect contours (the return value is the total number of contours found)
    int find_contour_num = cvFindContours (
                               src_img,                 // input image
                               storage,                 // storage for the extracted contours
                               &contours,               // pointer to a pointer to the outermost contour
                               sizeof (CvContour),      // size of the sequence header
                               CV_RETR_EXTERNAL,        // retrieval mode
                               // CV_RETR_EXTERNAL - extract only the outermost contours
                               // CV_RETR_LIST - extract all contours and put them in a list
                               // CV_RETR_CCOMP - extract all contours and organize them into
                               // a two-level hierarchy: the first level holds the outer
                               // boundaries of the connected components, the second level
                               // holds the boundaries of the holes inside them.
                               // CV_RETR_TREE - extract all contours and reconstruct the
                               // full hierarchy of nested contours.
                               CV_CHAIN_APPROX_NONE    // CV_CHAIN_APPROX_SIMPLE: keep only the end points of contour segments
                               // CV_CHAIN_APPROX_NONE: keep every contour point
                               // or apply one of the Teh-Chin chain approximation algorithms:
                               // CV_CHAIN_APPROX_TC89_L1
                               // CV_CHAIN_APPROX_TC89_KCOS
                           );

    if (contours != NULL) {
        // initialize the output image to 0 (black)
        cvSet(dst_img, CV_RGB( 0, 0, 0 ));
        // draw the contours
        DrawNextContour(dst_img, contours, 1);
    }

    cvDrawContours( dst_img, max_perimeter_contor, CV_RGB( 255, 0, 0 ), CV_RGB( 255, 0, 0 ), 0, 2);
    for ( int i = 0; i < max_perimeter_contor->total; i++) {
        CvPoint *point = CV_GET_SEQ_ELEM (CvPoint, max_perimeter_contor, i);
        //std::cout << "x:" << point->x << ", y:" << point->y  << std::endl;
    }
    // get the coordinates of the vertices that make up the contour
    /*for ( int i = 0; i < Contour->total; i++) {
        CvPoint *point = CV_GET_SEQ_ELEM (CvPoint, Contour, i);
        std::cout << "x:" << point->x << ", y:" << point->y  << std::endl;

    }*/
    // release the memory storage
    cvReleaseMemStorage (&storage);
}
Example #6
/* accepts a window for configuring the perimeter and area filters,
 * if set by zone
 * */
Contours::Contours(CvSeq * contour,CvRect window){
	this->c = contour;
	CvPoint * p=CV_GET_SEQ_ELEM(CvPoint, contour, 0);
	this->x=p->x + window.x;
	this->y=p->y + window.y;
	this->per=-1;
	this->area=-1;

}
void whu_MyHand::whu_GetFingerAngle(double &m_Angle)
{
	whu_Context.WaitNoneUpdateAll();
	//double m_Angle;
	int min_val;
	int FingerNum=0;  // number of fingertips found
	static int m_HandOpen=0;
	static int m_handClosed=0;
	memcpy(imgDepth16u->imageData,whu_DepthMD.Data(),640*480*2);
	cvConvertScale(imgDepth16u,depthShow,255/4096.0,0);      // convert to a grayscale image
	min_val = min_front(depthShow);    // get the nearest (front-most) value of the grayscale image
	get_gray_hand(depthShow, min_val);

	CvRect rect = cvBoundingRect( contours, 0 );// 2D bounding rectangle of the point set, i.e. the smallest rectangle containing the contour
	for(int i = 20;i + 10 < contours->total;i++)   // stop 10 points early so the i+10 lookup below stays inside the sequence
	{
		CvPoint *p = CV_GET_SEQ_ELEM(CvPoint,contours,i);
		CvPoint *a = CV_GET_SEQ_ELEM(CvPoint,contours,i-10);
		CvPoint *b = CV_GET_SEQ_ELEM(CvPoint,contours,i+10);
		if((double)((a->x-b->x)*(a->x-b->x)+(a->y-b->y)*(a->y-b->y))/(double)((a->x-p->x)*(a->x-p->x)+(a->y-p->y)*(a->y-p->y))<0.9)//&&cvGetReal2D(cpimg,(a->x+b->x)/2,(a->y+b->y)/2) >1)      //&&*(uchar*)(depthShow->imageData+((a->y+b->y)/2)*depthShow->widthStep)[(a->x+b->x)/2]==1
		{
			if((p->x-rect.x-(rect.width)/2)*(p->x-rect.x-(rect.width)/2)+(p->y-rect.y-(rect.height)/2)*(p->y-rect.y-(rect.height)/2) > rect.height*rect.height/10)//???
			{
				FingerNum++;
			}
		}
	}
	if (FingerNum>5)
	{
			if (m_Angle<150)
			{
				m_Angle=m_Angle+15;
			}
	} 
	else
	{
		if (m_Angle>60)
		{
			m_Angle=m_Angle-15;
		}
	}
	

}
Example #8
struct point*
get_contour_points_from_image_with_size (const GdkPixbuf *image,
                                         int             *size)
{
  IplImage *ipl_image, *ipl_gray;
  CvMemStorage *contours;
  CvSeq *first_contour;
  CvScalar black, white;
  struct point *result;

  black = cvScalarAll (0);
  white = cvScalarAll (255);

  ipl_image = pixbuf2ipl (image);

  ipl_gray = cvCreateImage (cvGetSize (ipl_image),
                            ipl_image->depth,
                            N_CHANNELS_GRAY);

  cvCvtColor (ipl_image, ipl_gray, CV_BGR2GRAY);
  cvThreshold (ipl_gray, ipl_gray, 127, 255, CV_THRESH_BINARY|CV_THRESH_OTSU);
  cvSmooth (ipl_gray, ipl_gray, CV_GAUSSIAN, 15, 15, 0, 0);

  contours = cvCreateMemStorage (0);
  first_contour = NULL;
  cvFindContours (ipl_gray,
                  contours,
                  &first_contour,
                  sizeof (CvContour),
                  CV_RETR_LIST,
                  CV_CHAIN_APPROX_NONE,
                  cvPoint (0,0));

  if (first_contour == NULL)
    {
      /* no contours were found: clean up and report an empty result */
      *size = 0;
      cvReleaseImage (&ipl_image);
      cvReleaseImage (&ipl_gray);
      cvReleaseMemStorage (&contours);
      return NULL;
    }

  result = (struct point*) malloc (sizeof (struct point) * first_contour->total);
  for (int i = 0; i < first_contour->total; ++i)
    {
      CvPoint *contour_point;

      contour_point = CV_GET_SEQ_ELEM (CvPoint, first_contour, i);

      result[i].x = contour_point->x;
      result[i].y = contour_point->y;
    }

  *size = first_contour->total;

  cvReleaseImage (&ipl_image);
  cvReleaseImage (&ipl_gray);
  cvReleaseMemStorage (&contours);

  return result;
}
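A hedged usage sketch for the helper above (the pixbuf is assumed to be loaded elsewhere); the returned array is malloc'd and must be freed by the caller:

  int n = 0;
  struct point *pts = get_contour_points_from_image_with_size (pixbuf, &n);

  if (pts != NULL)
    {
      for (int i = 0; i < n; i++)
        g_debug ("point %d: (%d, %d)", i, pts[i].x, pts[i].y);
      free (pts);
    }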
Example #9
int mvContours::find_contour_and_check_errors(IplImage* img) {
    if (m_contours != NULL) {
        cvClearSeq(m_contours);
        m_contours = NULL;
    }

    // find the contours
    bin_contours.start();
    int n_contours = cvFindContours (
        img,
        m_storage,
        &m_contours,
        sizeof(CvContour),
        CV_RETR_EXTERNAL,
        CV_CHAIN_APPROX_SIMPLE
    );

    int last_x=-1, last_y=-1;
    if (m_contours == NULL) {
        goto FIND_CONTOUR_ERROR;
    }
    
    // check that the contour does not run along a side of the image for more than a third of the image height/width
    for (int i = 0; i < m_contours->total; i++) {
        CvPoint* p = CV_GET_SEQ_ELEM (CvPoint, m_contours, i);
        if (p->x == 1 || p->x == 398) {
            if (p->x == last_x && abs(p->y-last_y) > img->height/3) {
                DEBUG_PRINT ("find_contour: contour shares vertical side with image (x=%d). Discarding.\n", last_x);
                goto FIND_CONTOUR_ERROR;
            }
        }
        if ((p->y == 1) || (p->y == 298)) {
            if (p->y == last_y && abs(p->x-last_x) > img->width/3) {
                DEBUG_PRINT ("find_contour: contour shares horizontal side with image (y=%d). Discarding.\n", last_y);
                goto FIND_CONTOUR_ERROR;
            }
        }

        last_x = p->x;
        last_y = p->y;
    }
    bin_contours.stop();
    return n_contours;

FIND_CONTOUR_ERROR:
    if (m_contours != NULL) {
        cvClearSeq(m_contours);
        m_contours = NULL;
    }
    bin_contours.stop();
    return -1;
}
Example #10
//--------------------------------------------------------------
void testApp::draw(){

	int l = 0;
	
	switch (mode) {
			
		case MODE_PROCESS:
			
			ofBackground(100,100,100);
			
			// draw the incoming, the grayscale, the bg and the thresholded difference
			ofSetColor(0xffffff);
			
			// lay the images out on a 3-column grid; increment l after each call so that
			// the column (l%3) and the row (l/3) are computed from one well-defined value
			colorImg  .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			medianImg .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			grayImage .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			cannyImage.draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;

			hsvImage  .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			satImage  .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			trsImage  .draw( 320*(l%3), (l/3)*240, 320, 240 ); l++;
			
			ofSetColor(0xffffff);
			char reportStr[1024];
			sprintf(reportStr, "bg subtraction and blob detection\npress ' ' to capture bg\nthreshold %i (press: +/-)\n, fps: %f", threshold, ofGetFrameRate());
			ofDrawBitmapString(reportStr, 20, 600);
			
			break;
			
		case MODE_DRAWING:
			
			if(!erase_bg) {
				ofEnableAlphaBlending();
				ofSetColor(0xFF, 0xFF, 0xFF, clearBGAlphaValue);
			} else {
				ofSetColor(0xFF, 0xFF, 0xFF);
			}
			ofFill();
			paperTexture.draw(0,0,ofGetWidth(), ofGetHeight());
			
			
//			glEnable(GL_BLEND); 
//			glBlendFunc(GL_ZERO, GL_ONE_MINUS_SRC_COLOR);
//			grayImage.draw(0, 0, ofGetWidth(), ofGetHeight());
//			glDisable(GL_BLEND);

			ofNoFill();
			ofEnableAlphaBlending();
			
			// FILLS
			
			if (draw_fills && fillContours) {
				
				
				unsigned char* cc = colorImg.getPixels();
				float rr, gg, bb;
				
				CvSeq* contour = fillContours;  
				
				if (doFillsApproxValue) {
				
					while (contour!=NULL) {
						
						CvSeq* approx = cvApproxPoly(contour, sizeof(CvContour), approxStorage, CV_POLY_APPROX_DP, fillsApproxValue);
						
						CvRect br = cvBoundingRect(approx, 0);
						int cx = (br.x+br.width /2.0) / (float) kx;
						int cy = (br.y+br.height/2.0) / (float) ky;
						ofSetColor((int) cc[cx + cy*cw + 0], 
								   (int) cc[cx + cy*cw + 1], 
								   (int) cc[cx + cy*cw + 2], 
								   fillsAlphaValue);
						ofFill();
						ofBeginShape();
						for (int j = 0; j < approx->total; j++){
							CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, approx, j);
							ofVertex(p->x, p->y);
						}
						ofEndShape(true);
						ofNoFill();
						
						contour = contour->h_next;
					}
				
				} else {
				
					// SMOTH FILL CONTOUR
					while (contour!=NULL) {
						for (int j = 0; j < contour->total; j++){
							CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
							CvPoint* p2 = CV_GET_SEQ_ELEM(CvPoint, contour, (j+1) % contour->total);
							p1->x += p2->x; p1->x /= 2.0;
							p1->y += p2->y; p1->y /= 2.0;
						}
						contour = contour->h_next;
					}
					
					contour = fillContours;  
					
					while (contour!=NULL) {
						
						// GET FILL COLOR FROM IMAGE
						CvRect br = cvBoundingRect(contour, 0);
						int cx = (br.x+br.width /2.0) / (float) kx;
						int cy = (br.y+br.height/2.0) / (float) ky;
						ofSetColor((int) cc[cx + cy*cw + 0], 
								   (int) cc[cx + cy*cw + 1], 
								   (int) cc[cx + cy*cw + 2], 
								   fillsAlphaValue);
						// DRAW FILL
						ofFill();
						ofBeginShape();
						for (int j = 0; j < contour->total; j++){
							CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, contour, j);
							ofVertex(p->x, p->y);
						}
						ofEndShape(true);
						ofNoFill();
						
						contour = contour->h_next;
					}
				}
			}
			
			// LINES
			
			if (draw_edges && lines) {
//				glColor4f(0.4, 0.2, 0.1, erase_bg ? 0.5 : 0.2);
				glColor4f(1.0, 0.9, 0.8, erase_bg ? 0.5 : 0.2);
				ofSetLineWidth(2.0);
				#if PROBABILISTIC_LINE
					for( int i = 0; i <lines->total; i++ ) {
						
						CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
						int dx = line[1].x - line[0].x; 
						int dy = line[1].y - line[0].y; 
						line[0].x -= dx*0.1;
						line[0].y -= dy*0.1;
						line[1].x += dx*0.1;
						line[1].y += dy*0.1;
						ofLine( line[0].x*kx, line[0].y*ky, line[1].x*kx, line[1].y*ky );
					}
				#else
					for( int i = 0; i < MIN(lines->total, 25); i++ ) {
						
						float* line = (float*)cvGetSeqElem(lines,i);
						float rho = line[0];
						float theta = line[1];
						CvPoint pt1, pt2;
						double a = cos(theta), b = sin(theta);
						double x0 = a*rho, y0 = b*rho;
						pt1.x = cvRound(x0 + 1000*(-b));
						pt1.y = cvRound(y0 + 1000*( a));
						pt2.x = cvRound(x0 - 1000*(-b));
						pt2.y = cvRound(y0 - 1000*( a));
						ofLine( pt1.x*kx, pt1.y*ky, pt2.x*kx, pt2.y*ky );
					}
				#endif				
				ofSetLineWidth(1.0);
			}
			
			// APPROX EDGES
			
			if (draw_approx && edgeContours) {
				
				CvSeq* contour = edgeContours;
				ofSetColor(0x60, 0x20, 0x10, approxAlphaValue);
				
				while (contour!=NULL) {
					
					CvSeq* approx = cvApproxPoly(contour, sizeof(CvContour), approxStorage, CV_POLY_APPROX_DP, approxValue);

					ofBeginShape();
					for (int j = 0; j < approx->total; j++){
						CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, approx, j);
						ofVertex(p->x, p->y);
					}
					ofEndShape(true);
					
					contour = contour->h_next;
				}
			}
			
			
			// CONTOURS
			if (draw_contours && edgeContours) {
				
				// SMOOTH
				for (int i=0;i<contourSmoothValue; ++i) {
					CvSeq* contour = edgeContours;
					while (contour!=NULL) {
						for (int j = 0; j < contour->total; j++){
							CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
							CvPoint* p2 = CV_GET_SEQ_ELEM(CvPoint, contour, (j+1) % contour->total);
							p1->x += p2->x; p1->x /= 2.0;
							p1->y += p2->y; p1->y /= 2.0;
						}
						contour = contour->h_next;
					}
				}
				
				CvSeq* contour = edgeContours;
				
				ofSetColor(0x60, 0x20, 0x10, contourAlphaValue);
				
				while (contour!=NULL) {
					
					ofBeginShape();
					for (int j = 0; j < contour->total; j++){
						CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						ofVertex(p->x, p->y);
					}
					ofEndShape(true);
					
					contour = contour->h_next;
				}
			}
			
			
		
			break;
			
		default:
			break;
	}

}
Example #11
double * computeFDFeatures(IplImage* segmented, int N)
{
	cvNamedWindow( "Edge",1);
	cvMoveWindow("Capture", 100, 10);
	
	IplImage* img_edge = cvCreateImage( cvGetSize(segmented), 8, 1 );
	IplImage* img_8uc3 = cvCreateImage( cvGetSize(segmented), 8, 3 );
	cvThreshold( segmented, img_edge, 128, 255, CV_THRESH_BINARY );
	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	int Nc = cvFindContours(
				img_edge,
				storage,
				&first_contour,
				sizeof(CvContour),
				CV_RETR_EXTERNAL // Try all four values and see what happens
				);
	int i;
	int n=0;
	int best=0;
	int current=0;
	int n2;
	double Scale;
	double * Features;
	Features=(double *)malloc(sizeof(double)*N);
	
	//malloc error checking
	
	fftw_complex *contour;
	fftw_complex *FD;
	fftw_plan plan_forward;
	//printf( "Total Contours Detected: %d\n", Nc );
	//Find max contour
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(c->total>current) {   // keep the index of the longest contour
			current=c->total;
			best=n;
		}
		n++;
	}
	//fprintf(stderr,"best is %d\n",best);
	n=0;
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(n==best && c->total >20){
			cvCvtColor( segmented, img_8uc3, CV_GRAY2BGR );
			cvDrawContours(
				img_8uc3,
				c,
				CVX_RED,
				CVX_BLUE,
				1, // Try different values of max_level, and see what happens
				4,
				4
				);
			//printf("Contour #%d\n", n );
			cvShowImage("Edge", img_8uc3 );
		//	cvWaitKey(0);
		//	printf("%d elements:\n", c->total );
			contour=(fftw_complex*) fftw_malloc(sizeof(fftw_complex)*(c->total));
			FD=(fftw_complex*) fftw_malloc(sizeof(fftw_complex)*(c->total));

			for( int i=0; i<c->total; ++i ) {
				CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
			//	printf("(%d,%d)\n", p->x, p->y );
				//assemble complex representation here
				contour[i][0]=p->x;
				contour[i][1]=p->y;
			}

			plan_forward=fftw_plan_dft_1d(c->total,contour,FD,FFTW_FORWARD,FFTW_ESTIMATE);
			fftw_execute(plan_forward);
			//do fft
			n2=c->total/2;
			Scale=(double)sqrt(pow(FD[1][0],2)+pow(FD[1][1],2));
			//reduce to 10 coefficients
			//normalize
			if(N+2>=c->total)
			{
				fprintf(stderr,"Contour Is too small");
				return 0;
			}
			//positive frequency components
			for(i=0;i<N/2;i++)
			{
				//fftshift stuff
				Features[i]=(double)sqrt(pow(FD[i+2][0],2)+pow(FD[i+2][1],2))/Scale;
			}
			for(i=0;i<N/2;i++)
			{
				Features[i+N/2]=(double)sqrt(pow(FD[N-1-i][0],2)+pow(FD[N-1-i][1],2))/Scale;
			}
			//cvWaitKey(0);
			// release the FFTW resources allocated for this contour
			fftw_destroy_plan(plan_forward);
			fftw_free(contour);
			fftw_free(FD);
		}
		n++;
	}
	//try downspampling later
	
	//printf("Finished all contours.\n");
	//destroy fftw_plan
	cvCvtColor( segmented, img_8uc3, CV_GRAY2BGR );
	cvShowImage( "Edge", img_8uc3 );
	//cvWaitKey(0);
	//cvDestroyWindow( "Edge" );
	cvReleaseImage( &segmented );
	cvReleaseImage( &img_8uc3 );
	cvReleaseImage( &img_edge );
	return Features;
	
}
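A hedged usage sketch: the function malloc's the feature vector and hands ownership to the caller, so it should be freed after use (segmented is assumed to be an 8-bit, single-channel image prepared elsewhere):

	double *fd = computeFDFeatures(segmented, 8);   /* 8 Fourier-descriptor features */
	if (fd != NULL) {
		for (int i = 0; i < 8; i++)
			printf("FD[%d] = %f\n", i, fd[i]);
		free(fd);
	}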
Example #12
/*
 * This Function segments a worm.
 * It requires that certain information be present in the WormAnalysisData struct Worm
 * It requires Worm->Boundary be full
 * It requires that Params->NumSegments be greater than zero
 *
 */
int SegmentWorm(WormAnalysisData* Worm, WormAnalysisParam* Params){
	if (cvSeqExists(Worm->Boundary) == 0){
		printf("Error! No boundary found in SegmentWorm()\n");
		return -1;
	}




	Worm->Segmented->NumSegments=Params->NumSegments;

	/***Clear Out any stale Segmented Information Already in the Worm Structure***/
	ClearSegmentedInfo(Worm->Segmented);

	Worm->Segmented->Head=Worm->Head;
	Worm->Segmented->Tail=Worm->Tail;

	/*** It would be nice to check that Worm->Boundary exists ***/

	/*** Clear Out Scratch Storage ***/
	cvClearMemStorage(Worm->MemScratchStorage);


	/*** Slice the boundary into left and right components ***/
	if (Worm->HeadIndex==Worm->TailIndex) printf("Error! Worm->HeadIndex==Worm->TailIndex in SegmentWorm()!\n");
	CvSeq* OrigBoundA=cvSeqSlice(Worm->Boundary,cvSlice(Worm->HeadIndex,Worm->TailIndex),Worm->MemScratchStorage,1);
	CvSeq* OrigBoundB=cvSeqSlice(Worm->Boundary,cvSlice(Worm->TailIndex,Worm->HeadIndex),Worm->MemScratchStorage,1);

	if (OrigBoundA->total < Params->NumSegments || OrigBoundB->total < Params->NumSegments ){
		printf("Error in SegmentWorm():\n\tWhen splitting  the original boundary into two, one or the other has less than the number of desired segments!\n");
		printf("OrigBoundA->total=%d\nOrigBoundB->total=%d\nParams->NumSegments=%d\n",OrigBoundA->total,OrigBoundB->total,Params->NumSegments);
		printf("Worm->HeadIndex=%d\nWorm->TailIndex=%d\n",Worm->HeadIndex,Worm->TailIndex);
		return -1; /** Andy make this return -1 **/

	}

	cvSeqInvert(OrigBoundB);


	/*** Resample One of the Two Boundaries so that both are the same length ***/

	//Create sequences to store the Normalized Boundaries
	CvSeq* NBoundA=	cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),Worm->MemScratchStorage);
	CvSeq* NBoundB=cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),Worm->MemScratchStorage);

	//Resample L&R boundary to have the same number of points as min(L,R)
	if (OrigBoundA->total > OrigBoundB->total){
		resampleSeq(OrigBoundA,NBoundA,OrigBoundB->total );
		NBoundB=OrigBoundB;
	}else{
		resampleSeq(OrigBoundB,NBoundB,OrigBoundA->total );
		NBoundA=OrigBoundA;
	}
	//Now both NBoundA and NBoundB are the same length.



	/*
	 * Now Find the Centerline
	 *
	 */

	/*** Clear out Stale Centerline Information ***/
	cvClearSeq(Worm->Centerline);

	/*** Compute Centerline, from Head To Tail ***/
	FindCenterline(NBoundA,NBoundB,Worm->Centerline);



	/*** Smooth the Centerline***/
	CvSeq* SmoothUnresampledCenterline = smoothPtSequence (Worm->Centerline, 0.5*Worm->Centerline->total/Params->NumSegments, Worm->MemScratchStorage);

	/*** Note: If you wanted to you could smooth the centerline a second time here. ***/


	/*** Resample the Centerline So it has the specified Number of Points ***/
	//resampleSeq(SmoothUnresampledCenterline,Worm->Segmented->Centerline,Params->NumSegments);

	resampleSeqConstPtsPerArcLength(SmoothUnresampledCenterline,Worm->Segmented->Centerline,Params->NumSegments);

	/** Save the location of the centerOfWorm as the point halfway down the segmented centerline **/
	Worm->Segmented->centerOfWorm= CV_GET_SEQ_ELEM( CvPoint , Worm->Segmented->Centerline, Worm->Segmented->NumSegments / 2 );

	/*** Remove Repeat Points***/
	//RemoveSequentialDuplicatePoints (Worm->Segmented->Centerline);

	/*** Use Marc's Perpendicular Segmentation Algorithm
	 *   To Segment the Left and Right Boundaries and store them
	 */
	SegmentSides(OrigBoundA,OrigBoundB,Worm->Segmented->Centerline,Worm->Segmented->LeftBound,Worm->Segmented->RightBound);
	return 0;

}
Example #13
int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY        
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif

    cvNamedWindow( "rect & circle", 1 );
        
    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1;
        CvPoint pt0, pt;
        CvBox2D box;
        CvPoint2D32f box_vtx[4];
        CvPoint2D32f center;
        CvPoint icenter;
        float radius;
#if !ARRAY            
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                     sizeof(CvPoint), storage );
        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
#ifndef _EiC /* unfortunately, here EiC crashes */
        box = cvMinAreaRect2( ptseq, 0 );
#endif
        cvMinEnclosingCircle( ptseq, &center, &radius );
#else
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );

        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
#ifndef _EiC
        box = cvMinAreaRect2( &pointMat, 0 );
#endif
        cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
        cvBoxPoints( box, box_vtx );
        cvZero( img );
        for( i = 0; i < count; i++ )
        {
#if !ARRAY                
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }

#ifndef _EiC
        pt0.x = cvRound(box_vtx[3].x);
        pt0.y = cvRound(box_vtx[3].y);
        for( i = 0; i < 4; i++ )
        {
            pt.x = cvRound(box_vtx[i].x);
            pt.y = cvRound(box_vtx[i].y);
            cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
            pt0 = pt;
        }
#endif
        icenter.x = cvRound(center.x);
        icenter.y = cvRound(center.y);
        cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );

        cvShowImage( "rect & circle", img );

        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;

#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
#endif
    }
    
    cvDestroyWindow( "rect & circle" );
    return 0;
}
Example #14
int main(int argc, char* argv[]) {
	cvNamedWindow( "capture", 1 );
	IplImage* img_8uc1 = cvLoadImage( "/home/eugene/hand_gest/TestingData/10-2.png", CV_LOAD_IMAGE_GRAYSCALE );
	IplImage* img_edge = cvCreateImage( cvGetSize(img_8uc1), 8, 1 );
	IplImage* img_8uc3 = cvCreateImage( cvGetSize(img_8uc1), 8, 3 );
	cvThreshold( img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY );
	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	int Nc = cvFindContours(
				img_edge,
				storage,
				&first_contour,
				sizeof(CvContour),
				CV_RETR_EXTERNAL // Try all four values and see what happens
				);
	int i;
	int n=0;
	int best=0;
	int current=0;
	int N=8;
	int n2;
	double Scale;
	double Features[N];
	fftw_complex *contour;
	fftw_complex *FD;
	fftw_plan plan_forward;
	printf( "Total Contours Detected: %d\n", Nc );
	//Find max contour
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(c->total>current) {   // keep the index of the longest contour
			current=c->total;
			best=n;
		}
		n++;
	}
	fprintf(stderr,"best is %d",best);
	n=0;
	CvSeq* bestc = NULL;   // remember the selected contour; it is needed again after this loop
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(n==best){
			bestc = c;
			cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );
			cvDrawContours(
				img_8uc3,
				c,
				CVX_RED,
				CVX_BLUE,
				0, // Try different values of max_level, and see what happens
				2,
				8
				);
			printf("Contour #%d\n", n );
			cvShowImage("capture", img_8uc3 );
			printf("%d elements:\n", c->total );
			contour= fftw_malloc(sizeof(fftw_complex)*c->total);

			for( int i=0; i<c->total; ++i ) {
				CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
			//	printf("(%d,%d)\n", p->x, p->y );
				//assemble complex representation here
				contour[i][0]=p->x;
				contour[i][1]=p->y;
			}
			//do fft
			//cvCvtSeqToArray(c
			cvWaitKey(0);
		}
		n++;
	}
	//try downspampling later
	FD=fftw_malloc(sizeof(fftw_complex)*bestc->total);
	plan_forward=fftw_plan_dft_1d(bestc->total,contour,FD,FFTW_FORWARD,FFTW_ESTIMATE);
	fftw_execute(plan_forward);

	n2=bestc->total/2;
	Scale=(double)sqrt(pow(FD[1][0],2)+pow(FD[1][1],2));
	//reduce to 10 coefficients
	//normalize
	if(N+2>=bestc->total)
	{
		fprintf(stderr,"Contour Is too small");
		exit(1);
	}
	//positive frequency components
	for(i=0;i<N/2;i++)
	{
		//fftshift stuff
		Features[i]=(double)sqrt(pow(FD[i+2][0],2)+pow(FD[i+2][1],2))/Scale;
	}
	for(i=0;i<N/2;i++)
	{
		Features[i+N/2]=(double)sqrt(pow(FD[N-1-i][0],2)+pow(FD[N-1-i][1],2))/Scale;
	}

	printf("Finished all contours.\n");
	cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );
	cvShowImage( "capture", img_8uc3 );
	cvWaitKey(0);
	cvDestroyWindow( "capture" );
	cvReleaseImage( &img_8uc1 );
	cvReleaseImage( &img_8uc3 );
	cvReleaseImage( &img_edge );
	return 0;
}
void  detect(IplImage* img_8uc1,IplImage* img_8uc3) {


//cvNamedWindow( "aug", 1 );


//cvThreshold( img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY );
CvMemStorage* storage = cvCreateMemStorage();
CvSeq* first_contour = NULL;
CvSeq* maxitem=NULL;
double area=0,areamax=0;
int maxn=0;
int Nc = cvFindContours(
img_8uc1,
storage,
&first_contour,
sizeof(CvContour),
CV_RETR_LIST // Try all four values and see what happens
);
int n=0;
//printf( "Total Contours Detected: %d\n", Nc );

if(Nc>0)
{
for( CvSeq* c=first_contour; c!=NULL; c=c->h_next )
{

//cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );

area=cvContourArea(c,CV_WHOLE_SEQ );

if(area>areamax)
{areamax=area;
maxitem=c;
maxn=n;
}



n++;


}
CvMemStorage* storage3 = cvCreateMemStorage(0);
//if (maxitem) maxitem = cvApproxPoly( maxitem, sizeof(maxitem), storage3, CV_POLY_APPROX_DP, 3, 1 );



if(areamax>5000)
{
maxitem = cvApproxPoly( maxitem, sizeof(CvContour), storage3, CV_POLY_APPROX_DP, 10, 1 );

CvPoint pt0;

CvMemStorage* storage1 = cvCreateMemStorage(0);
CvMemStorage* storage2 = cvCreateMemStorage(0);
CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                     sizeof(CvPoint), storage1 );
        CvSeq* hull;
        CvSeq* defects;

        for(int i = 0; i < maxitem->total; i++ )
        {   CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, maxitem, i );
            pt0.x = p->x;
            pt0.y = p->y;
            cvSeqPush( ptseq, &pt0 );
        }
        hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 );
        int hullcount = hull->total;

        defects= cvConvexityDefects(ptseq,hull,storage2  );

        //printf(" defect no %d \n",defects->total);




    CvConvexityDefect* defectArray;


       int j=0;
       //int m_nomdef=0;
        // This cycle marks all defects of convexity of current contours.
        for(;defects;defects = defects->h_next)
        {
            int nomdef = defects->total; // defect amount
          //outlet_float( m_nomdef, nomdef );

            //printf(" defect no %d \n",nomdef);

            if(nomdef == 0)
                continue;

            // Alloc memory for defect set.
    //fprintf(stderr,"malloc\n");
            defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*nomdef);

            // Get defect set.
    //fprintf(stderr,"cvCvtSeqToArray\n");
            cvCvtSeqToArray(defects,defectArray, CV_WHOLE_SEQ);

            // Draw marks for all defects.
            for(int i=0; i<nomdef; i++)
            {   printf(" defect depth for defect %d %f \n",i,defectArray[i].depth);
                cvLine(img_8uc3, *(defectArray[i].start), *(defectArray[i].depth_point),CV_RGB(255,255,0),1, CV_AA, 0 );
                cvCircle( img_8uc3, *(defectArray[i].depth_point), 5, CV_RGB(0,0,164), 2, 8,0);
                cvCircle( img_8uc3, *(defectArray[i].start), 5, CV_RGB(0,0,164), 2, 8,0);
                cvLine(img_8uc3, *(defectArray[i].depth_point), *(defectArray[i].end),CV_RGB(255,255,0),1, CV_AA, 0 );

            }
            char txt[]="0";
            txt[0]='0'+nomdef-1;
            CvFont font;
            cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
            cvPutText(img_8uc3, txt, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0));

        j++;

            // Free memory.
            free(defectArray);
        }


cvReleaseMemStorage( &storage );
cvReleaseMemStorage( &storage1 );
cvReleaseMemStorage( &storage2 );
cvReleaseMemStorage( &storage3 );
//return 0;
}
}
}
/*
**  Target Detection Subroutine
**
**     contours that are passed in here have similar area to 
**     the targets and have 4 corners
*/
void Detect_Targets( CvSeq *raw_contours, CvMat *input_image )
{
   int i,j;
   float xpt[4], ypt[4], xcenter, ycenter;
   float xmin, xmax, ymin, ymax;
   float length_1, length_2;
   float aspect_ratio;
   float distance;
   float dx1, dy1, dx2, dy2;

   CvPoint *point;
   CvPoint pt1;

   int      isTarget = FALSE;

   xmax = ymax = 0.0;
   xmin = ymin = 1e20;

   float cosine1, cosine2, max_cosine;

   /*
   **
   */
   if ( raw_contours->total == 4 )
   {
      /*
      **  Determine min and max extents
      */
      for ( i = 0; i < raw_contours->total; i++)
      {
         point = CV_GET_SEQ_ELEM( CvPoint, raw_contours, i);

         xpt[i] = (float) point->x;
         ypt[i] = (float) point->y;
         xmin = MIN(xmin, xpt[i]);
         xmax = MAX(xmax, xpt[i]);
         ymin = MIN(ymin, ypt[i]);
         ymax = MAX(ymax, ypt[i]);
      }

      dx1 = fabsf(xpt[0] - xpt[1]);
      dy1 = fabsf(ypt[0] - ypt[1]);
      dx2 = fabsf(xpt[1] - xpt[2]);
      dy2 = fabsf(ypt[1] - ypt[2]);

      length_1 = sqrtf( dx1*dx1 + dy1*dy1 );

      length_2 = sqrtf( dx2*dx2 + dy2*dy2 );

      /*
      **  Look at only horizontal rectangles
      **  If the rectangle is not horizontal, it isn't a target.
      */
      if ( (MAX(dx1,dx2) < MAX(dy1,dy2)) )
      {
         return;
      }



      /*
      **  Calculate the angle of the first corner
      **   if it isn't 90 deg (+- 15 deg) then we don't have
      **   a rectangle and reject
      */
      cosine1 =  fabsf( cos_angle( xpt[0], ypt[0],  
                                  xpt[2], ypt[2],
                                  xpt[1], ypt[1] ) );
      cosine2 =  fabsf( cos_angle( xpt[1], ypt[1],  
                                  xpt[3], ypt[3],
                                  xpt[2], ypt[2] ) );
      max_cosine = MAX( cosine1, cosine2 );
      if ( max_cosine > 0.45 ) {
         return;
      }

      /* 
      **  Calculate aspect ratio
      */
      aspect_ratio = MAX( length_1/length_2, length_2/length_1);

      /*
      **  Calculate center of object
      */
      xcenter = ((xmax - xmin) / 2.0) + xmin;
      ycenter = ((ymax - ymin) / 2.0) + ymin;

      /*  
      **  check for high goal opening (3pt) target.
      **   aspect ratio = 4.5
      */
      if ( ((aspect_ratio > 4.2) && (aspect_ratio < 7.6)) )
      {
         /*
         **  Calculate distance to target
         **    the opening target 3 is 54" wide by 12" tall
         */
         distance = 0.01745 * ((MAX(length_1,length_2) / 640.0) * 48.8) / 2.0;
         distance = (54.0/2.0) / tanf(distance);

         if ( (distance/12.0) < 45 ) /* field is 54 ft long */
         {
            isTarget = TRUE;
            detected_targets[num_detect_targets].type = 3;
            detected_targets[num_detect_targets].distance = distance/12.0;
            pt1.x = xcenter+20;  pt1.y = ycenter;
         }
      }

      /*  
      **  check for high goal outside boundary (3pt) target.
      **    62" by 20"
      **   aspect ratio = 3.1
      */
     if ( ((aspect_ratio >= 2.8) && (aspect_ratio < 3.2)) ) 
      {
//  printf("outside 3 pt boundary detect\n");
         /*
         **  Calculate distance to target
         **    the outside of target 3 is 62"wide by 20" tall
         */
         distance = 0.01745 * ((MAX(length_1,length_2) / 640.0) * 48.8) / 2.0;
         distance = (62.0/2.0) / tanf(distance);

         if ( (distance/12.0) < 45 ) 
         {
            isTarget = TRUE;
            detected_targets[num_detect_targets].type = 3;
            detected_targets[num_detect_targets].distance = distance/12.0;
         }

      }

      /*  
      **  check for middle goal opening (2pt) target.
      **   54" x 21"
      **   aspect ratio = 2.6
      */
      if ( ( (aspect_ratio > 2.4) && (aspect_ratio < 2.8) )  )
//        || ((aspect_ratio >= 2.8) && (aspect_ratio < 3.3)) )
      {
         /*
         **  Calculate distance to target
         **    the opening 2pt target is 54"wide by 21" tall
         */
         distance = 0.01745 * ((MAX(length_1,length_2) / 640.0) * 48.8) / 2.0;
         distance = (54.0/2.0) / tanf(distance);

         if ( (distance/12.0) < 45 ) /* field is 54 ft long */
         {
            isTarget = TRUE;
            detected_targets[num_detect_targets].type = 2;
            detected_targets[num_detect_targets].distance = distance/12.0;
         }
      }

      /*  
      **  check for middle goal border (2pt) target.
      **    62" x 29"
      **   aspect ratio = 2.14
      **    2.27, 2.3, 2.29
      */
      if ( ( (aspect_ratio > 1.8) && (aspect_ratio <= 2.4) )  ) 
      {
         /*
         **  Calculate distance to target
         **    the outside of 2pt target is 62" wide by 29" tall
         */
         distance = 0.01745 * ((MAX(length_1,length_2) / 640.0) * 48.8) / 2.0;
         distance = (62.0/2.0) / tanf(distance);

         if ( (distance/12.0) < 45 ) 
         {
            isTarget = TRUE;
            detected_targets[num_detect_targets].type = 2;
            detected_targets[num_detect_targets].distance = distance/12.0;
         }
      }

      /*
      **  See if we determined that one of the previous conditions
      **   were met, if so (isTarget = TRUE) then save all the 
      **   target information
      */
      if ( isTarget ) 
      {
         detected_targets[num_detect_targets].aspect_ratio = aspect_ratio;
         detected_targets[num_detect_targets].xcenter = xcenter;
         detected_targets[num_detect_targets].ycenter = ycenter;

         detected_targets[num_detect_targets].xmin = xmin;
         detected_targets[num_detect_targets].ymin = ymin;
         detected_targets[num_detect_targets].xmax = xmax;
         detected_targets[num_detect_targets].ymax = ymax;

         detected_targets[num_detect_targets].time_tracked = 1;

         if ( length_1 > length_2 ) {
            detected_targets[num_detect_targets].h_length = length_1;
            detected_targets[num_detect_targets].v_length = length_2;
         } else {
            detected_targets[num_detect_targets].h_length = length_2;
            detected_targets[num_detect_targets].v_length = length_1;
         }

         for ( j = 0; j < 4; j++ ) {
            detected_targets[num_detect_targets].xpt[j] = xpt[j];
            detected_targets[num_detect_targets].ypt[j] = ypt[j];
         }

         detected_targets[num_detect_targets].area = 
             fabsf(cvContourArea( raw_contours, CV_WHOLE_SEQ, 0));
         
         detected_targets[num_detect_targets].h_angle =
              (xcenter - camera_info.h_pixels/2.0) * camera_info.h_ifov;
         detected_targets[num_detect_targets].v_angle =
              (ycenter - camera_info.v_pixels/2.0) * camera_info.v_ifov * -1;
         
         /*
         **  calculate angular size 
         */
         detected_targets[num_detect_targets].h_len_deg = 
            detected_targets[num_detect_targets].h_length * camera_info.h_ifov;
         detected_targets[num_detect_targets].v_len_deg = 
            detected_targets[num_detect_targets].v_length * camera_info.v_ifov;
           
         
         /* draw target center */

//         cross_color = CV_RGB(255,255,255);
//         draw_target_center( detected_targets[num_detect_targets],
//                             image, cross_color );

         if ( num_detect_targets < (MAX_TRACKED_TARGETS -1) )
         {
            num_detect_targets++;
         }  else {

         }

      }

#ifdef GRAPHICS
    if (isTarget) {
      cvDrawContours( input_image, raw_contours, 
                       CV_RGB(255,255,255), CV_RGB(200,255,255), 
                       0, 1, 8, cvPoint(0,0));
    }
#endif

   }

}
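The comment above says Detect_Targets() expects contours that already have four corners. A hedged sketch of how a caller might produce such candidates with cvApproxPoly (contours, storage and image are assumed to exist; the 0.02 * perimeter epsilon is a common choice, not taken from the original program):

   for (CvSeq *c = contours; c != NULL; c = c->h_next)
   {
      CvSeq *poly = cvApproxPoly (c, sizeof (CvContour), storage,
                                  CV_POLY_APPROX_DP,
                                  cvContourPerimeter (c) * 0.02, 0);

      if (poly->total == 4)
         Detect_Targets (poly, image);
   }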
Example #17
void CHandDrawEffect::DrawEdge(IplImage* image, IplImage* image2, IplImage* base, int plane)
{
	CvSeq* contourSeq0 = NULL;

	int height    = image->height;
	int width     = image->width;
	int step      = image->widthStep;
	int channels  = image->nChannels;
	uchar* data   = (uchar*)image->imageData;

	if(plane < 3) {
		cvCvtColor(image, hsv, CV_BGR2HSV);				// use one HSV plane as the source for the line drawing
		for(int i = 0; i < height * width; i++)
			grayImage->imageData[i] = hsv->imageData[i * 3 + plane];
	} else {
		cvCvtColor(image, grayImage, CV_BGR2GRAY);		// otherwise build a gray image and use that as the source
	}

	IplImage* target = base;					// target image to draw into

	for(int x = 20; x < 240; x += Y) {
		cvThreshold(grayImage, binaryImage, x, 255, CV_THRESH_BINARY);	// binarize at threshold x and extract the contours
		contourSeq0 = 0;
		cvFindContours(binaryImage, memStorage0, &contourSeq0, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0)); // contour search

		if(lineNoise > 0) { // discontinuous lines
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {
				CvPoint *p;
				if(contourSeq0->total< X * 5) continue;		// reject small contours

				int index = 0;
				for(int i = 0; i < contourSeq0->total; i += X) {
					p = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, i);				// record the point position and color
					CvScalar color = GetColor(image2, p->x, p->y);
					MulScaler(color, DARK);										// adjust the brightness
					color.val[3] = CheckPoint(image, p->x, p->y, lineNoise);	// decide from nearby pixels whether the point is valid and store the flag in [3]
					SetPoint(index, p, color);									// save into pointTable
					index++;
					if(index > MAX_POINT) {
					//	printf("INDEX ERROR\n"); 
						index = 0;
					}
				}
				// invalidate valid points that are not part of a run of at least 5 (pending: speed this up)
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2, p3, p4, p0;
					if(pointTable[p1].color.val[3]) {
						p2 = (p1 + 1) % index;
						p3 = (p1 + 2) % index;
						p4 = (p1 + 3) % index;
						p0 = (p1 - 1 + index) % index;
						if(pointTable[p0].color.val[3]) continue;
						if(!pointTable[p2].color.val[3] ||
							!pointTable[p3].color.val[3] ||
							!pointTable[p4].color.val[3]) {						
							pointTable[p1].color.val[3] = 0;
						}
					}
				}
				// draw the connected valid points
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2 = (i + 1) % index;	// if (p2==index) p2 = 0;
					if(pointTable[p1].color.val[3] && pointTable[p2].color.val[3]) {
						CvScalar c = pointTable[p1].color;
						MulScaler(c, DARK);
						cvLine(target, pointTable[p1].p, pointTable[p2].p, c, lineWidth, CV_AA);
					}
				}
			}
		} else {
			// draw everything
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {

				CvPoint *p1 = 0;
				CvPoint *p2;

				if(contourSeq0->total < X * 5) continue;		

				for(int i = 0; i < contourSeq0->total; i += X) {
					p1 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i) % contourSeq0->total);// start point
					p2 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i + X + Z) % contourSeq0->total);// end point
					CvScalar color = GetColor(image, p1->x, p1->y);
					MulScaler(color, DARK);
					cvLine(target, *p1, *p2, color, lineWidth, CV_AA);
				}
			}
		}
	}
	cvClearMemStorage(memStorage0);
}
Example #18
static gboolean
hands_are_praying (guint16* depth,
                   guint width,
                   guint height,
                   SkeltrackJointList list)
{
  guint x, y, z;
  SkeltrackJoint *head, *left_elbow, *left_shoulder,
    *right_shoulder, *right_elbow;
  CvSeq *defects = NULL;

  if (list == NULL)
    return FALSE;

  head = skeltrack_joint_list_get_joint (list, SKELTRACK_JOINT_ID_HEAD);
  right_elbow = skeltrack_joint_list_get_joint (list,
                                                SKELTRACK_JOINT_ID_RIGHT_ELBOW);
  left_elbow = skeltrack_joint_list_get_joint (list,
                                               SKELTRACK_JOINT_ID_LEFT_ELBOW);
  right_shoulder = skeltrack_joint_list_get_joint (list,
                                                SKELTRACK_JOINT_ID_RIGHT_SHOULDER);
  left_shoulder = skeltrack_joint_list_get_joint (list,
                                               SKELTRACK_JOINT_ID_LEFT_SHOULDER);


  if (head == NULL || right_elbow == NULL ||
      left_elbow == NULL ||
      ((right_elbow->y < right_shoulder->y) &&
       (left_elbow->y < left_shoulder->y)))
    return FALSE;

  x = head->screen_x;
  y = right_elbow->screen_y;
  z = ((gfloat) (right_shoulder->z + left_shoulder->z)) / 2.0 - 300;

  defects = get_defects (depth, width, height, x, y, z);

  if (defects)
    {
      guint i, sum;

      sum = 0;
      for (i = 0; i < defects->total; i++)
        {
          gfloat orientation;
          CvConvexityDefect *defect = CV_GET_SEQ_ELEM (CvConvexityDefect,
                                                       defects,
                                                       i);
          orientation = get_orientation_angle (defect->start,
                                               defect->depth_point,
                                               defect->end);
          orientation = ((gint) (orientation / M_PI * 180.0)) % 360;
          if (defect->depth > 20.0 &&
              orientation < 180 &&
              orientation > 0 &&
              ABS (orientation - 90) > 25)
            {
              gfloat x1, x2, sig_x1, sig_x2;
              x1 = defect->start->x - defect->depth_point->x;
              x2 = defect->end->x - defect->depth_point->x;
              sig_x1 = x1 / ABS (x1);
              sig_x2 = x2 / ABS (x2);
              if (sig_x1 != sig_x2 || (x1 == 0 || x2 == 0))
                {
                  continue;
                }
              cvSeqRemove(defects, i);
              i--;
            }
          else
            {
              cvSeqRemove(defects, i);
              i--;
            }
        }

      if (defects->total > 1)
        {
          for (i = 1; i < defects->total; i++)
            {
              gfloat dist_hand1, dist_hand2, dist_depth_points;
              CvConvexityDefect *defect1, *defect2;
              CvPoint *defect1_top_point, *defect2_top_point;
              defect1 = CV_GET_SEQ_ELEM (CvConvexityDefect,
                                         defects,
                                         i);
              defect2 = CV_GET_SEQ_ELEM (CvConvexityDefect,
                                         defects,
                                         i - 1);

              if (defect1->end->y < defect1->start->y)
                defect1_top_point = defect1->end;
              else
                defect1_top_point = defect1->start;

              if (defect2->end->y < defect2->start->y)
                defect2_top_point = defect2->end;
              else
                defect2_top_point = defect2->start;

              dist_hand1 = get_points_distance (defect1_top_point,
                                                defect1->depth_point);
              dist_hand2 = get_points_distance (defect2_top_point,
                                                defect2->depth_point);
              dist_depth_points = get_points_distance (defect1->depth_point,
                                                       defect2->depth_point);
              if (dist_depth_points < MAX (dist_hand1, dist_hand2))
                sum++;
            }
        }

      if (sum > 0)
        return TRUE;
    }

  return FALSE;
}
void CV_PyrSegmentationTest::run( int /*start_from*/ )
{
    const int level = 5;
    const double range = 20;

    int code = CvTS::OK;

    CvPoint _cp[] ={{33,33}, {43,33}, {43,43}, {33,43}};
    CvPoint _cp2[] ={{50,50}, {70,50}, {70,70}, {50,70}};
    CvPoint* cp = _cp;
    CvPoint* cp2 = _cp2;
    CvConnectedComp *dst_comp[3];
    CvRect rect[3] = {{50,50,21,21}, {0,0,128,128}, {33,33,11,11}};
    double a[3] = {441.0, 15822.0, 121.0};

/*    ippiPoint cp3[] ={130,130, 150,130, 150,150, 130,150};  */
/*	CvPoint cp[] ={0,0, 5,5, 5,0, 10,5, 10,0, 15,5, 15,0};  */
    int nPoints = 4;
    int block_size = 1000;

    CvMemStorage *storage;   /*   storage for connected component writing  */
    CvSeq *comp;

    CvRNG* rng = ts->get_rng();
    int i, j, iter;

    IplImage *image, *image_f, *image_s;
    CvSize size = {128, 128};
    const int threshold1 = 50, threshold2 = 50;

    rect[1].width = size.width;
    rect[1].height = size.height;
    a[1] = size.width*size.height - a[0] - a[2];

    OPENCV_CALL( storage = cvCreateMemStorage( block_size ) );

    for( iter = 0; iter < 2; iter++ )
    {
        int channels = iter == 0 ? 1 : 3;
        int mask[] = {0,0,0};

        image = cvCreateImage(size, 8, channels );
        image_s = cvCloneImage( image );
        image_f = cvCloneImage( image );

        if( channels == 1 )
        {
            int color1 = 30, color2 = 110, color3 = 180;

            cvSet( image, cvScalarAll(color1));
            cvFillPoly( image, &cp, &nPoints, 1, cvScalar(color2));
            cvFillPoly( image, &cp2, &nPoints, 1, cvScalar(color3));
        }
        else
        {
            CvScalar color1 = CV_RGB(30,30,30), color2 = CV_RGB(255,0,0), color3 = CV_RGB(0,255,0);

            assert( channels == 3 );
            cvSet( image, color1 );
            cvFillPoly( image, &cp, &nPoints, 1, color2);
            cvFillPoly( image, &cp2, &nPoints, 1, color3);
        }

        cvRandArr( rng, image_f, CV_RAND_UNI, cvScalarAll(0), cvScalarAll(range*2) );
        cvAddWeighted( image, 1, image_f, 1, -range, image_f );

        cvPyrSegmentation( image_f, image_s,
                           storage, &comp,
                           level, threshold1, threshold2 );

        if(comp->total != 3)
        {
            ts->printf( CvTS::LOG,
                "The segmentation function returned %d (not 3) components\n", comp->total );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        /*  read the connected components     */
        dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
        dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
        dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );

        /*{
            for( i = 0; i < 3; i++ )
            {
                CvRect r = dst_comp[i]->rect;
                cvRectangle( image_s, cvPoint(r.x,r.y), cvPoint(r.x+r.width,r.y+r.height),
                    CV_RGB(255,255,255), 3, 8, 0 );
            }

            cvNamedWindow( "test", 1 );
            cvShowImage( "test", image_s );
            cvWaitKey(0);
        }*/

        code = cvTsCmpEps2( ts, image, image_s, 10, false, "the output image" );
        if( code < 0 )
            goto _exit_;

        for( i = 0; i < 3; i++)
        {
            for( j = 0; j < 3; j++ )
            {
                if( !mask[j] && dst_comp[i]->area == a[j] &&
                    dst_comp[i]->rect.x == rect[j].x &&
                    dst_comp[i]->rect.y == rect[j].y &&
                    dst_comp[i]->rect.width == rect[j].width &&
                    dst_comp[i]->rect.height == rect[j].height )
                {
                    mask[j] = 1;
                    break;
                }
            }
            if( j == 3 )
            {
                ts->printf( CvTS::LOG, "The component #%d is incorrect\n", i );
                code = CvTS::FAIL_BAD_ACCURACY;
                goto _exit_;
            }
        }

        cvReleaseImage(&image_f);
        cvReleaseImage(&image);
        cvReleaseImage(&image_s);
    }

_exit_:

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&image_f);
    cvReleaseImage(&image);
    cvReleaseImage(&image_s);

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Example #20
void  detect(IplImage* img_8uc1,IplImage* img_8uc3) 
{  
	clock_t inicio, fin;
	inicio = clock();

	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	CvSeq* maxitem=NULL;
	char resultado [] = " ";
	double area=0,areamax=0;
	double longitudExt = 0;
	double radio = 0;
	int maxn=0;
	int Nc = cvFindContours(
		img_8uc1,
		storage,
		&first_contour,
		sizeof(CvContour),
		CV_RETR_LIST 
		);
	int n=0;
	//printf( "Contornos detectados: %d\n", Nc );

	if(Nc>0)
	{
		for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) 
		{     
			area=cvContourArea(c,CV_WHOLE_SEQ );

			if(area>areamax)
			{
				areamax=area;
				maxitem=c;
				maxn=n;
			}
			n++;
		}

		CvMemStorage* storage3 = cvCreateMemStorage(0);

		if(areamax>5000)
		{
			maxitem = cvApproxPoly( maxitem, sizeof(CvContour), storage3, CV_POLY_APPROX_DP, 10, 1 );
			CvPoint pt0;

			CvMemStorage* storage1 = cvCreateMemStorage(0);
			CvMemStorage* storage2 = cvCreateMemStorage(0);
			CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour), sizeof(CvPoint), storage1 );
			CvSeq* hull;
			CvSeq* defects;

			CvPoint minDefectPos;
			minDefectPos.x = 1000000;
			minDefectPos.y = 1000000;

			CvPoint maxDefectPos;
			maxDefectPos.x = 0;
			maxDefectPos.y = 0;			


			for(int i = 0; i < maxitem->total; i++ )
			{   
				CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, maxitem, i );
				pt0.x = p->x;
				pt0.y = p->y;
				cvSeqPush( ptseq, &pt0 );
			}
			hull = cvConvexHull2( ptseq, 0, CV_CLOCKWISE, 0 );
			int hullcount = hull->total;

			defects= cvConvexityDefects(ptseq,hull,storage2  );

			//printf(" Numero de defectos %d \n",defects->total);
			CvConvexityDefect* defectArray;  

			int j=0;  

			// This cycle marks all defects of convexity of current contours.  

			longitudExt = 0;

			for(;defects;defects = defects->h_next)  
			{  
				int nomdef = defects->total; // defect amount  
				//outlet_float( m_nomdef, nomdef );  
				//printf(" defect no %d \n",nomdef);

				if(nomdef == 0)  
					continue;  

				// Alloc memory for defect set.     
				//fprintf(stderr,"malloc\n");  
				defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*nomdef);  

				// Get defect set.  
				//fprintf(stderr,"cvCvtSeqToArray\n");  
				cvCvtSeqToArray(defects,defectArray, CV_WHOLE_SEQ); 






				// Draw marks for all defects.  
				for(int i=0; i<nomdef; i++)  
				{  					

					CvPoint startP;
					startP.x = defectArray[i].start->x;
					startP.y = defectArray[i].start->y;

					CvPoint depthP;
					depthP.x = defectArray[i].depth_point->x;
					depthP.y = defectArray[i].depth_point->y;

					CvPoint endP;
					endP.x = defectArray[i].end->x;
					endP.y = defectArray[i].end->y;





					// track the minimum and maximum defect coordinates

					minDefectPos.x = getMin (startP.x, depthP.x, endP.x, minDefectPos.x);
					minDefectPos.y = getMin (startP.y, depthP.y, endP.y, minDefectPos.y);

					maxDefectPos.x = getMax (startP.x, depthP.x, endP.x, maxDefectPos.x);
					maxDefectPos.y = getMax (startP.y, depthP.y, endP.y, maxDefectPos.y);					

					// done tracking the minimum and maximum
					if (saveLength)
					{
						longitudExt += longBtwnPoints(startP, depthP);
						longitudExt += longBtwnPoints(depthP, endP);


					}
					//printf(" defect depth for defect %d %f \n",i,defectArray[i].depth);
					cvLine(img_8uc3, startP, depthP, CV_RGB(255,255,0),1, CV_AA, 0 ); 


					cvCircle( img_8uc3, depthP, 5, CV_RGB(0,0,164), 2, 8,0);  
					cvCircle( img_8uc3, startP, 5, CV_RGB(255,0,0), 2, 8,0);  
					cvCircle( img_8uc3, endP, 5, CV_RGB(0,255,0), 2, 8,0);  

					cvLine(img_8uc3, depthP, endP,CV_RGB(0,0,0),1, CV_AA, 0 );   
				} 

				/*if (nomdef>0)
				{
				resultado [0] = identificaGesto (longitudExt, nomdef, radio);
				if (resultado[0] !=' ')
				printf ("Gesto identificado (%c) \n", resultado[0]);
				}*/

				if (saveLength)
				{
					radio = (double)maxDefectPos.x / (double)maxDefectPos.y;
					if (nomdef>0)
					{
						printf ("_______________________\n");

						
 
						
						

						resultado [0] = identificaGesto (longitudExt, nomdef, radio);
						
						fin = clock();
						fin = fin - inicio;

						if (resultado[0] !=' ')
							printf ("Gesto identificado (%c) \n", resultado[0]);
						else
							printf ("No se identifico ningun gesto\n");

						printf("Tiempo de ejecucion: %f\nLongitud %g \nNomDef %i \nradio %g \n",(((float)fin)/CLOCKS_PER_SEC ), longitudExt, nomdef, radio);
						FILE *fp;
						fp=fopen("archivo.txt", "a");
						if (nomdef == 6)
							fprintf(fp, "\n>>>>>>>5<<<<<<\n%g\n%i\n%g\n",longitudExt, nomdef, radio);
						else
							fprintf(fp, "\n%g\n%i\n%g\n",longitudExt, nomdef, radio);
						fclose (fp);
					}
					else
						printf("No hay defectos");
					printf ("_______________________\n");
				}
				/*
				char txt[]="0";
				txt[0]='0'+nomdef-1;
				CvFont font;
				cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
				cvPutText(img_8uc3, txt, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0)); */

				CvFont font;
				cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
				if (resultado!= NULL)
					cvPutText(img_8uc3, resultado, cvPoint(50, 50), &font, cvScalar(0, 0, 255, 0));

				j++;  

				// Free memory.   
				free(defectArray);  
			} 

			pt0 = **CV_GET_SEQ_ELEM( CvPoint*, hull, hullcount - 1 );

			for(int i = 0; i < hullcount; i++ )
			{

				CvPoint pt = **CV_GET_SEQ_ELEM( CvPoint*, hull, i );
				cvLine( img_8uc3, pt0, pt, CV_RGB( 0, 255, 0 ), 1, CV_AA, 0 );
				pt0 = pt;
			}



			cvLine( img_8uc3, minDefectPos, cvPoint( (maxDefectPos.x), (minDefectPos.y)), CV_RGB( 255, 0, 0 ), 1, CV_AA, 0 );
			cvLine( img_8uc3,  cvPoint( (maxDefectPos.x), (minDefectPos.y)), maxDefectPos, CV_RGB( 255, 0, 0 ), 1, CV_AA, 0 );
			cvLine( img_8uc3, maxDefectPos, cvPoint( (minDefectPos.x), (maxDefectPos.y)), CV_RGB( 255, 0, 0 ), 1, CV_AA, 0 );
			cvLine( img_8uc3, cvPoint( (minDefectPos.x), (maxDefectPos.y)), minDefectPos, CV_RGB( 255, 0, 0 ), 1, CV_AA, 0 );

			cvReleaseMemStorage( &storage );
			cvReleaseMemStorage( &storage1 );
			cvReleaseMemStorage( &storage2 );
			cvReleaseMemStorage( &storage3 );
			//return 0;
		}
	}
static int aPyrSegmentation(void* agr)
{
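/*  Regression test for cvPyrSegmentation: draw two filled squares on a uniform
    background, add uniform noise, run the pyramid segmentation and (for the
    3-channel case) compare the resulting connected components against the
    expected rectangles and areas.  */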
    CvPoint _cp[] ={33,33, 43,33, 43,43, 33,43}; 
    CvPoint _cp2[] ={50,50, 70,50, 70,70, 50,70};  
    CvPoint* cp = _cp;
    CvPoint* cp2 = _cp2;
    CvConnectedComp *dst_comp[3];
    CvRect rect[3] = {50,50,21,21, 0,0,128,128, 33,33,11,11};
    double a[3] = {441.0, 15822.0, 121.0};

/*    ippiPoint cp3[] ={130,130, 150,130, 150,150, 130,150};  */
/*	CvPoint cp[] ={0,0, 5,5, 5,0, 10,5, 10,0, 15,5, 15,0};  */
    int chanels = (int)agr;    /* number of color channels  */
    int width = 128;
    int height = 128;
    int nPoints = 4;
    int block_size = 1000;
    int color1 = 30, color2 = 110, color3 = 180;
    int level = 5;
    long diff, l;
    int code;

    CvMemStorage *storage;   /*   storage for connected component writing  */
    CvSeq *comp;

    double lower, upper;
    unsigned seed;
    char rand;
    AtsRandState state;
    int i,j;

    IplImage *image, *image_f, *image_s;
    CvSize size;
    uchar *f_cur, *f_row;
    uchar *row;
    uchar *cur;
    int threshold1, threshold2;

    code = TRS_OK;

    if(chanels != 1 && chanels != 3)
        return TRS_UNDEF;
/* read tests params */

    if(!trsiRead( &width, "128", "image width" ))
        return TRS_UNDEF;
    if(!trsiRead( &height, "128", "image height" ))
        return TRS_UNDEF;
    if(!trsiRead( &level, "5", "pyramid level" ))
        return TRS_UNDEF;


/*  create Image   */
    l = width*height;
    size.width = width;
    size.height = height;

    rect[1].height = height;
    rect[1].width = width;
    a[1] = l - a[0] - a[2];

    image = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 
    image_s = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 

    memset(image->imageData, color1, chanels*l);

    image_f = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 

    OPENCV_CALL( storage = cvCreateMemStorage( block_size ) );

/*  add noise to the image   */
    upper = 20;
    lower = -upper;
    seed = 345753;
    atsRandInit( &state, lower, upper, seed );

/*   segmentation by pyramid     */    
    threshold1 = 50;
    threshold2 = 50;

    switch(chanels)
    {
        case 1:
        {
            cvFillPoly( image, &cp, &nPoints, 1, color2);
            cvFillPoly( image, &cp2, &nPoints, 1, color3); 

            row = (uchar*)image->imageData;
            f_row = (uchar*)image_f->imageData;
            for(i = 0; i<size.height; i++)
            {
                cur = row;
                f_cur = f_row;
                for(j = 0; j<size.width; j++)
                {
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur)=(uchar)((*cur) + rand);
                    cur++;
                    f_cur++;
                }
                row+=image->widthStep;
                f_row+=image_f->widthStep;
            }

            cvPyrSegmentation( image_f, image_s,
                               storage, &comp, 
                               level, threshold1, threshold2 );

            //if(comp->total != 3) { code = TRS_FAIL; goto exit; }
/*  read the connected components     */
            /*dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
            dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
            dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );*/
            break;
        }
        case 3:
        {
            cvFillPoly( image, &cp, &nPoints, 1, CV_RGB(color2,color2,color2));
            cvFillPoly( image, &cp2, &nPoints, 1, CV_RGB(color3,color3,color3)); 

            row = (uchar*)image->imageData;
            f_row = (uchar*)image_f->imageData;
            for(i = 0; i<size.height; i++)
            {
                cur = row;
                f_cur = f_row;
                for(j = 0; j<size.width; j++)
                {
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur)=(uchar)((*cur) + rand);
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur+1)=(uchar)(*(cur+1) + rand);
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur+2)=(uchar)(*(cur+2) + rand);
                    cur+=3;
                    f_cur+=3;
                }
                row+=image->widthStep;
                f_row+=image_f->widthStep;
            }

            cvPyrSegmentation(image_f, image_s, storage, &comp, level,
                              threshold1, threshold2);   
/*  read the connected components     */
            if(comp->total != 3) { code = TRS_FAIL; goto exit; }
            dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
            dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
            dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );
            break;
        }
    }
 
    diff = 0;
    /*diff = atsCompare1Db( (uchar*)image->imageData, (uchar*)image_s->imageData, chanels*l, 4);
 
    for(i = 0; i < 3; i++)
    {
        if(dst_comp[i]->area != a[i]) diff++;
        if(dst_comp[i]->rect.x != rect[i].x) diff++;
        if(dst_comp[i]->rect.y != rect[i].y) diff++;
        if(dst_comp[i]->rect.width != rect[i].width) diff++;
        if(dst_comp[i]->rect.height != rect[i].height) diff++;
    }*/

    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "upper =%f diff =%ld \n",upper, diff);

    if(diff > 0 )
        code = TRS_FAIL;
    else
        code = TRS_OK;

exit:

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&image_f);
    cvReleaseImage(&image);
    cvReleaseImage(&image_s);

/*    trsFree(cp);  */
/*    _getch();     */
    return code;
}
int main( int argc, char** argv )
{
	char path[1024];
	IplImage* img;
	help();
	if (argc!=2)
	{
		strcpy(path,"puzzle.png");
		img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
		if (!img)
		{
			printf("\nUsage: mser_sample <path_to_image>\n");
			return 0;
		}
	}
	else
	{
		strcpy(path,argv[1]);
		img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
	}
	
	if (!img)
	{
		printf("Unable to load image %s\n",path);
		return 0;
	}
	IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
	IplImage* ellipses = cvCloneImage(rsp);
	cvCvtColor(img,ellipses,CV_GRAY2BGR);
	CvSeq* contours;
	CvMemStorage* storage= cvCreateMemStorage();
	IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
	cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
	CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
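	// cvExtractMSER returns one sequence per region; each region is itself a
	// CvSeq of CvPoint elements, read below with CV_GET_SEQ_ELEM.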

	double t = (double)cvGetTickCount();
	cvExtractMSER( hsv, NULL, &contours, storage, params );
	t = cvGetTickCount() - t;
	printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
	uchar* rsptr = (uchar*)rsp->imageData;
	// draw mser with different color
	for ( int i = contours->total-1; i >= 0; i-- )
	{
		CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
		for ( int j = 0; j < r->total; j++ )
		{
			CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
			rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
			rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
			rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
		}
	}
	// fit an ellipse to each MSER region (cvFitEllipse2 seems to return the angle with an offset, corrected below)
	for ( int i = 0; i < contours->total; i++ )
	{
		CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
		CvBox2D box = cvFitEllipse2( r );
		box.angle=(float)CV_PI/2-box.angle;
		
		if ( r->color > 0 )
			cvEllipseBox( ellipses, box, colors[9], 2 );
		else
			cvEllipseBox( ellipses, box, colors[2], 2 );
			
	}

	cvSaveImage( "rsp.png", rsp );

	cvNamedWindow( "original", 0 );
	cvShowImage( "original", img );
	
	cvNamedWindow( "response", 0 );
	cvShowImage( "response", rsp );

	cvNamedWindow( "ellipses", 0 );
	cvShowImage( "ellipses", ellipses );

	cvWaitKey(0);

	cvDestroyWindow( "original" );
	cvDestroyWindow( "response" );
	cvDestroyWindow( "ellipses" );
	cvReleaseImage(&rsp);
	cvReleaseImage(&img);
	cvReleaseImage(&ellipses);
	
}
Example #23
0
void DetectAndDraw(IplImage* frame, HandyServer& server)
{
  double t = (double)cvGetTickCount();

  IplImage* img_nvg = cvCreateImage(cvGetSize(frame), frame->depth, 1);
  IplImage* img_imp = cvCloneImage(img_nvg);

  // Convert to grayscale, then binarize
  cvConvertImage(frame, img_nvg, (frame->origin!=IPL_ORIGIN_TL) ? CV_CVTIMG_FLIP : 0);
  cvThreshold(img_nvg, img_imp, SEUIL_BINAIRE, 255, CV_THRESH_BINARY_INV);

  // Dilation followed by erosion
  IplConvKernel* noyau = cvCreateStructuringElementEx(DILATE_RADIUS,
                                                      DILATE_RADIUS,
                                                      DILATE_RADIUS/2,
                                                      DILATE_RADIUS/2,
                                                      CV_SHAPE_ELLIPSE);
  cvDilate(img_imp, img_imp, noyau);
  cvErode(img_imp,  img_imp, noyau);

  // Detect the shapes (contours)
  CvMemStorage* storage = cvCreateMemStorage(0);
  CvSeq* contour = NULL;
  cvFindContours(img_imp, storage, &contour, sizeof(CvContour));

  // Compute the center of each finger and send the positions to the server
  std::vector<Pos2d> positions;
  for(;contour != 0; contour = contour->h_next)
  {
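    // Compute the axis-aligned bounding box of this contour's points.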
    int max_x = -1, max_y = -1, min_x = MAX_XY, min_y = MAX_XY;
    for(int i = 0; i < contour->total; ++i)
    {
      CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, contour, i);
      if (p->x > max_x) max_x = p->x;
      if (p->y > max_y) max_y = p->y;
      if (p->x < min_x) min_x = p->x;
      if (p->y < min_y) min_y = p->y;
    }

    CvPoint center;
    Pos2d pos((max_x + min_x) / 2, (max_y + min_y) / 2);
    center.x = pos.x;
    center.y = pos.y;
    positions.push_back(pos);

    int szx = max_x - min_x, szy = max_y - min_y;
    if (szx > MIN_SZ && szy > MIN_SZ &&
        szx < MAX_SZ && szy < MAX_SZ &&
        szx / (float)szy < MAX_RATIO &&
        szy / (float) szx < MAX_RATIO)
      cvCircle(frame, center, 16, CV_RGB(255,0,0));
  }
  server.sendPos(positions);

  // Free the memory
  cvReleaseMemStorage(&storage);
  cvReleaseImage(&img_nvg);
  cvReleaseImage(&img_imp);

  // Compute the elapsed time and display the image
  t = (double)cvGetTickCount() - t;
  std::cout << "detection time = " << t/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

  cvShowImage("result", frame);
}
Example #24
0
void EyeTracker::locate_corneal_reflection( int sx, int sy, int window_size,
                                            int biggest_crar, int &crx, int &cry, int &crar)
{
	if(window_size % 2 == 0)
	{
		printf("Error! window_size should be odd!\n");
	}

	int r = (window_size-1)/2;
	int startx = MAX(sx-r, 0);
	int endx = MIN(sx+r, grayEyeImagePts->width-1);
	int starty = MAX(sy-r, 0);
	int endy = MIN(sy+r, grayEyeImagePts->height-1);
	cvSetImageROI(grayEyeImagePts, cvRect(startx, starty, endx-startx+1, endy-starty+1));
	cvSetImageROI(threshold_image, cvRect(startx, starty, endx-startx+1, endy-starty+1));

	double min_value, max_value;
	CvPoint min_loc, max_loc; //location


	cvMinMaxLoc(grayEyeImagePts, &min_value, &max_value, &min_loc, &max_loc);

	int threshold, i;
	CvSeq* contour=NULL;
	CvMemStorage* storage = cvCreateMemStorage(0);
	double *scores = (double*)malloc(sizeof(double)*((int)max_value+1));
	memset(scores, 0, sizeof(double)*((int)max_value+1));
	int area, max_area, sum_area;
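	// Sweep the threshold down from the brightest pixel value; at each step score
	// how dominant the largest bright blob is compared to all other blobs.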
	for (threshold = (int)max_value; threshold >= 1; threshold--) {
		cvThreshold(grayEyeImagePts, threshold_image, threshold, 1, CV_THRESH_BINARY);
		cvFindContours(threshold_image, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);    
		max_area = 0;
		sum_area = 0;
		CvSeq *max_contour = contour;
		for( ; contour != 0; contour = contour->h_next) {
			area = contour->total + (int)(fabs(cvContourArea(contour, CV_WHOLE_SEQ)));
			sum_area += area;
			if (area > max_area) {
				max_area = area;
				max_contour = contour;
			}
		}
		if (sum_area-max_area > 0) {
			scores[threshold-1] = (double)max_area / (sum_area-max_area);
			//printf("max_area: %d, max_contour: %d, sum_area: %d; scores[%d]: %lf\n", 
			//        max_area, max_contour->total, sum_area, threshold-1, scores[threshold-1]);      
		}
		else
			continue;

		if (scores[threshold-1] - scores[threshold] < 0) {
			//found the corneal reflection
			crar = (int)sqrt(max_area / PI);
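			// Average the points of the largest contour to get the reflection center.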
			int sum_x = 0;
			int sum_y = 0;
			CvPoint *point;
			for (i = 0; i < max_contour->total; i++) {
				point = CV_GET_SEQ_ELEM(CvPoint, max_contour, i);
				sum_x += point->x;
				sum_y += point->y;
			}
			crx = sum_x/max_contour->total;
			cry = sum_y/max_contour->total;
			break;
		}
	}

	free(scores);
	cvReleaseMemStorage(&storage);
	cvResetImageROI(grayEyeImagePts);
	cvResetImageROI(threshold_image);

	if (crar > biggest_crar) {
		//printf("(corneal) size wrong! crx:%d, cry:%d, crar:%d (should be less than %d)\n", crx, cry, crar, biggest_crar);
		cry = crx = -1;
		crar = -1;
	}

	if (crx != -1 && cry != -1) {
		//  printf("(corneal) startx:%d, starty:%d, crx:%d, cry:%d, crar:%d\n", startx, starty, crx, cry, crar);
		crx += startx;
		cry += starty;
	}

}
Example #25
0
//--------------------------------------------------------------
void testApp::update(){
	

    bool bNewFrame = false;

	#ifdef _USE_LIVE_VIDEO
       vidGrabber.grabFrame();
	   bNewFrame = vidGrabber.isFrameNew();
    #else
        vidPlayer.idleMovie();
        bNewFrame = vidPlayer.isFrameNew();
	#endif

	if (bNewFrame){

		#ifdef _USE_LIVE_VIDEO
            colorImg.setFromPixels(vidGrabber.getPixels(), cw,ch);
	    #else
            colorImg.setFromPixels(vidPlayer.getPixels(), cw,ch);
        #endif

		
		kx = (float) ofGetWidth()  / cw;
		ky = (float) ofGetHeight() / ch;
		
		cvSmooth(colorImg.getCvImage(), medianImg.getCvImage(), CV_MEDIAN, medianValue, medianValue);
		medianImg.flagImageChanged();
				
		grayImage = medianImg;
		
		cvCvtColor(colorImg.getCvImage(), hsvImage.getCvImage(), CV_RGB2HSV);
		hsvImage.flagImageChanged();
		
		cvSetImageCOI(hsvImage.getCvImage(), 2);
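		// COI = 2 selects the saturation plane of the HSV image; copy it into
		// satImage, then clear the channel of interest again.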
		cvCopy(hsvImage.getCvImage(), satImage.getCvImage());
		satImage.flagImageChanged();
		cvSetImageCOI(hsvImage.getCvImage(), 0);
		
		//cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_BLUR, 3, 3, 0, 0);
		
		cvAdaptiveThreshold(grayImage.getCvImage(), trsImage.getCvImage(), 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, adaptiveThreshValue);
		//cvCanny(trsImage.getCvImage(), trsImage.getCvImage(), sb, sb*4, 3);
		trsImage.flagImageChanged();
		
		
//		cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_MEDIAN, 7, 7);
		
//		cvSmooth( iplImage, iplImage, CV_BLUR, br, br, 0, 0 );
//		cvSmooth( iplImage, iplImage, CV_MEDIAN, 7, 7);
		cvCanny(  grayImage.getCvImage(), cannyImage.getCvImage(), cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
		cannyImage.flagImageChanged();
			
		//cvPyrMeanShiftFiltering(colorImg.getCvImage(), colorImg.getCvImage(), 20, 40, 2);
		
		if (mode==MODE_DRAWING) {

			if (draw_edges) {

				#if PROBABILISTIC_LINE
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, lineThreshValue, lineMinLengthValue, lineMaxGapValue );
				#else
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
				#endif
			
			}
			
			if (draw_contours || draw_approx) {
				cvFindContours(cannyImage.getCvImage(), edgesStorage, &edgeContours);
				
				CvSeq* contour = edgeContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
				
			}

			if (draw_fills) {
				cvFindContours(trsImage.getCvImage(), fillsStorage, &fillContours);
				
				CvSeq* contour = fillContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
			}
		}

	}
	
	
	// update scope
//	float* rand = new float[50];
//	for(int i=0 ;i<50; i++){
//		rand[i] = ofRandom(-1.0,1);
//		
//	}
//	
//	gui->update(scope, kofxGui_Set_FloatArray, rand, sizeof(float*));
//	
//	// make 3 seconds loop
//	float f = ((ofGetElapsedTimeMillis()%3000) / 3000.0);
//	gui->update(points, kofxGui_Set_Float, &f, sizeof(float));

}
int cam() // acts as the main processing loop
{
    int hdims = 16;
    printf("I am main");
    CvCapture* capture = cvCreateCameraCapture(1); // open the USB camera
    CvHistogram *hist = 0;
    CvMemStorage* g_storage = NULL;
    Display *display=construct_display();
    int x,y, tmpx=0, tmpy=0, chk=0;
    IplImage* image=0;
    IplImage* lastimage1=0;
    IplImage* lastimage=0;
    IplImage* diffimage;
    IplImage* bitimage;
    IplImage* src=0,*hsv=0,*hue=0,*backproject=0;
    IplImage* hsv1=0,*hue1=0,*histimg=0,*frame=0,*edge=0;
    float* hranges;
    cvNamedWindow( "CA", CV_WINDOW_AUTOSIZE ); //display window 3
    //Calculation of Histogram//
    cvReleaseImage(&src);
    src= cvLoadImage("images/skin.jpg"); // load the skin patch used to build the histogram
    while(1)
    {
        frame = cvQueryFrame( capture ); // grab frames one by one for processing
        int j=0;
        float avgx=0;
        float avgy=0;
        if( !frame ) break;
        //#########################Background Subtraction#########################//
        if(!image)
        {
            image=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            bitimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            diffimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            lastimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
        }
        cvCvtColor(frame,image,CV_BGR2GRAY);
        if(!lastimage1)
        {
            lastimage1=cvLoadImage("images/img.jpg");
        }
        cvCvtColor(lastimage1,lastimage,CV_BGR2GRAY);
        cvAbsDiff(image,lastimage,diffimage);
        cvThreshold(diffimage,bitimage,65,225,CV_THRESH_BINARY);
        cvInRangeS(bitimage,cvScalar(0),cvScalar(30),bitimage);
        cvSet(frame,cvScalar(0,0,0),bitimage);
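        // Pixels that barely changed with respect to the reference image are set to
        // black, so only the moving hand is left in 'frame'.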
        cvReleaseImage(&hsv);
        hsv= cvCreateImage( cvGetSize(src), 8, 3 );
        cvReleaseImage(&hue);
        hue= cvCreateImage( cvGetSize(src), 8, 1);
        cvCvtColor(src,hsv,CV_BGR2HSV);
        cvSplit(hsv,hue,0,0,0);
        float hranges_arr[] = {0,180};
        hranges = hranges_arr;
        hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
        cvCalcHist(&hue, hist, 0, 0 );
        cvThreshHist( hist, 100 );
        //#############################Display histogram##############################//
        cvReleaseImage(&histimg);
        histimg = cvCreateImage( cvSize(320,200), 8, 3 );
        cvZero( histimg );
        int bin_w = histimg->width / hdims;
        //#### Calculating the Probability of Finding the skin with the built-in method ###//
        if(0)
        {
            free (backproject);
            free (hsv1);
            free (hue1);
        }
        cvReleaseImage(&backproject);
        backproject= cvCreateImage( cvGetSize(frame), 8, 1 );
        cvReleaseImage(&hsv1);
        hsv1 = cvCreateImage( cvGetSize(frame), 8, 3);
        cvReleaseImage(&hue1);
        hue1 = cvCreateImage( cvGetSize(frame), 8, 1);
        cvCvtColor(frame,hsv1,CV_BGR2HSV);
        cvSplit(hsv1,hue1,0,0,0);
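        // Back-project the skin-hue histogram onto the current frame: bright pixels
        // in 'backproject' mark likely skin regions.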
        cvCalcBackProject( &hue1, backproject, hist );
        cvSmooth(backproject,backproject,CV_GAUSSIAN);
        cvSmooth(backproject,backproject,CV_MEDIAN);
        if( g_storage == NULL )
        g_storage = cvCreateMemStorage(0);
        else
        cvClearMemStorage( g_storage );
        CvSeq* contours=0;
        CvSeq* result =0;
        cvFindContours(backproject, g_storage, &contours );
        if(contours)
        {
            result=cvApproxPoly(contours, sizeof(CvContour), g_storage,
            CV_POLY_APPROX_DP, 7, 1);
        }
        cvZero( backproject);
        for( ; result != 0; result = result->h_next )
        {
            double area = cvContourArea( result );
            cvDrawContours( backproject,result, CV_RGB(255,255, 255), CV_RGB(255,0, 255)
            , -1,CV_FILLED, 8 );
            for( int i = 1; i < result->total; i++ )
            {
                if( i >= 1 and fabs(area) > 300 )
                {
                    CvPoint* p2 = CV_GET_SEQ_ELEM( CvPoint, result, i );
                    if(1)
                    {
                        avgx=avgx+p2->x;
                        avgy=avgy+p2->y;
                        j=j+1;
                        cvCircle(backproject,cvPoint(p2->x,p2->y ),10,
                        cvScalar(255,255,255));
                    }
                }
            }
        }
        if ( j > 0 )
        {
            cvCircle( backproject, cvPoint(avgx/j, avgy/j ), 40, cvScalar(255,255,255) );
            x = ( avgx/j );
            y = ( avgy/j );
            x = ( (x*1240)/640 ) - 20;
            y = ( (y*840)/480 ) - 20;
        }
        if ( (abs(tmpx-x)>6 or abs(tmpy-y)>6 ) and j )
        {
            tmpx = x;
            tmpy = y;
            chk=0;
        }
        else chk++;
        mouse_move1( tmpx, tmpy, display );
        if ( chk==10 )
        {
            mouse_click( 5, 2, display );
            mouse_click( 5, 3, display );
        }
        cvSaveImage( "final.jpg", frame );
        cvSaveImage( "final1.jpg", backproject );
        cvShowImage( "CA", backproject );
        char c = cvWaitKey(33);
        if( c == 27 )
        break; // leave the loop (the windows are destroyed below) when <Escape> is pressed
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "CA" );
    return 0;
}
Example #27
0
static CvSeq *
get_finger_defects (guint16* depth,
                    guint width,
                    guint height,
                    SkeltrackJointList list)
{
  CvSeq *defects = NULL;
  SkeltrackJoint *head, *left_hand, *right_hand, *hand = NULL;

  if (list == NULL)
    return NULL;

  head = skeltrack_joint_list_get_joint (list, SKELTRACK_JOINT_ID_HEAD);
  right_hand = skeltrack_joint_list_get_joint (list, SKELTRACK_JOINT_ID_RIGHT_HAND);
  left_hand = skeltrack_joint_list_get_joint (list, SKELTRACK_JOINT_ID_LEFT_HAND);

  if (head == NULL || (left_hand == NULL && right_hand == NULL))
    return NULL;

  if (right_hand == NULL)
    hand = left_hand;
  else if (left_hand == NULL)
    hand = right_hand;
  else
    {
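      /* Both hands were detected: prefer the one closer to the camera, provided
         it is clearly separated in depth from the head (|dz| > 150). */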
      if (right_hand->z < left_hand->z &&
          ABS (right_hand->z - head->z) > 150)
        {
          hand = right_hand;
        }
      else if (left_hand->z < right_hand->z &&
               ABS (left_hand->z - head->z) > 150)
        {
          hand = left_hand;
        }
    }

  if (hand == NULL)
    return NULL;


  defects = get_defects (depth,
                         width,
                         height,
                         hand->screen_x,
                         hand->screen_y,
                         hand->z);

  if (defects)
    {
      gfloat angle;
      guint i;
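      /* Discard defects whose opening angle exceeds 90 degrees; only the narrow
         gaps between extended fingers are kept. */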

      for (i = 0; i < defects->total; i++)
        {
          CvConvexityDefect *defect = CV_GET_SEQ_ELEM (CvConvexityDefect, defects, i);
          angle = get_angle (defect->start, defect->depth_point, defect->end);
          if (angle > M_PI_2)
            {
              cvSeqRemove(defects, i);
              i--;
            }
        }

      return defects;
    }

  return NULL;
}