Example #1
static void  keyEvent( unsigned char key, int x, int y)
{
    int             i;

    if( key == 0x1b || key == 'q' || key == 'Q' ) {
        cleanup();
    }

    if( cornerFlag && key==' ' ) {
        cvFindCornerSubPix( calibImageL, cornersL, chessboardCornerNumX*chessboardCornerNumY, cvSize(5,5),
                            cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1)  );
        for( i = 0; i < chessboardCornerNumX*chessboardCornerNumY; i++ ) {
            arParamObserv2Ideal(paramL.dist_factor, (double)cornersL[i].x, (double)cornersL[i].y,
                                &calibData[capturedImageNum].screenCoordL[i].x, &calibData[capturedImageNum].screenCoordL[i].y, paramL.dist_function_version);
        }
        cvFindCornerSubPix( calibImageR, cornersR, chessboardCornerNumX*chessboardCornerNumY, cvSize(5,5),
                            cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1)  );
        for( i = 0; i < chessboardCornerNumX*chessboardCornerNumY; i++ ) {
            arParamObserv2Ideal(paramR.dist_factor, (double)cornersR[i].x, (double)cornersR[i].y,
                                &calibData[capturedImageNum].screenCoordR[i].x, &calibData[capturedImageNum].screenCoordR[i].y, paramR.dist_function_version);
        }
        ARLOG("---------- %2d/%2d -----------\n", capturedImageNum+1, calibImageNum);
        for( i = 0; i < chessboardCornerNumX*chessboardCornerNumY; i++ ) {
            ARLOG("  %f, %f  ----   %f, %f\n", calibData[capturedImageNum].screenCoordL[i].x, calibData[capturedImageNum].screenCoordL[i].y,
                  calibData[capturedImageNum].screenCoordR[i].x, calibData[capturedImageNum].screenCoordR[i].y);
        }
        ARLOG("---------- %2d/%2d -----------\n", capturedImageNum+1, calibImageNum);
        capturedImageNum++;

        if( capturedImageNum == calibImageNum ) {
            calib();
            cleanup();
        }
    }
}
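Examples #1 and #3 below share one pattern: detect the chessboard corners, refine them in place with cvFindCornerSubPix on the grayscale calibration image, and accumulate views until calibImageNum captures have been taken. A minimal, self-contained sketch of just the detect-and-refine step with the legacy C API follows; the file name, the 7x5 inner-corner count and the 5x5 search half-window are illustrative assumptions, not values taken from the example.

/* Hedged sketch (legacy C API, OpenCV 1.x/2.x header layout assumed). */
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdio.h>

int refineChessboardDemo(void)
{
    IplImage *gray = cvLoadImage("board.png", CV_LOAD_IMAGE_GRAYSCALE);  /* assumed input file */
    if (!gray) return -1;

    CvPoint2D32f corners[7 * 5];
    int count = 0;
    int found = cvFindChessboardCorners(gray, cvSize(7, 5), corners, &count,
                                        CV_CALIB_CB_ADAPTIVE_THRESH);
    if (found) {
        /* refine in place: 5x5 search half-window, no dead zone,
           stop after 100 iterations or once the corner shift drops below 0.1 px */
        cvFindCornerSubPix(gray, corners, count, cvSize(5, 5), cvSize(-1, -1),
                           cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 0.1));
    }
    printf("found=%d, corners=%d\n", found, count);
    cvReleaseImage(&gray);
    return found ? 0 : 1;
}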
Example #2
std::vector<CvPoint2D32f> findCorners(Image<PixRGB<byte> > &img, int rows, int cols)
{

  int count = 0;

  std::vector<CvPoint2D32f> corners(rows*cols);

  Image<byte> in = luminance(img);

  int result = cvFindChessboardCorners(img2ipl(in), cvSize(rows,cols),
      &corners[0], &count,
      CV_CALIB_CB_ADAPTIVE_THRESH |
      CV_CALIB_CB_NORMALIZE_IMAGE |
      CV_CALIB_CB_FILTER_QUADS);

  // result = 0 if not all corners were found
  // Refine the corner positions to subpixel accuracy
  if (result != 0)
  {
    cvFindCornerSubPix(img2ipl(in),
                       &corners[0],
                       count,
                       cvSize(10,10),  // win
                       cvSize(-1,-1),  // zero_zone
                       cvTermCriteria(CV_TERMCRIT_ITER, 1000, 0.01));
    return corners;
  } else {
    return std::vector<CvPoint2D32f>();
  }


}
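For comparison, here is a hedged sketch of the same detect-then-refine flow against the C++ API (cv::findChessboardCorners / cv::cornerSubPix). The header paths assume the OpenCV 2.x layout and the parameter values only mirror the example above; none of this is taken from the original project.

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

// Hedged sketch: expects an 8-bit single-channel image.
std::vector<cv::Point2f> findCornersCpp(const cv::Mat& gray, int rows, int cols)
{
  std::vector<cv::Point2f> corners;
  bool found = cv::findChessboardCorners(gray, cv::Size(rows, cols), corners,
                                         cv::CALIB_CB_ADAPTIVE_THRESH |
                                         cv::CALIB_CB_NORMALIZE_IMAGE);
  if (!found)
    return std::vector<cv::Point2f>();   // not all corners were found

  // refine to subpixel accuracy: 10x10 search half-window, no dead zone
  cv::cornerSubPix(gray, corners, cv::Size(10, 10), cv::Size(-1, -1),
                   cv::TermCriteria(cv::TermCriteria::COUNT, 1000, 0.01));
  return corners;
}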
Example #3
static void  keyEvent( unsigned char key, int x, int y)
{
    CvPoint2D32f   *p1, *p2;
    int             i;

    if( key == 0x1b || key == 'q' || key == 'Q' ) {
        cleanup();
    }

    if( cornerFlag && key==' ' ) {
        cvFindCornerSubPix( calibImage, corners, chessboardCornerNumX*chessboardCornerNumY, cvSize(5,5),
                            cvSize(-1,-1), cvTermCriteria (CV_TERMCRIT_ITER, 100, 0.1)  );
        p1 = &corners[0];
        p2 = &cornerSet[capturedImageNum*chessboardCornerNumX*chessboardCornerNumY];
        for( i = 0; i < chessboardCornerNumX*chessboardCornerNumY; i++ ) {
            *(p2++) = *(p1++);
        }
        capturedImageNum++;
        ARLOG("---------- %2d/%2d -----------\n", capturedImageNum, calibImageNum);
        for( i = 0; i < chessboardCornerNumX*chessboardCornerNumY; i++ ) {
            ARLOG("  %f, %f\n", corners[i].x, corners[i].y);
        }
        ARLOG("---------- %2d/%2d -----------\n", capturedImageNum, calibImageNum);

        if( capturedImageNum == calibImageNum ) {
            calib();
            cleanup();
        }
    }
}
Example #4
bool CybCamCalibration::generateDataToCalibration(IplImage *rgb_image) {

    IplImage *view_gray;
    int found = 0, count = 0;

    img_size = cvGetSize(rgb_image);

    found = cvFindChessboardCorners( rgb_image, board_size,
                                     image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

    // improve the found corners' coordinate accuracy
    view_gray = cvCreateImage( cvGetSize(rgb_image), 8, 1 );
    cvCvtColor( rgb_image, view_gray, CV_BGR2GRAY );

    cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
                        cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

    cvReleaseImage( &view_gray );

    if( found )
    {
        cvSeqPush( image_points_seq, image_points_buf );

        sprintf( imagename, "image%03d.png", image_points_seq->total - 1 );
        cvSaveImage( imagename, rgb_image );
    }

    cvDrawChessboardCorners( rgb_image, board_size, image_points_buf, count, found );

    return ((unsigned)image_points_seq->total >= (unsigned)image_count);
}
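The members used above (image_points_seq, image_points_buf, board_size, image_count) are outside this excerpt. The following is a hedged sketch of how such a sequence is typically created and later flattened for calibration, modelled on the classic OpenCV calibration sample; the names and sizes are assumptions, not the actual CybCamCalibration definitions.

#include <opencv/cv.h>

// Assumed setup elsewhere (one sequence element = all corners of one accepted view):
//   CvMemStorage* storage = cvCreateMemStorage(0);
//   image_points_seq = cvCreateSeq(0, sizeof(CvSeq),
//                                  points_per_view * sizeof(CvPoint2D32f), storage);

// Hedged sketch: flatten the accepted views into an (N*points_per_view) x 2 float matrix.
CvMat* collectImagePoints(CvSeq* image_points_seq, int points_per_view)
{
    int total = image_points_seq->total;
    CvMat* points = cvCreateMat(total * points_per_view, 2, CV_32FC1);
    cvCvtSeqToArray(image_points_seq, points->data.fl, CV_WHOLE_SEQ);
    return points;
}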
Example #5
public:bool analizarFlujo(IplImage *img, IplImage *imgAnterior, CvRect *rect) {

            cvSetImageROI(img, *rect);
            IplImage *imgA1 = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
            cvCopy(img, imgA1);

            cvSetImageROI(imgAnterior, *rect);
            IplImage *imgB1 = cvCreateImage(cvGetSize(imgAnterior), imgAnterior->depth, imgAnterior->nChannels);
            cvCopy(imgAnterior, imgB1);

            cvResetImageROI(img);
            cvResetImageROI(imgAnterior);

            cvNamedWindow( "img", 1);
            cvNamedWindow( "imgA", 1);
            cvShowImage( "img", imgA1);
            cvShowImage( "imgA", imgB1);


            int py = imgA1->height;
            int px = imgA1->width;
            IplImage *imgA=cvCreateImage( cvSize(px,py),IPL_DEPTH_8U, 1);
            IplImage *imgB=cvCreateImage( cvSize(px,py),IPL_DEPTH_8U, 1);
            cvCvtColor( imgA1, imgA, CV_BGR2GRAY ); //
            cvCvtColor( imgB1, imgB, CV_BGR2GRAY ); //

            CvSize img_sz = cvGetSize( imgA );

            /////////////////////////////
            IplImage *eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
            IplImage *tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
            int corner_count = MAX_CORNERS;
            CvSize pyr_sz;
            int win_size = 5;


            cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
            cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,1));
            // Call the Lucas Kanade algorithm

            pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
            pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
            pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );

            cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,cornersA,cornersB,corner_count,cvSize( win_size,win_size ),5,features_found,feature_errors,cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .03 ),0);
            // Now make some image of what we are looking at:
            float dx=0.0;

            for (int i=0; i<corner_count; i++) {
                if( features_found[i]==0|| feature_errors[i]>100 ) {continue;}

                dx=sqrt((cornersA[i].x-cornersB[i].x)*(cornersA[i].x-cornersB[i].x)+(cornersA[i].y-cornersB[i].y)*(cornersA[i].y-cornersB[i].y));
                if(dx>1 && dx<50){
                    return true;
                } else {
                    return false;
                }
            }
            return false;
    }
Example #6
int main(int argc, char* argv[])
{
	board_w = 5; // Board width in squares
	board_h = 8; // Board height 
	n_boards = 8; // Number of boards
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );
	CvCapture* capture = cvCreateCameraCapture( 0 );
	assert( capture );

	cvNamedWindow( "Calibration" );
	// Allocate storage
	CvMat* image_points		= cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
	CvMat* object_points		= cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
	CvMat* point_counts			= cvCreateMat( n_boards, 1, CV_32SC1 );
	CvMat* intrinsic_matrix		= cvCreateMat( 3, 3, CV_32FC1 );
	CvMat* distortion_coeffs	= cvCreateMat( 5, 1, CV_32FC1 );

	CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
	int corner_count;
	int successes = 0;
	int step, frame = 0;

	IplImage *image = cvQueryFrame( capture );
	IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );

	// Capture corner views: loop until we've got n_boards
	// successful captures (all corners on the board are found)

	while( successes < n_boards ){
		// Skip every board_dt frames to allow user to move chessboard
		if( frame++ % board_dt == 0 ){
			// Find chessboard corners:
			int found = cvFindChessboardCorners( image, board_sz, corners,
				&corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );

			// Get subpixel accuracy on those corners
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
			cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ), 
				cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			// Draw it
			cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
			cvShowImage( "Calibration", image );

			// If we got a good board, add it to our data
			if( corner_count == board_n ){
				step = successes*board_n;
				for( int i=step, j=0; j < board_n; ++i, ++j ){
					CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
					CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
					CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
					CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
					CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
				}
				CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
				successes++;
			}
		} 
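The excerpt stops inside the capture loop. Once successes reaches n_boards, the classic version of this sample runs cvCalibrateCamera2 on the collected points and saves the results; a hedged sketch of that closing step is shown below, under the assumption that the matrices allocated above were filled completely (n_boards good views).

	// Hedged sketch of the omitted final step (assumes all n_boards views were captured).
	CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0f;   // initial guess: unit aspect ratio
	CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;

	cvCalibrateCamera2( object_points, image_points, point_counts,
						cvGetSize( image ),
						intrinsic_matrix, distortion_coeffs,
						NULL, NULL,            // per-view rotations/translations not needed here
						CV_CALIB_FIX_ASPECT_RATIO );

	cvSave( "Intrinsics.xml", intrinsic_matrix );
	cvSave( "Distortion.xml", distortion_coeffs );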
Example #7
void cv::cornerSubPix( const Mat& image, vector<Point2f>& corners,
                       Size winSize, Size zeroZone,
                       TermCriteria criteria )
{
    CvMat _image = image;
    cvFindCornerSubPix(&_image, (CvPoint2D32f*)&corners[0], (int)corners.size(),
                       winSize, zeroZone, criteria );
}
//
//	Detect the corners
//
//	Arguments:
//      frameImage : IplImage holding the captured frame
//      grayImage  : IplImage holding the grayscale image
//      corners    : buffer that receives the corner positions
//
//	Return value:
//		0        : not all corners could be detected
//		non-zero : all corners were detected
//
int findCorners( IplImage *frameImage, IplImage *grayImage, CvPoint2D32f *corners ) {
	int cornerCount;				//	number of detected corners
	int findChessboardCornersFlag;	//	flag for cvFindChessboardCorners
	int findFlag;					//	flag: were all corners detected?
	
	IplImage* m_image_binary;
	IplImage* m_set_image;
	
	m_image_binary     = cvCreateImage(cvSize(frameImage->width, frameImage->height), IPL_DEPTH_8U, 1);
	m_set_image     = cvCreateImage(cvSize(frameImage->width, frameImage->height), IPL_DEPTH_8U, 3);

	//	Generate the flag set for cvFindChessboardCorners
	findChessboardCornersFlag = createFindChessboardCornersFlag();
	
	// Convert the image to a binary image,
	//	then detect the corners
	cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );

    //	Convert from grayscale to binary
    cvThreshold( grayImage, m_image_binary, 128, 255, CV_THRESH_BINARY );

    // Convert to 3channel image
    cvMerge(m_image_binary, m_image_binary, m_image_binary, NULL, m_set_image);

	findFlag=cvFindChessboardCorners(
		m_set_image,
		//m_set_image,
		//cvSize( CORNER_WIDTH, CORNER_HEIGHT ),
		board_sz,
		corners,
		&cornerCount,
		findChessboardCornersFlag
	);
	
	if( findFlag != 0 ) {
		//	All corners were detected:
		//	refine the detected corner positions to subpixel accuracy

		CvTermCriteria criteria={ CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, MAX_ITERATIONS, EPSILON };
		cvFindCornerSubPix(
			grayImage,
			corners,
			cornerCount,
			cvSize( SEARCH_WINDOW_HALF_WIDTH, SEARCH_WINDOW_HALF_HEIGHT ),
			cvSize( DEAD_REGION_HALF_WIDTH, DEAD_REGION_HALF_HEIGHT ), 
			cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, MAX_ITERATIONS, EPSILON )
		);
	}
		
	//	Draw the corner positions
	cvDrawChessboardCorners( frameImage, board_sz, corners, cornerCount, findFlag );
	
	cvReleaseImage(&m_set_image);
	cvReleaseImage(&m_image_binary);
	
	return findFlag;
}
Example #9
int opticalflow( char * im1fname, char * im2fname, CvPoint2D32f * &source_points, CvPoint2D32f * &dest_points, char * &status )
{
	int count = MAX_COUNT;
	double quality = 0.15;
	// double min_distance = 2;
	double min_distance = 3;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	
	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	cvReleaseImage( &image1 );
	cvReleaseImage( &image2 );

	cvReleaseImage( &pyramid );
	cvReleaseImage( &second_pyramid );

	return count;
}
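A hedged sketch of the same detect / refine / track sequence written against the C++ API (cv::goodFeaturesToTrack, cv::cornerSubPix, cv::calcOpticalFlowPyrLK). The function name is hypothetical, the header paths assume the OpenCV 2.x layout, and the parameter values only roughly mirror the constants used above.

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

// Hedged sketch, not the author's code: track corners from im1 to im2.
int opticalFlowCpp(const char* im1fname, const char* im2fname,
                   std::vector<cv::Point2f>& src, std::vector<cv::Point2f>& dst,
                   std::vector<unsigned char>& status)
{
	cv::Mat img1 = cv::imread(im1fname, 0);   // load as grayscale
	cv::Mat img2 = cv::imread(im2fname, 0);
	if (img1.empty() || img2.empty()) return -1;

	cv::goodFeaturesToTrack(img1, src, 500, 0.15, 3.0, cv::Mat(), 7, false, 0.04);
	if (src.empty()) return 0;

	cv::cornerSubPix(img1, src, cv::Size(10, 10), cv::Size(-1, -1),
	                 cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 0.03));

	std::vector<float> err;
	cv::calcOpticalFlowPyrLK(img1, img2, src, dst, status, err, cv::Size(21, 21), 4);
	return (int)src.size();
}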
Example #10
void improve_precision(IplImage *image, CvPoint2D32f * cornersArray, int cornersCount)
{
  // cvFindCornerSubPix requires a grayscale image:
  IplImage * gray_image = cvCreateImage(cvSize(image->width,image->height), 8, 1);
  cvCvtColor(image, gray_image, CV_BGR2GRAY);

  // we use two termination criteria: number of iterations and/or precision (whichever is reached first)
  cvFindCornerSubPix(gray_image, cornersArray, cornersCount, cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

  cvReleaseImage(&gray_image);
}
Example #11
void OpticalFlow::PrepareTracking(IplImage* rgbImage,
                                  IplImage* currGrayImage, int curr_indx,
                                  ProbDistrProvider* pProbProv,
                                  const CuScanMatch& match,
                                  ConstMaskIt mask,
                                  int target_num_features,
                                  int winsize_width,
                                  int winsize_height,
                                  double min_distance,
                                  double max_feature_error)
{
  m_pProbDistrProvider = pProbProv;
  m_target_num_features = target_num_features;
  m_num_features_tracked = 0;
  m_prev_buf_meaningful = false;
  m_winsize_width = winsize_width;
  m_winsize_height = winsize_height;
  m_min_distance = min_distance;
  m_max_feature_error = max_feature_error;
  
  // first find a big set of features that sits on corners
  int num_corners = m_target_num_features*3;
  CPointVector corners;
  corners.resize(num_corners);
  CRect bbox(match);
  FindGoodFeatures(currGrayImage, bbox, corners);

  // then play with the color probability distribution to pick
  // the ones that are on skin color, or if those aren't enough,
  // pick some additional ones on skin colored pixels
  m_features[0].resize(m_target_num_features);
  m_features[1].resize(m_target_num_features);
  m_feature_status.resize(m_target_num_features);
  m_errors.resize(m_target_num_features);
  PickSkinColoredFeatures(rgbImage, corners, m_features[curr_indx], match, mask);

  // fine-tune feature locations
  cvFindCornerSubPix(currGrayImage,
                     (CvPoint2D32f*) &m_features[curr_indx][0],
                     m_target_num_features, 
                     cvSize(5,5), cvSize(-1,-1),
                     cvTermCriteria( CV_TERMCRIT_ITER, 10, 0.1f ));

  // set status right for these features
  for (int i=0; i<m_target_num_features; i++) {
    m_feature_status[i] = 1;
  }
  GetAverage(m_features[curr_indx], m_mean_feature_pos);

  m_condens_is_tracking = false;
  m_condens_init_rect = CRect(match);

  m_prepared = true;
}
Example #12
  //borrowed from Patrick...
  int do_calibration() {
    IplImage* image = NULL; 
    int xc = 12;
    int yc = 12;
    int nc = xc*yc;
    float side_length = 15;

    CvSize board_sz = cvSize(xc, yc);

    int ntrials = img_cnt;

    char filename[128];

    CvMat* object_points     = cvCreateMat( ntrials * nc, 3, CV_32FC1 );
    CvMat* image_points      = cvCreateMat( ntrials * nc, 2, CV_32FC1 );
    CvMat* point_counts      = cvCreateMat( ntrials, 1, CV_32SC1 );
    
    CvPoint2D32f* corners = new CvPoint2D32f[nc];

    for(int t=1; t<=ntrials; t++) {
      // load an image
      sprintf(filename, "%s/Image%d.jpg", dir_name, t);
      image = cvLoadImage(filename);
      if(!image){
	printf("Could not load image file: %s\n",filename);
	exit(0);
      }
      
      int corner_count;
      int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count, 
					  CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
      
      IplImage* gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
      cvCvtColor(image, gray, CV_BGR2GRAY);
      
      cvFindCornerSubPix(gray, corners, corner_count, 
			 cvSize(5, 5), cvSize(-1, -1),
			 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f ));
      
      printf("Image: %d, %d / %d\n", t, corner_count, nc);
      
      // get data points
      if(corner_count == nc && found != 0) {
	for(int c=0; c<nc; c++) {
	  CV_MAT_ELEM( *image_points,  float, (t-1)*nc+c, 0 ) = corners[c].x;
	  CV_MAT_ELEM( *image_points,  float, (t-1)*nc+c, 1 ) = corners[c].y;
	  CV_MAT_ELEM( *object_points, float, (t-1)*nc+c, 0 ) = (float)(c/xc)*side_length;
	  CV_MAT_ELEM( *object_points, float, (t-1)*nc+c, 1 ) = (float)(c%xc)*side_length;
	  CV_MAT_ELEM( *object_points, float, (t-1)*nc+c, 2 ) = 0.0f;
	}
	CV_MAT_ELEM( *point_counts,  int, t-1, 0 ) = nc;
      }
      else printf("Bad board! How did this happen?\n");
    }
Example #13
void cv::cornerSubPix( InputArray _image, InputOutputArray _corners,
                       Size winSize, Size zeroZone,
                       TermCriteria criteria )
{
    Mat corners = _corners.getMat();
    int ncorners = corners.checkVector(2);
    CV_Assert( ncorners >= 0 && corners.depth() == CV_32F );
    Mat image = _image.getMat();
    CvMat c_image = image;
    
    cvFindCornerSubPix( &c_image, (CvPoint2D32f*)corners.data, ncorners,
                        winSize, zeroZone, criteria );
}
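The assertion above means _corners can be anything that views as N two-element float points: a std::vector<cv::Point2f>, an N x 2 CV_32FC1 Mat, or an N x 1 CV_32FC2 Mat all pass checkVector(2). A hedged sketch of the two common call forms (the coordinates and window sizes are illustrative only):

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Hedged sketch: both containers are refined in place; gray is an 8-bit single-channel image.
void cornerSubPixCallForms(const cv::Mat& gray)
{
    cv::TermCriteria crit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.1);

    // 1) vector of points
    std::vector<cv::Point2f> pts;
    pts.push_back(cv::Point2f(10.f, 10.f));              // illustrative initial guess
    cv::cornerSubPix(gray, pts, cv::Size(5, 5), cv::Size(-1, -1), crit);

    // 2) N x 1 matrix of 2-channel floats
    cv::Mat ptsMat(1, 1, CV_32FC2, cv::Scalar(10.f, 10.f));
    cv::cornerSubPix(gray, ptsMat, cv::Size(5, 5), cv::Size(-1, -1), crit);
}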
Example #14
 void Calibrate(){
     CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
     int corner_count;
     int successes = 0;
     int step, frame = 0;
     
     IplImage *image = cvQueryFrame( capture );
     
     /*(IplImage *image = cvQueryFrame( capture );
     cvSetImageROI(image, cvRect(300, 300, 600, 600));
     tmp = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
     cvCopy(image, tmp, NULL);
     cvResetImageROI(image);
     image = cvCloneImage(tmp);//*/
     
     IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );
     
     // Capture Corner views loop until we've got n_boards
     // successful captures (all corners on the board are found)
     
     while( successes < n_boards ){
         // Skip every board_dt frames to allow user to move chessboard
         if( frame++ % board_dt == 0 ){
             // Find chessboard corners:
             int found = cvFindChessboardCorners( image, board_sz, corners,
                                                 &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
             
             // Get subpixel accuracy on those corners
             cvCvtColor( image, gray_image, CV_BGR2GRAY );
             cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ),
                                cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
             
             // Draw it
             cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
             //cvShowImage( "Calibration", image );
             
             // If we got a good board, add it to our data
             if( corner_count == board_n ){
                 step = successes*board_n;
                 for( int i=step, j=0; j < board_n; ++i, ++j ){
                     CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
                     CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
                     CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
                     CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
                     CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
                 }
                 CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
                 successes++;
             }
         }
Example #15
// ######################################################################
void NeoBrain::setTarget(const Point2D<int> loc, const Image<byte>& grey,
                         const int saliencyval, bool changeState)
{
  if (!itsAllowTracking.getVal())
    return;

#ifdef HAVE_OPENCV
  count = MAX_COUNT;

  IplImage* tmp = img2ipl(grey);
  if (count > 1)
  {
    IplImage* eig = cvCreateImage(cvGetSize(tmp), 32, 1);
    IplImage* temp = cvCreateImage(cvGetSize(tmp), 32, 1);
    double quality = 0.01;
    double min_distance = 5;

    cvGoodFeaturesToTrack(tmp, eig, temp, points[1], &count,
        quality, min_distance, 0, 3, 0, 0.04);
    cvReleaseImage(&eig);
    cvReleaseImage(&temp);

  } else {
    //get from the saliency map
    points[1][0].x = loc.i;
    points[1][0].y = loc.j;

  }
  cvFindCornerSubPix(tmp, points[1], count,
      cvSize(win_size,win_size), cvSize(-1,-1),
      cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,
        20,0.03));
  cvReleaseImageHeader(&tmp);

  IplImage *swap_temp;
  prev_grey = grey;
  CV_SWAP( prev_pyramid, pyramid, swap_temp );
  CV_SWAP( points[0], points[1], swap_points );


  //LINFO("Init %i point (%f,%f)\n", count, points[1][0].x, points[1][0].y);

  if (changeState)
    itsState = CHECK_TARGET;
  itsTracking = true;
#endif

  if (saliencyval >= 0)
    this->saySaliencyVal(byte(saliencyval));
}
Example #16
void get_calib(CvCapture* capture) {
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	cvNamedWindow("Calibration");
	CvMat* image_points = cvCreateMat(n_boards * board_n, 2, CV_32FC1);
	CvMat* object_points = cvCreateMat(n_boards * board_n, 3, CV_32FC1);
	CvMat* point_counts = cvCreateMat(n_boards, 1, CV_32SC1); // integer counts, read back with CV_MAT_ELEM(..., int, ...)
	CvMat* intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);
	CvMat* distortion_coeffs = cvCreateMat(5, 1, CV_32FC1);

	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	int corner_count;
	int successes = 0;
	int step, frame = 0;
	IplImage *image = cvQueryFrame(capture);
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);


	while(successes < n_boards) {
		if(frame++ % board_dt == 0) {
			int found = cvFindChessboardCorners(
					image, board_sz, corners, &corner_count,
					CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
			cvCvtColor(image, gray_image, CV_RGB2GRAY);
			cvFindCornerSubPix(gray_image, corners, corner_count, 
					cvSize(11,11), cvSize(-1, -1), 
					cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
			cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
			cvShowImage("Calibration", image);
			//get a good board, add to data
			if(corner_count == board_n) {
				printf("here\n");
				step = successes * board_n;
				for(int i = step, j = 0; j < board_n; ++i, ++j) {
					CV_MAT_ELEM(*image_points, float, i, 0) = corners[j].x;
					CV_MAT_ELEM(*image_points, float, i, 1) = corners[j].y;
					CV_MAT_ELEM(*object_points, float, i, 0) = j / board_w;
					CV_MAT_ELEM(*object_points, float, i, 1) = j % board_w;
					CV_MAT_ELEM(*object_points, float, i, 2) = 0.0f;
				}
				
				CV_MAT_ELEM(*point_counts, int, successes, 0) = board_n;
				successes++;
				printf("Nice shot!\n");
				int k = cvWaitKey(-1);
				while(k != 'n') k = cvWaitKey(-1);
			}
		}
Example #17
int StereoVision::calibrationAddSample(IplImage* imageLeft,IplImage* imageRight) {

    if(!calibrationStarted) return RESULT_FAIL;

    IplImage* image[2] = {imageLeft,imageRight};

    int succeses = 0;

    for(int lr=0; lr<2; lr++) {
        CvSize imageSize =  cvGetSize(image[lr]);

        if(imageSize.width != this->imageSize.width || imageSize.height != this->imageSize.height)
            return RESULT_FAIL;

        int cornersDetected = 0;

        //FIND CHESSBOARDS AND CORNERS THEREIN:
        int result = cvFindChessboardCorners(
                         image[lr], cvSize(cornersX, cornersY),
                         &ponintsTemp[lr][0], &cornersDetected,
                         CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE
                     );

        if(result && cornersDetected == cornersN) {


            //Calibration will suffer without subpixel interpolation
            cvFindCornerSubPix(
                image[lr], &ponintsTemp[lr][0], cornersDetected,
                cvSize(11, 11), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30, 0.01)
            );
            succeses++;
        }

    }
    if(2==succeses) {
        for(int lr=0; lr<2; lr++) {
            points[lr].resize((sampleCount+1)*cornersN);
            copy( ponintsTemp[lr].begin(), ponintsTemp[lr].end(),  points[lr].begin() + sampleCount*cornersN);
        }
        sampleCount++;
        return RESULT_OK;
    } else {
        return RESULT_FAIL;
    }
}
Example #18
    const CheckerboardDetector::Checkerboard &
    CheckerboardDetector::detect(const Img8u &image){
      const Img8u *useImage = &image;
      if(image.getFormat() != formatGray && image.getChannels() != 1){
        m_data->grayBuf.setFormat(formatGray);
        m_data->grayBuf.setSize(image.getSize());

        cc(&image, &m_data->grayBuf);

        useImage = &m_data->grayBuf;
      }
      img_to_ipl(useImage, &m_data->ipl);
      std::vector<CvPoint2D32f> corners(m_data->cb.size.getDim());

      CvSize s = {m_data->cb.size.width, m_data->cb.size.height };

      int n = corners.size();
      m_data->cb.found = cv::cvFindChessboardCorners(
          m_data->ipl, s, corners.data(), &n,
          cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FILTER_QUADS);

      bool optSubPix = getPropertyValue("subpixel opt.enabled");
      int radius = getPropertyValue("subpixel opt.radius");
      int innerR = getPropertyValue("subpixel opt.inner radius");
      if(innerR >= radius) innerR = radius -1;
      if(innerR == 0) innerR = -1;
      int maxIter = getPropertyValue("subpixel opt.max iterations");
      float minErr = getPropertyValue("subpixel opt.min error");

      if(m_data->cb.found && optSubPix){
        cvFindCornerSubPix(m_data->ipl, corners.data(), corners.size(), cvSize(radius,radius),
                           cvSize(innerR, innerR),
                           cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, maxIter, minErr));
      }

      if(m_data->cb.found){
        m_data->cb.corners.resize(corners.size());
        for(size_t i=0;i<corners.size();++i){
          m_data->cb.corners[i] = Point32f(corners[i].x, corners[i].y);
        }
      }else{
        m_data->cb.corners.clear();
      }

      return m_data->cb;
    }
Example #19
bool CalibrationFilter::findTarget() 
{
	int numExpectedPoints = chessBoardCalibObject.numPoints;

	CvPoint2D32f* points = new CvPoint2D32f[numExpectedPoints];
	int numFoundPoints;
	
#ifdef USE_CHESSBOARD
	CvSize boardSize = cvSize(CHESSBOARD_CORNERS_X, CHESSBOARD_CORNERS_Y);
	
	int found_target = cvFindChessboardCorners(input.getCvImage(),
											   boardSize,
											   points,
											   &numFoundPoints,
											   CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
#endif
	
	if (found_target)
	{
		cvFindCornerSubPix(grayInput.getCvImage(),
						   points,
						   numFoundPoints,
						   cvSize(11,11), cvSize(-1,-1),
						   cvTermCriteria(
								CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,
								30,				 0.1));
		
#ifdef USE_CHESSBOARD_ONLY
		cvDrawChessboardCorners(input, boardSize, points, numFoundPoints, found_target);
#endif
		
		CalibrationCapture* capture = new CalibrationCapture(chessBoardPoints);
		capture->image = input;
		capture->setPoints(points, numFoundPoints);
		
		captures.push_back(capture);
	}
	
	delete [] points;
	return found_target;
}
void FkPaperKeyboard_TypeA::detectKeyButtonCorner(IplImage* srcImage){
	//500,250
	CvSize transSize = cvSize(640,480);
	IplImage* eigImage = cvCreateImage(transSize, IPL_DEPTH_32F, 1);	// cvGoodFeaturesToTrack needs 32-bit float scratch images
	IplImage* tempImage = cvCreateImage(transSize, IPL_DEPTH_32F, 1);
	IplImage* grayImage = cvCreateImage(transSize, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(transSize, IPL_DEPTH_8U, 1);
	int keyButtonCornerCount = 316;
	cvCopy(srcImage, grayImage);
	//cvShowImage("ssssssssssssssss",srcImage);
	//cvShowImage("gggggggggggggggg",grayImage);
	

	cvGoodFeaturesToTrack(grayImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.03, 7, NULL, 10, 0);
	cvFindCornerSubPix (grayImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	initKeyButtonCorner();
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&dstImage);
}
Example #21
void ofxOpticalFlow::calc(ofxCvGrayscaleImage& prevFrame, ofxCvGrayscaleImage& currentFrame){
	int cornerCount = MAX_CORNERS;
	cvGoodFeaturesToTrack(prevFrame.getCvImage(), eigImg, tempImg, cornersPrev, &cornerCount, 0.01, 5.0, 0, 3, 0, 0.04);
	
	cvFindCornerSubPix(prevFrame.getCvImage(), cornersPrev, cornerCount, 
					   windowSize, cvSize(-1,-1), 
					   cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	
	cvCalcOpticalFlowPyrLK(prevFrame.getCvImage(), currentFrame.getCvImage(), pyrPrev, 
						   pyrCurr, cornersPrev, cornersCurr, cornerCount, windowSize, 
						   5, featuresFound, NULL,
						   cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), 0);
	
	flowPoints.clear();
	for(int i=0; i<cornerCount; i++){
		if(featuresFound[i] == 0){
			continue;
		}
		flowPoints.push_back(ofxCvFlowPoint(cornersPrev[i].x, cornersPrev[i].y, cornersCurr[i].x, cornersCurr[i].y));
	}
}
Example #22
bool CamCalib::FindChessboard(IplImage *src, IplImage *dst)
{
	IplImage *gray = cvCreateImage (cvGetSize(src), IPL_DEPTH_8U, 1);
	
	cvCvtColor(src, gray, CV_BGR2GRAY);

	// Find the chessboard corners
	CvPoint2D32f* corners = new CvPoint2D32f[_board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(src, cvSize(_board_w, _board_h), corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );

	// Refine the detected corner coordinates to subpixel accuracy.
	cvFindCornerSubPix (gray, corners, corner_count, cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

	// Draw the corners on the dst image.
	cvDrawChessboardCorners (dst, cvSize(_board_w, _board_h), corners, corner_count, found);

	// If the corners were found correctly, store the corner data.
	bool ret = false;
	if (found && corner_count == _board_n) {
		for( int i=_successes*_board_n, j=0; j<_board_n; ++i, ++j ) {
			CV_MAT_ELEM(*_image_points, float, i, 0) = corners[j].x;
			CV_MAT_ELEM(*_image_points, float, i, 1) = corners[j].y;
			CV_MAT_ELEM(*_object_points,float, i, 0) = (float)(j%_board_w)*_cell_w;
			CV_MAT_ELEM(*_object_points,float, i, 1) = (float)(_board_h - j/_board_w - 1)*_cell_h;
			CV_MAT_ELEM(*_object_points,float, i, 2) = 0.0f;
		}
		CV_MAT_ELEM(*_point_counts, int, _successes, 0) = _board_n; 
		
		ret = true;
	}	

	delete [] corners;
	cvReleaseImage(&gray);  
	return ret;
}
Example #23
void cameraCalibration() {
	board_w = 5; // Board width in squares
	board_h = 8; // Board height
	n_boards = 8; // Number of boards
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CameraControl* cc = camera_control_new(0);


	//cvNamedWindow("Calibration", 0);
	// Allocate storage
	CvMat* image_points = cvCreateMat(n_boards * board_n, 2, CV_32FC1);
	CvMat* object_points = cvCreateMat(n_boards * board_n, 3, CV_32FC1);
	CvMat* point_counts = cvCreateMat(n_boards, 1, CV_32SC1);
	CvMat* intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);
	CvMat* distortion_coeffs = cvCreateMat(5, 1, CV_32FC1);
	IplImage *image;

	CvPoint2D32f corners[board_n];
	int i = 0;
	int j = 0;

	for (i = 0; i < board_n; i++)
		corners[i] = cvPoint2D32f(0, 0);

	int corner_count;
	int successes = 0;
	int step = 0;



	while (1) {
		cvWaitKey(10);
		image = camera_control_query_frame(cc);
		if (image)
			break;
	}
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);

	// Capture corner views: loop until we've got n_boards
	// successful captures (all corners on the board are found)
	while (successes < n_boards) {
		// Skip every board_dt frames to allow user to move chessboard
		// skip a second to allow user to move the chessboard
		image = camera_control_query_frame(cc); // Get next image
		//if (frame++ % board_dt == 0)
		{
			// Find chessboard corners:
			int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
			cvWaitKey(1);

			// Get subpixel accuracy on those corners
			cvCvtColor(image, gray_image, CV_BGR2GRAY);
			cvFindCornerSubPix(gray_image, corners, corner_count, cvSize(11, 11), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

			// Draw it
			cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
			char text[222];
			sprintf(text, "calibration image %d/%d", successes, n_boards);
			th_put_text(image, text, cvPoint(20, 20), th_white, 1.0);
			cvShowImage("Calibration", image);

			// If we got a good board, add it to our data
			if (corner_count == board_n ) {
				step = successes * board_n;
				for (i = step, j = 0; j < board_n; ++i, ++j) {
					CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
					CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
					CV_MAT_ELEM( *object_points, float, i, 0 ) = j / board_w;
					CV_MAT_ELEM( *object_points, float, i, 1 ) = j % board_w;
					CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
				}
				CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
				successes++;
			}

		}
	}
Example #24
bool CvCalibFilter::FindEtalon( CvMat** mats )
{
    bool result = true;

    if( !mats || etalonPointCount == 0 )
    {
        assert(0);
        result = false;
    }

    if( result )
    {
        int i, tempPointCount0 = etalonPointCount*2;

        for( i = 0; i < cameraCount; i++ )
        {
            if( !latestPoints[i] )
                latestPoints[i] = (CvPoint2D32f*)
                    cvAlloc( tempPointCount0*2*sizeof(latestPoints[0]));
        }

        for( i = 0; i < cameraCount; i++ )
        {
            CvSize size;
            int tempPointCount = tempPointCount0;
            bool found = false;

            if( !CV_IS_MAT(mats[i]) && !CV_IS_IMAGE(mats[i]))
            {
                assert(0);
                break;
            }

            size = cvGetSize(mats[i]);

            if( size.width != imgSize.width || size.height != imgSize.height )
            {
                imgSize = size;
            }

            if( !grayImg || grayImg->width != imgSize.width ||
                grayImg->height != imgSize.height )
            {
                cvReleaseMat( &grayImg );
                cvReleaseMat( &tempImg );
                grayImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
                tempImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
            }

            if( !storage )
                storage = cvCreateMemStorage();

            switch( etalonType )
            {
            case CV_CALIB_ETALON_CHESSBOARD:
                if( CV_MAT_CN(cvGetElemType(mats[i])) == 1 )
                    cvCopy( mats[i], grayImg );
                else
                    cvCvtColor( mats[i], grayImg, CV_BGR2GRAY );
                found = cvFindChessBoardCornerGuesses( grayImg, tempImg, storage,
                                                       cvSize( cvRound(etalonParams[0]),
                                                       cvRound(etalonParams[1])),
                                                       latestPoints[i], &tempPointCount ) != 0;
                if( found )
                    cvFindCornerSubPix( grayImg, latestPoints[i], tempPointCount,
                                        cvSize(5,5), cvSize(-1,-1),
                                        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,10,0.1));
                break;
            default:
                assert(0);
                result = false;
                break;
            }

            latestCounts[i] = found ? tempPointCount : -tempPointCount;
            result = result && found;
        }
    }

    if( storage )
        cvClearMemStorage( storage );

    return result;
}
Example #25
/*!
  Initialise the tracking by extracting KLT keypoints on the provided image.

  \param I : Grey level image used as input. This image should have only 1 channel.
  \param mask : Image mask used to restrict the keypoint detection area.
  If mask is NULL, all the image will be considered.

  \exception vpTrackingException::initializationError : If the image I is not
  initialized, or if the image or the mask have bad coding format.
*/
void vpKltOpencv::initTracking(const IplImage *I, const IplImage *mask)
{
  if (!I) {
    throw(vpException(vpTrackingException::initializationError,  "Image Not initialized")) ;
  }

  if (I->depth != IPL_DEPTH_8U || I->nChannels != 1)  {
    throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
  }

  if (mask) {
    if (mask->depth != IPL_DEPTH_8U || mask->nChannels != 1) 	{
      throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
    }
  }

  //Create the buffers
  CvSize Sizeim, SizeI;
  SizeI = cvGetSize(I);
  bool b_imOK = true;
  if(image != NULL){
    Sizeim = cvGetSize(image);
    if(SizeI.width != Sizeim.width || SizeI.height != Sizeim.height) b_imOK = false;
  }
  if(image == NULL || prev_image == NULL || pyramid==NULL || prev_pyramid ==NULL || !b_imOK){
    reset();
    image = cvCreateImage(cvGetSize(I), 8, 1);image->origin = I->origin;
    prev_image = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    prev_pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
  }else{
    swap_temp = 0;
    countFeatures = 0;
    countPrevFeatures = 0;
    flags = 0;
    initialized = 0;
    globalcountFeatures = 0;
  }

  initialized = 1;

  //Import
  cvCopy(I, image, 0);

  //Look for interest points
  countFeatures = maxFeatures;
  countPrevFeatures = 0;
  IplImage* eig = cvCreateImage(cvGetSize(image), 32, 1);
  IplImage* temp = cvCreateImage(cvGetSize(image), 32, 1);
  cvGoodFeaturesToTrack(image, eig, temp, features,
                        &countFeatures, quality, min_distance,
                        mask, block_size, use_harris, harris_free_parameter);
  cvFindCornerSubPix(image, features, countFeatures, cvSize(win_size, win_size),
                     cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|
                                                  CV_TERMCRIT_EPS,20,0.03));
  cvReleaseImage(&eig);
  cvReleaseImage(&temp);

  if (OnInitialize)
    OnInitialize(_tid);

  //printf("Number of features at init: %d\n", countFeatures);
  for (int boucle=0; boucle<countFeatures;boucle++)  {
    featuresid[boucle] = globalcountFeatures;
    globalcountFeatures++;
    
    if (OnNewFeature){
      OnNewFeature(_tid, boucle, featuresid[boucle], features[boucle].x,
                   features[boucle].y);
    }
  }
}
Example #26
void get_bird_eye(CvCapture* capture) {
	printf("haha\n");
	//get bird_eye picture
	cvNamedWindow("Get_birdeye");
	
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");
	
	IplImage *image = cvQueryFrame(capture);
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	
	int board_n = board_w * board_h;
	
	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	CvSize board_sz = cvSize(board_w, board_h);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	
	int frame = 0;
	int corner_count;
	bool catch_bird = false;


	while(!catch_bird) {
		IplImage *t = cvCloneImage(image);
		if(frame++ % board_dt == 0) {
			cvRemap(t, image, mapx, mapy);
			
			int found = cvFindChessboardCorners(
					image, 
					board_sz, 
					corners, 
					&corner_count,
					CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
			);

			cvCvtColor(
					image, 
					gray_image, 
					CV_RGB2GRAY
			);
			
			cvFindCornerSubPix(
					gray_image, 
					corners, 
					corner_count, 
					cvSize(11,11), 
					cvSize(-1, -1), 
					cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)
			);
			
			cvDrawChessboardCorners(
					image, 
					board_sz, 
					corners, 
					corner_count, 
					found
			);
			
			cvShowImage("Get_birdeye", image);
			//get a good board, add to data
			if(corner_count == board_n) {
				catch_bird = true;
				printf("That's it!\n");
			}
		}
		int c;
		if(catch_bird) c = cvWaitKey(-1);
		else c = cvWaitKey(15);
		if(catch_bird && c == 's') {
			cvSaveImage("./Resource/bird-eye.jpg", t, 0);
			printf("save at ./Resource/bird-eye.jpg\n");
		}
		else catch_bird = false;
		if(c == 'p') {
			c = 0;
			while(c!='p' && c!= 27){
				c = cvWaitKey(250);
			}
		}
		image = cvQueryFrame(capture);
	}
}
Example #27
int main( int argc, char* argv[] ) {

	//IplImage* img = cvCreateImage(imSize,IPL_DEPTH_8U,3);
	IplImage* img = cvLoadImage(imcd0,CV_LOAD_IMAGE_UNCHANGED);
	IplImage* imgA = cvLoadImage(imcd0,CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(imcd1,CV_LOAD_IMAGE_GRAYSCALE);
	imSize = cvSize(img->width,img->height);
	rmax=0.8*((imSize.width>imSize.height)?imSize.height/2:imSize.width/2);
	rmin=0.2*((imSize.width>imSize.height)?imSize.height/2:imSize.width/2);
	lx=0.5*imSize.width;
	ly=0.5*imSize.height;
	int win_siz	= 7;
	int arr_siz	= NUMX*NUMY;
	CvPoint2D32f p0 = cvPoint2D32f(imSize.width/2,imSize.height/2);
	IplImage*	pyr = cvCreateImage(imSize,8,1);
	IplImage*	pyr_old = cvCreateImage(imSize,8,1);
	char* status	=0;
	status = (char*)cvAlloc(arr_siz);


	cvNamedWindow("testWindow");
	cvNamedWindow("ImgA");
	cvShowImage("ImgA", imgA);
	cvNamedWindow("ImgB");
	cvShowImage("ImgB", imgB);

	CvPoint2D32f*	arrg		= new CvPoint2D32f[arr_siz];
	CvPoint2D32f*	arrg_old	= new CvPoint2D32f[arr_siz];

	int counter=0;
	for(int x=0; x<NUMX; x++) {
		for(int y=0; y<NUMY; y++) {
			arrg_old[counter].x = p0.x + (-lx/2) + lx*x/NUMX;
			arrg_old[counter].y = p0.y + (-ly/2) + lx*y/NUMY;
			counter++;
		}
	}
	cout << "f**k-0" << endl;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(arrg_old[i]),CV_RGB(0,0,0),4);
	}
	cvShowImage("testWindow",img);
	cvWaitKey(100);
	cout << "f**k-1" << endl;

	cvFindCornerSubPix(imgA,
	        			arrg_old,
	        			arr_siz,
	        			cvSize(win_siz,win_siz),
	        			cvSize(2,2),
	        			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
	//cvReleaseImage(&img);
	//img = cvLoadImage(imcd0,CV_LOAD_IMAGE_UNCHANGED);
	cout << "f**k-2" << endl;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(arrg_old[i]),CV_RGB(255,0,255),4);
	}
	cvShowImage("testWindow",img);
	cvWaitKey(100);
	cout << "f**k-3" << endl;

	float errors[arr_siz];
	cvCalcOpticalFlowPyrLK(imgA,imgB,
	        			pyr_old, pyr,
	        			arrg_old,
	        			arrg,
	        			arr_siz,
	        			cvSize(win_siz,win_siz),
	        			5,
	        			status,
	        			errors,
	        			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.3),
	        			0);

	CvPoint2D32f dp, dp2;
	CvPoint2D32f center = cvPoint2D32f(0., 0.);
	bool arr_draw[arr_siz];
	int count = 0;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg[i]),cvPointFrom32f(arrg[i]),CV_RGB(0,255,0),4);
		CvScalar color = CV_RGB(255,0,0);
		dp = getDp(arrg[i],arrg_old[i]);
		double len = getLength(dp);
//		if(errors[i]<50) {
		if(getLength(dp)>3) {
			color = CV_RGB(255,0,0);
		} else {
			color = CV_RGB(100,255,100);
		}
		int nc = i+1;
		arr_draw[i] = false;
		if((nc>-1) && (nc<arr_siz) && len>3) {
			dp2=getDp(arrg[nc],arrg_old[nc]);
			if(getLength(dp2)>2) {
				CvPoint2D32f ctmp = getCrossPoint(arrg_old[i],getOrtoVec(dp), arrg_old[nc],getOrtoVec(dp2));
//				cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(ctmp),CV_RGB(0,0,0),1);
//				cvLine(img,cvPointFrom32f(arrg[i]),cvPointFrom32f(ctmp),CV_RGB(0,0,0),1);
				center = getSum(center,ctmp);
				count++;
				arr_draw[i] = true;
			}
		}
		drawArrow(img,arrg_old[i],arrg[i],color,2,15.);
		cout << "status=[" << (int)status[i] << "], error=[" << errors[i] << "]" << endl;
//		cout << "[" << arrg[i].x << "," << arrg[i].y << "]" << endl;

	}
	center=getDiv(center,count);

	cvCircle(img,cvPointFrom32f(center),10,CV_RGB(0,200,0),1);
	double df = 0;
	for(int i=0; i<arr_siz; i++) {
		if(arr_draw[i]) {
			cvLine(img, cvPointFrom32f(center), cvPointFrom32f(arrg_old[i]),CV_RGB(0,0,0),1);
			cvLine(img, cvPointFrom32f(center), cvPointFrom32f(arrg[i]),CV_RGB(0,0,0),1);
			df += 180.0*(getLength(getDel(arrg[i],arrg_old[i])))
			/(CV_PI*getLength(getDel(arrg_old[i],center)));
		}
	}
	CvFont font, fontbg;
	cvInitFont(&font,CV_FONT_HERSHEY_PLAIN, 2, 2, 0.0, 2, CV_AA);
	cvInitFont(&fontbg,CV_FONT_HERSHEY_PLAIN, 2, 2, 0.0, 8, CV_AA);
	char buff[100];
	bzero(buff,sizeof(buff));
	sprintf(buff,"angle=%0.1f degrees",(df/count));
	cvPutText(img,buff,cvPoint(10,25),&fontbg,CV_RGB(0,0,0));
	cvPutText(img,buff,cvPoint(10,25),&font,CV_RGB(255,0,0));

/*
	for(int r=0; r<NUMR; r++) {
		for(int f=0; f<NUMF; f++) {
			double pfi = 2*CV_PI*f/NUMF;
			double ro	= rmin + (rmax-rmin)*r/NUMR;
			p1.x = p0.x + ro*cos(pfi);
			p1.y = p0.y + ro*sin(pfi);
			//cvLine(img,cvPointFrom32f(p1),cvPointFrom32f(p1),CV_RGB(0,0,255),2);
			drawArrow(img,p0,p1,CV_RGB(255,0,0));
		}
	}
*/
	cvShowImage("testWindow",img);
	cvWaitKey(0);

	cvDestroyWindow("testWindow");
	cvReleaseImage(&img);
	cout << "Shutdown" << endl;
	return 0;
}
int main (int argc, char **argv)
{
	if(argc < 8) {
		printf("usage: Compute <skip> <out_dir> <num_cameras> <corner_rows> <corner_cols> <int_dir1> <rgb_images1> <ir_images1> <int_dir2> ...\n");
		return 1;
	}

	int skip = atoi(argv[1]);
	char * outDir = argv[2];
	int numCameras = atoi(argv[3]);
	int corner_rows = atoi(argv[4]);
	int corner_cols = atoi(argv[5]);
	int argBase = 6;
	if(argc != argBase+3*numCameras) {
		printf("Expected %d intrinsics entries for %d cameras.\n", numCameras, numCameras);
		return -1;
	}

	char temp[1024];
	int i, j, k, type;  

	CvMat* cam_mat = cvCreateMat( 3, 3, CV_32FC1 );
	CvMat* dist_coeff = cvCreateMat( 5, 1, CV_32FC1 );
	CvPoint2D32f* corners = (CvPoint2D32f*) malloc(sizeof(CvPoint2D32f)*corner_rows*corner_cols);
	for(type = 0; type <= 1; type++) {
		for(i = 0; i < numCameras; i++) {
			int numSkipped = 0;
			int numImages = atoi(argv[argBase+i*3+(type==0?1:2)]);
			char * intDir = argv[argBase+i*3];
			CvMat* image_points = cvCreateMat(numImages*corner_rows*corner_cols, 2, CV_32FC1 );
			CvMat* object_points = cvCreateMat(numImages*corner_rows*corner_cols, 3, CV_32FC1 );
			CvMat* point_counts = cvCreateMat(numImages, 1, CV_32SC1 );
			for(j = 0; j < numImages; j++) {
				//load image
				sprintf(temp, "%s/img_%s_%d.png", intDir, type==0?"rgb":"ir", j*skip);
				printf("loading %s...\n",temp);

				IplImage *img = cvLoadImage(temp, CV_LOAD_IMAGE_GRAYSCALE);
				if(img != NULL) {
					int corner_count = 0;
					if(cvFindChessboardCorners(img, cvSize(corner_cols, corner_rows), corners, &corner_count, 
						CV_CALIB_CB_ADAPTIVE_THRESH|CV_CALIB_CB_NORMALIZE_IMAGE)) {

							cvFindCornerSubPix(img, corners, corner_count, cvSize(11, 11), cvSize(-1, -1),
								cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, REFINE_MAX_ITER, REFINE_EPSILON));

							int offset = (j-numSkipped)*corner_rows*corner_cols;
							for(k = 0; k < corner_rows*corner_cols; k++) {
								CV_MAT_ELEM(*image_points, float, offset+k, 0 ) = corners[k].x;
								CV_MAT_ELEM(*image_points, float, offset+k, 1 ) = corners[k].y;
								CV_MAT_ELEM(*object_points, float, offset+k, 0 ) = SQUARE_SIZE*(k%corner_cols);
								CV_MAT_ELEM(*object_points, float, offset+k, 1 ) = SQUARE_SIZE*(k/corner_cols);
								CV_MAT_ELEM(*object_points, float, offset+k, 2 ) = 0.0f;
							}
							CV_MAT_ELEM(*point_counts, int, j-numSkipped, 0 ) = corner_rows*corner_cols;

					} else {
						printf("Cannot find corners in image %s, skipping\n", temp);
						numSkipped++;
						//return -1;
					}


					cvReleaseImage(&img);
				} else {
Example #29
void FeatureTracker::track_features(geometry_msgs::PoseStamped mapPose){
  //set the initial number of features to the max number we want to find
  int feature_count=num_features;
  printf("pose %f %f %f\n",mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  int edge_pixels=5;
  
  //check if there were features from the last image to keep tracking
  if(last_feature_count>0){
    //if there were call cvCalcOpticalFlowPyrLK();
    //find matches between last good features and current image features
    //    store matches in featuresB
    cvCalcOpticalFlowPyrLK(last_image,image_rect,pyrA,pyrB,features,featuresB, last_feature_count,cvSize(win_size,win_size) ,4,last_features_status,track_error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,.3),0);
  }
  
  printf("got image flow\n");
  //    assign last_feature_id values for matched features and set the non matched spots to -1
  
  //find new features and subpixel them
  
  //I SHOULD ADD THE IMAGE FLOW VALUES AS FEATURES NOW BEFORE FINDING NEW FEATURES
  
  //find all good features
  cvGoodFeaturesToTrack(image_rect, eigImage, tempImage, features, &feature_count, quality_level, min_distance, NULL, block_size);
  
  //subpixel good features
  cvFindCornerSubPix(image_rect,features,feature_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
  
  printf("subpixeled image\n");
  
  //for all the features in features B, find their matches in the newly found features
  //add all the matches to their correct featuremanager, for the non matching, make a new
  //feature manager and add them to it
  
  //for all features by now we need their ray and the robot pose at that location
  //draw dots on image where features are
  
  
  
  //set the feature ids to a control value
  
  for(int i=0;i<num_features;i++){
    current_feature_id[i]=-1;
  }
  
  for(int i=0;i<last_feature_count;i++){
    //for the previously found features in list b
    if(last_features_status[i]>0){
      for(int j=0;j<feature_count;j++){
	//for every feature found in this image
	
	//determine if the two overlap in a meaningful way
	int xdiff=featuresB[i].x-features[j].x;
	int ydiff=featuresB[i].y-features[j].y;
	//if the pixels are within some margin of eachother
	if(sqrt(xdiff*xdiff + ydiff*ydiff)<pixel_tracking_margin){
	  //if they do set the current id for j to the id of i
	  current_feature_id[j]=last_feature_id[i];
	  printf("feature found %d %d",last_feature_id[i],i);
	}
      }
    }
  }
  
  printf("assigned IDs image\n");
  
  
  for(int i=0;i<feature_count;i++){
    
    printf("looping\n");
    if(current_feature_id[i]>=0){
    printf("prev feature match\n");
      //if we matched a previous feature
      //add our new feature to the previous features list
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	featureList[current_feature_id[i]].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
      }else{
	current_feature_id[i]=-1;
      }
      
    }else{
    printf("new feature \n");
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	printf("new good feature \n");
	//if we didn't
	//create a new feature group in the list
	current_feature_id[i]=feature_number;
	//add the new feature to the feature list
	featureList.push_back(FeatureManager());

	featureList[feature_number].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
	++feature_number;
      }
    }
  }
   
//  printf("features: ");
  for(int i=0;i<num_features;i++){
    if(i<feature_count){
     last_feature_id[i]=current_feature_id[i];
    }
    else{
      last_feature_id[i]=-1;
    }
 //   printf(" %d ",current_feature_id[i]);
  }
  printf("\n");
  
  
  last_feature_count=feature_count;
  
}
Example #30
void bird_eye() {
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");

	IplImage* image = cvLoadImage("./Resource/bird-eye.jpg", 1);
	IplImage* gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	IplImage* t = cvCloneImage(image);

	cvRemap(t, image, mapx, mapy);
	
	cvNamedWindow("Chessboard");
	cvShowImage("Chessboard", image);
	int c = cvWaitKey(-1);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	
	int found = cvFindChessboardCorners(
			image, 
			board_sz, 
			corners, 
			&corner_count, 
			CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
	);
	
	if(!found){
		printf("couldn't aquire chessboard!\n");
		return;
	}
	
	cvFindCornerSubPix(
			gray_image, 
			corners, 
			corner_count, 
			cvSize(11, 11), 
			cvSize(-1, -1), 
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
	);

	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;			objPts[0].y = 0;
	objPts[1].x = board_w - 1;	objPts[1].y = 0;
	objPts[2].x = 0;			objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;	objPts[3].y = board_h - 1;
	imgPts[0]   = corners[0];
	imgPts[1]	= corners[board_w - 1];
	imgPts[2]	= corners[(board_h - 1) * board_w];
	imgPts[3]	= corners[(board_h - 1) * board_w + board_w - 1];

	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	cvDrawChessboardCorners(
		image,
		board_sz,
		corners,
		corner_count,
		found
	);

	cvShowImage("Chessboard", image);

	CvMat *H = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	float z = 25;
	int key = 0;
	IplImage * birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");

	while(key != 27) {
		CV_MAT_ELEM(*H, float, 2, 2) = z;

		cvWarpPerspective(
			image,
			birds_image,
			H,
			CV_INTER_LINEAR| CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
		);

		cvShowImage("Birds_Eye", birds_image);

		key = cvWaitKey();
		if(key == 'u') z += 0.5;
		if(key == 'd') z -= 0.5;
	}

	cvSave("H.xml", H);
}
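The loop above changes the apparent camera height by writing z into H(2,2) before the inverse warp. Below is a hedged C++ sketch of the same bird's-eye warp (cv::getPerspectiveTransform / cv::warpPerspective); it assumes objPts and imgPts are filled exactly as in bird_eye(), and the function name is hypothetical.

#include <opencv2/imgproc/imgproc.hpp>

// Hedged sketch: ground-plane (objPts) to image (imgPts) homography, then inverse warp.
cv::Mat birdsEyeView(const cv::Mat& undistorted,
                     const cv::Point2f objPts[4], const cv::Point2f imgPts[4], double z)
{
	cv::Mat H = cv::getPerspectiveTransform(objPts, imgPts);   // 3x3, CV_64F
	H.at<double>(2, 2) = z;                                    // same height knob as the loop above

	cv::Mat birds;
	cv::warpPerspective(undistorted, birds, H, undistorted.size(),
	                    cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);
	return birds;
}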