Code Example #1
void compute_and_display_image_corners(char * imageName, CvSize * imageSize, CvSize chessboardSize, CvPoint2D32f * cornersArrayToFillIn)
{
    IplImage * img = 0;
    int cornersCount = 0;
    int patternWasFound = 0;
    int i;


    img = cvLoadImage(imageName, 1);
    if(img == 0){
        printf("Could not load image %s\n", imageName);
        return;
    }

    *imageSize = cvGetSize(img); // useful only for the calibration function

    // initialise the given array
    for(i = 0; i < chessboardSize.height * chessboardSize.width; i++){
        cornersArrayToFillIn[i].x = 0;
        cornersArrayToFillIn[i].y = 0;
    }

    printf("OK1\n");
    // core algorithm
    patternWasFound = cvFindChessboardCorners(img, chessboardSize, cornersArrayToFillIn, &cornersCount, 0);
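    // cvFindChessboardCorners fills cornersArrayToFillIn and sets cornersCount;
    // its return value is non-zero only when every interior corner was located.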

    printf("OK2\n");

    // display_array_values(cornersArrayToFillIn,chessboardSize.height*chessboardSize.width);
    improve_precision(img, cornersArrayToFillIn, cornersCount);
    // display_array_values(cornersArrayToFillIn,chessboardSize.height*chessboardSize.width);

    // visual only part

    cvDrawChessboardCorners(img, chessboardSize, cornersArrayToFillIn, cornersCount, patternWasFound);
    cvNamedWindow(imageName, CV_WINDOW_AUTOSIZE);
    cvMoveWindow(imageName, 100, 100);
    cvShowImage(imageName, img );
    cvWaitKey(200);
    cvDestroyWindow(imageName);


    // end
    cvReleaseImage(&img );

}
Code Example #2
File: CamCalib.cpp  Project: Abdul144/Pinocchio
bool CamCalib::FindChessboard(IplImage *src, IplImage *dst)
{
	IplImage *gray = cvCreateImage (cvGetSize(src), IPL_DEPTH_8U, 1);
	
	cvCvtColor(src, gray, CV_BGR2GRAY);

	// Find the chessboard corners
	CvPoint2D32f* corners = new CvPoint2D32f[_board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(src, cvSize(_board_w, _board_h), corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );

	// Refine the detected corners to sub-pixel accuracy.
	cvFindCornerSubPix (gray, corners, corner_count, cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

	// Draw the corners on the dst image.
	cvDrawChessboardCorners (dst, cvSize(_board_w, _board_h), corners, corner_count, found);

	// If the corners were found successfully, store the corner data.
	bool ret = false;
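	// Each successful view appends _board_n rows at offset _successes*_board_n;
	// object-space y is flipped so the board origin sits at its bottom-left corner.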
	if (found && corner_count == _board_n) {
		for( int i=_successes*_board_n, j=0; j<_board_n; ++i, ++j ) {
			CV_MAT_ELEM(*_image_points, float, i, 0) = corners[j].x;
			CV_MAT_ELEM(*_image_points, float, i, 1) = corners[j].y;
			CV_MAT_ELEM(*_object_points,float, i, 0) = (float)(j%_board_w)*_cell_w;
			CV_MAT_ELEM(*_object_points,float, i, 1) = (float)(_board_h - j/_board_w - 1)*_cell_h;
			CV_MAT_ELEM(*_object_points,float, i, 2) = 0.0f;
		}
		CV_MAT_ELEM(*_point_counts, int, _successes, 0) = _board_n; 
		
		ret = true;
	}	

	delete [] corners;
	cvReleaseImage(&gray);  
	return ret;
}
Code Example #3
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Images
    std::vector<IplImage*> images;
    printf("Press space key to take a sample picture !\n");

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(1);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        IplImage *image = ardrone.getImage();

        // Convert the camera image to grayscale
        IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // Detect the chessboard
        int corner_count = 0;
        CvSize size = cvSize(PAT_COL, PAT_ROW);
        CvPoint2D32f corners[PAT_SIZE];
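        // PAT_SIZE is assumed to be PAT_COL * PAT_ROW (one slot per interior corner)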
        int found = cvFindChessboardCorners(gray, size, corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FAST_CHECK);

        // Chessboard detected
        if (found) {
            // Draw corners
            cvDrawChessboardCorners(image, size, corners, corner_count, found);

            // If you push Space key
            if (key == ' ') {
                // Add to buffer
                images.push_back(gray);
            }
            else {
                // Release the image
                cvReleaseImage(&gray);
            }
        }
        // Failed to detect
        else {
            // Release the image
            cvReleaseImage(&gray);
        }

        // Display the image
        cvDrawText(image, cvPoint(15, 20), "NUM = %d", (int)images.size());
        cvShowImage("camera", image);
    }

    // Destroy the window
    cvDestroyWindow("camera");

    // At least one image was taken
    if (!images.empty()) {
        // Total number of images
        const int num = (int)images.size();

        //// For debug
        //for (int i = 0; i < num; i++) {
        //    char name[256];
        //    sprintf(name, "images[%d/%d]", i+1, num);
        //    cvShowImage(name, images[i]);
        //    cvWaitKey(0);
        //    cvDestroyWindow(name);
        //}

        // Ask save parameters or not
        if (cvAsk("Do you save the camera parameters ? (y/n)\n")) {
            // Detect corners
            int *p_count = (int*)malloc(sizeof(int) * num);
            CvPoint2D32f *corners = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f) * num * PAT_SIZE);
            for (int i = 0; i < num; i++) {
                // Detect chessboard
                int corner_count = 0;
                CvSize size = cvSize(PAT_COL, PAT_ROW);
                int found = cvFindChessboardCorners(images[i], size, &corners[i * PAT_SIZE], &corner_count);

                // Convert the corners to sub-pixel
                cvFindCornerSubPix(images[i], &corners[i * PAT_SIZE], corner_count, cvSize(3, 3), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
                p_count[i] = corner_count;
            }

            // Set the 3D position of patterns
            CvPoint3D32f *objects = (CvPoint3D32f*)cvAlloc(sizeof(CvPoint3D32f) * num * PAT_SIZE);
            for (int i = 0; i < num; i++) {
                for (int j = 0; j < PAT_ROW; j++) {
                    for (int k = 0; k < PAT_COL; k++) {
                        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;
                        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;
                        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;
                    }
                }
            }

            // Create matrices
            CvMat object_points, image_points, point_counts;
            cvInitMatHeader(&object_points, num * PAT_SIZE, 3, CV_32FC1, objects);
            cvInitMatHeader(&image_points,  num * PAT_SIZE, 1, CV_32FC2, corners);
            cvInitMatHeader(&point_counts,  num,            1, CV_32SC1, p_count);
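            // The headers wrap the existing buffers without copying: image_points is
            // a (num*PAT_SIZE) x 1 matrix of 2-channel floats, and point_counts holds
            // the number of corners detected in each view.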

            // Estimate intrinsic parameters and distortion coefficients
            printf("Calicurating parameters...");
            CvMat *intrinsic   = cvCreateMat(3, 3, CV_32FC1);
            CvMat *distortion  = cvCreateMat(1, 4, CV_32FC1);
            cvCalibrateCamera2(&object_points, &image_points, &point_counts, cvGetSize(images[0]), intrinsic, distortion);
            printf("Finished !\n");

            // Output a file
            printf("Generating a XML file...");
            CvFileStorage *fs = cvOpenFileStorage("camera.xml", 0, CV_STORAGE_WRITE);
            cvWrite(fs, "intrinsic", intrinsic);
            cvWrite(fs, "distortion", distortion);
            cvReleaseFileStorage(&fs);    
            printf("Finished !\n");

            // Release the matrices
            free(p_count);
            cvFree(&corners);
            cvFree(&objects);
            cvReleaseMat(&intrinsic);
            cvReleaseMat(&distortion);
        }

        // Release the images
        for (int i = 0; i < num; i++) cvReleaseImage(&images[i]);
    }

    // See you
    ardrone.close();

    return 0;
}
Code Example #4
File: calibrate.c  Project: kthakore/simcam
int main(int argc, char * argv[])
{
	int corner_count;
	int successes = 0;
	int step, frame = 0;

	const char* intrinsics_path = argv[1];
	const char* distortions_path = argv[2];
	int total = argc - 3 ; 
	int start = 3;

	const char* loc = argv[start] ;

	board_w = 7; // Board width in squares
	board_h = 4; // Board height 
	n_boards = total; // Number of boards
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );
	// Allocate Storage
	CvMat* image_points		= cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
	CvMat* object_points		= cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
	CvMat* point_counts		= cvCreateMat( n_boards, 1, CV_32SC1 );
	CvMat* intrinsic_matrix		= cvCreateMat( 3, 3, CV_32FC1 );
	CvMat* distortion_coeffs	= cvCreateMat( 5, 1, CV_32FC1 );
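	// image_points/object_points accumulate board_n rows per successful view;
	// point_counts records how many corners each view contributed.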

	CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];

	IplImage *image = cvLoadImage( loc );
	//IplImage *image = cvQueryFrame(capture);

	IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );

	// Capture corner views; loop until we've got n_boards
	// successful captures (all corners on the board are found)

	while( start < argc ){	// iterate over every image path given on the command line
		// Skip every board_dt frames to allow user to move chessboard
		//		if( frame++ % board_dt == 0 ){
		// Find chessboard corners:
		int found = cvFindChessboardCorners( image, board_sz, corners,
				&corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );


		// Get subpixel accuracy on those corners
		cvCvtColor( image, gray_image, CV_BGR2GRAY );
		cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ), 
				cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

		// Draw it
		cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
		if( found )
		{            
			cvSaveImage( "/tmp/grid_save.png", image);  
		}

		// If we got a good board, add it to our data
		if( corner_count == board_n ){
			step = successes*board_n;
			for( int i=step, j=0; j < board_n; ++i, ++j ){
				CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
				CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
				CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
				CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
				CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
			}
			CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
			successes++;
		}
		//		} 


		// Advance to the next image given on the command line
		start++;
		if( start >= argc )
			break;

		loc = argv[start];

		cvReleaseImage( &image );
		image = cvLoadImage( loc );
	} // End collection while loop
Code Example #5
File: transcalib.cpp  Project: erebuswolf/IGVC-Code
int main(int argc, char* argv[]) {
  if(argc != 6){
	printf("Usage: %s board_w board_h intrinsics_file distortion_file image\n", argv[0]);
	return -1;
  }
  // INPUT PARAMETERS:
  //
  int       board_w    = atoi(argv[1]);
  int       board_h    = atoi(argv[2]);
  int       board_n    = board_w * board_h;
  CvSize    board_sz   = cvSize( board_w, board_h );
  CvMat*    intrinsic  = (CvMat*)cvLoad(argv[3]);
  CvMat*    distortion = (CvMat*)cvLoad(argv[4]);
  IplImage* image      = 0;
  IplImage* gray_image = 0;
  if( (image = cvLoadImage(argv[5])) == 0 ) {
    printf("Error: Couldn't load %s\n",argv[5]);
    return -1;
  }
  
  CvMat* image_points      = cvCreateMat(1*board_n,2,CV_32FC1);
  CvMat* object_points     = cvCreateMat(1*board_n,3,CV_32FC1);
  
  CvMat* objdrawpoints = cvCreateMat(1,1,CV_32FC3);
  CvMat* imgdrawpoints = cvCreateMat(1,1,CV_32FC2);
  float x=0;
  float y=0;
  float z=0;
  
  double grid_width=2.85;
  gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
  cvCvtColor(image, gray_image, CV_BGR2GRAY );

  CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
  int corner_count = 0;
  int found = cvFindChessboardCorners(
	gray_image,
	board_sz,
	corners,
	&corner_count,
	CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
  );
  if(!found){
	printf("Couldn't aquire chessboard on %s, "
	  "only found %d of %d corners\n",
	  argv[5],corner_count,board_n
	);
	return -1;
  }
  //Get Subpixel accuracy on those corners:
  cvFindCornerSubPix(
	gray_image,
	corners,
	corner_count,
	cvSize(11,11),
	cvSize(-1,-1),
	cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
  );

// If we got a good board, add it to our data
  for( int i=0, j=0; j<board_n; ++i,++j ) {
	CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
	CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
	CV_MAT_ELEM(*object_points,float,i,0) =grid_width*( j/board_w);
	//  cout<<j/board_w<<" "<<j%board_w<<endl;
	CV_MAT_ELEM(*object_points,float,i,1) = grid_width*(j%board_w);
	CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
  }

  // DRAW THE FOUND CHESSBOARD
  //
  cvDrawChessboardCorners(
	image,
	board_sz,
	corners,
	corner_count,
	found
  );

  // FIND THE EXTRINSIC PARAMETERS
  // (rotation and translation of the board relative to the camera)
  //
  CvMat *trans = cvCreateMat( 1, 3, CV_32F);
  CvMat *rot = cvCreateMat( 1, 3, CV_32F);

  cvFindExtrinsicCameraParams2(object_points,image_points,intrinsic,distortion,rot,trans);
  
//  cvSave("trans.xml",trans); 
//  cvSave("rot.xml",rot); 
  int key = 0;
  IplImage *drawn_image = cvCloneImage(image);
  cvNamedWindow("translation");

  // LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
  //
  // escape key stops
  //
  
//  cvSetZero(trans);
//  cvSetZero(rot);
  while(key != 27) {
	cvCopy(image,drawn_image);
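	// ASCII key codes below: 97='a', 113='q', 115='s', 119='w', 100='d', 101='e'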
	
	if(key==97)x--;
	else if(key==113)x++;
	else if(key==115)y--;
	else if(key==119)y++;
	else if(key==100)z--;
	else if(key==101)z++;
	
	((float*)(objdrawpoints->data.ptr))[0]=x;
	((float*)(objdrawpoints->data.ptr))[1]=y;
	((float*)(objdrawpoints->data.ptr))[2]=z;
	printf("%f %f %f\n",x,y,z);
	cvProjectPoints2(objdrawpoints,rot,trans,intrinsic,distortion,imgdrawpoints);
	cvCircle(drawn_image,cvPoint(((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]),5,cvScalar(255,0,0),-1);
	printf("%f %f\n",((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]);
	cvShowImage( "translation", drawn_image );
	key = cvWaitKey(3);
  }
  cvDestroyWindow( "translation" );
  //must add a lot of memory releasing here
  return 0;
}
Code Example #6
File: extCalib.c  Project: rito96/3Asemester
int main( int argc, char** argv ){
  CvCapture* capture = NULL;
  IplImage* src = NULL;
  IplImage* src2 = NULL;
  IplImage* gray = NULL; 
  IplImage* output = NULL; 

  CvMat* cornerPoints;
  CvMat* objectPoints;
  CvMat pointsNumMat;
  CvPoint2D32f* points;
  int pointsNum[1];

  ChessBoard chess;
  int pointsPerScene;
  int detectedPointsNum;
  int allPointsFound;
  int i, j;
  char key;
  int camID;
  char* windowName = "extrinsic calibration";

  capture = cvCreateCameraCapture(0);

  if(!capture) {
    fprintf(stderr, "ERROR: capture is NULL \n");
    return(-1);
  }

  chess.dx = CHESS_ROW_DX;
  chess.dy = CHESS_COL_DY;
  chess.patternSize.width = CHESS_ROW_NUM;
  chess.patternSize.height = CHESS_COL_NUM;

  pointsPerScene 
    = chess.patternSize.width * chess.patternSize.height;


  cornerPoints = cvCreateMat(pointsPerScene, 2, CV_32F);
  objectPoints = cvCreateMat(pointsPerScene, 3, CV_32F);

  pointsNum[0] = pointsPerScene;
  pointsNumMat = cvMat(1, 1, CV_32S, pointsNum);

  points 
    = (CvPoint2D32f*)malloc( sizeof(CvPoint2D32f) * pointsPerScene ) ;

  src = cvQueryFrame(capture);

  if(src == NULL){
    fprintf(stderr, "Could not grab and retrieve frame...\n");
    return(-1);
  }

  src2 = cvCreateImage(cvSize(src->width, src->height), src->depth, 3);
  output = cvCreateImage(cvSize(src->width, src->height), src->depth, 3);
  
  cvCopy( src, src2, NULL ); 

  gray = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1);
  
  cvNamedWindow( windowName, CV_WINDOW_AUTOSIZE );

  while( 1 ){
    src = cvQueryFrame(capture);
    if( !src ) {
      break;
    }
    cvCopy( src, src2, NULL ); 

    cvCopy( src2, output, NULL );

    cvCvtColor(src2, gray, CV_BGR2GRAY);
    
    if( cvFindChessboardCorners( gray, chess.patternSize, points, 
        &detectedPointsNum, CV_CALIB_CB_ADAPTIVE_THRESH ) ){
      cvFindCornerSubPix(gray, points, detectedPointsNum, 
        cvSize(5, 5), cvSize(-1, -1), 
        cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1));
      allPointsFound = 1;
    } else {
      allPointsFound = 0;
    }
    
    cvDrawChessboardCorners( src2, chess.patternSize, points, 
      detectedPointsNum, allPointsFound );

    cvShowImage(windowName, src2);

    key = cvWaitKey( 20 );
    if(key == RETURN && allPointsFound ){
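      // store2DCoordinates/store3DCoordinates/calibrateCamera are helpers defined
      // elsewhere in this project; they pack the detected corners and the board's
      // 3D grid and then estimate the extrinsic parameters.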
      store2DCoordinates( cornerPoints, points, chess, 0 );
      store3DCoordinates( objectPoints, chess, 0 );
      calibrateCamera("intrinsic_param_ref.txt", 
        "extrinsic_param.txt", 
        cornerPoints, objectPoints );
      cvSaveImage( "board.jpg", output, 0 );
      break;
    } else if(key == ESCAPE) {
      break;
    }
  }

  cvDestroyWindow( windowName );

  cvReleaseCapture(&capture);

  free(points);
  cvReleaseMat(&cornerPoints);
  cvReleaseMat(&objectPoints);
  cvReleaseImage(&gray);
  cvReleaseImage(&src2);

  return(0);
}
Code Example #7
File: cv.jit.calibration.cpp  Project: fourks/cv.jit
void cv_jit_calibration_findcorners(t_cv_jit_calibration *x, 
									t_jit_matrix_info in_minfo, 
									t_jit_matrix_info out_minfo, 
									void *in_matrix, void *out_matrix, 
									CvMat in_cv, char *out_bp ){
	
	int					board_point_nb = x->pattern_size[0]*x->pattern_size[1];
	CvPoint2D32f		*corners = new CvPoint2D32f[board_point_nb];
	int					corner_count;
	int					step;
	CvSize				pattern_size, image_size;
	IplImage			*gray_image, *color_image, in_image;
	
	//in_image = cvCreateImageHeader(cvSize(in_minfo.dim[0], in_minfo.dim[1]), 8, in_minfo.planecount);
	
	if ( x->pattern_size[0] < 3 || x->pattern_size[1] < 3 ) {
		jit_object_error((t_object *) x, "pattern_size must be at least 3 x 3");
		return;
	}
	pattern_size = cvSize( x->pattern_size[0], x->pattern_size[1] );
	image_size = cvSize(in_minfo.dim[0], in_minfo.dim[1]);
	cvGetImage(&in_cv, &in_image);	// create an IplImage header from a CvMat
	
	// Here we create 2 copies of input matrix, a color and a grayscale
	// This is to avoid modifying the original 
	// and also to deal with different kind of images supported by the following functions
	
	color_image	=	cvCreateImage(image_size, 8, 4);
	gray_image	=	cvCreateImage(image_size, 8, 1);
	
	// convert image colorspace
	if ( in_minfo.planecount == 1 ) {
		cvCvtColor(&in_image, color_image, CV_GRAY2RGBA);
		memcpy(gray_image->imageData, in_image.imageData, in_image.imageSize);
	}
	
	else {
		cvCvtColor(&in_image, gray_image, CV_RGBA2GRAY);
		memcpy(color_image->imageData, in_image.imageData, in_image.imageSize);
	}
	
	// find chessboard corners (grayscale or color image)
	int found = cvFindChessboardCorners(&in_cv, 
										pattern_size, 
										corners, 
										&corner_count, 
										CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
	
	// get subpixel accuracy on those corners (grayscale image only)
	cvFindCornerSubPix(gray_image, 
					   corners, 
					   corner_count, 
					   cvSize(11,11), 
					   cvSize(-1,-1), 
					   cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
	
	// draw chessboard corner (color image only)

	cvDrawChessboardCorners(color_image, pattern_size, corners, corner_count, found);
	
	x->frame++;

	if ( x->frame % x->wait_n_frame == 0 ) {

	// update arrays
		if( corner_count == board_point_nb ) {

			step = x->success_count*board_point_nb;
			for( int i=step, j=0; j<board_point_nb; ++i,++j ) {
				CV_MAT_ELEM(*x->image_points, float,i,0) = corners[j].x; 
				CV_MAT_ELEM(*x->image_points, float,i,1) = corners[j].y; 
				CV_MAT_ELEM(*x->object_points,float,i,0) = j/x->pattern_size[0]; 
				CV_MAT_ELEM(*x->object_points,float,i,1) = j%x->pattern_size[0]; 
				CV_MAT_ELEM(*x->object_points,float,i,2) = 0.0f;
			}
			CV_MAT_ELEM(*x->point_counts, int,x->success_count,0) = board_point_nb; 
			x->success_count++;
			
			// invert view
			cvNot( color_image , color_image );
		}
	}
Code Example #8
File: cvglview.c  Project: rito96/3Asemester
void* captureThread(void* args)
{
    CvCapture* capture = NULL;
    IplImage* frame;
    IplImage* frame_copy;
    IplImage* gray;
    ChessBoard chess;
    int pointsPerScene;
    CvPoint2D32f* points;
    int pointsNum[1];
    CvMat* cornerPoints;
    CvMat* objectPoints;
    CvMat pointsNumMat;
    
    CvMat* intrinsic = cvCreateMat( 3, 3, CV_64F );
    CvMat* distortion = cvCreateMat( 4, 1, CV_64F );
    CvMat* rotation = cvCreateMat( 3, 3, CV_64F );
    CvMat* translation = cvCreateMat( 3, 1, CV_64F );
    
    loadIntrinsicParams("intrinsic_param_ref.txt", intrinsic, distortion );

    capture = cvCreateCameraCapture(0);

    if(capture == NULL){
	fprintf(stderr, "ERROR: Could not open Camera Device\n");
	exit(1);
    }
    
    frame = cvQueryFrame(capture);
    
    if(frame == NULL){
	fprintf(stderr, "ERROR: Could not query frame\n");
	exit(1);
    }
    
    frame_copy = cvCreateImage(cvGetSize(frame), 
			       frame->depth, 3);

    gray = cvCreateImage(cvGetSize(frame_copy), frame_copy->depth, 1);
    
    cvNamedWindow(captureWinName, CV_WINDOW_AUTOSIZE);
    cvMoveWindow(captureWinName, graphicsWinWidth + 10, 0);

    chess.dx = CHESS_ROW_DX;
    chess.dy = CHESS_COL_DY;
    chess.patternSize.width = CHESS_ROW_NUM;
    chess.patternSize.height = CHESS_COL_NUM;
    
    pointsPerScene = chess.patternSize.width * chess.patternSize.height;
    cornerPoints = cvCreateMat(pointsPerScene, 2, CV_32F);
    objectPoints = cvCreateMat(pointsPerScene, 3, CV_32F);

    pointsNum[0] = pointsPerScene;
    pointsNumMat = cvMat(1, 1, CV_32S, pointsNum);
    
    points = (CvPoint2D32f*)malloc( sizeof(CvPoint2D32f) * pointsPerScene );

    while(1){
	int allPointsFound = 0;
	int detectedPointsNum;
	frame = cvQueryFrame( capture );
	if( !frame ) {
	    fprintf(stderr, "could not query frame\n");
	    exit(1);
	}
	
	cvResize(frame, frame_copy, CV_INTER_NN);
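	// frame_copy was created with the same size as the first frame, so this
	// nearest-neighbour resize is effectively a copy off the capture's internal buffer.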
	cvCvtColor(frame_copy, gray, CV_BGR2GRAY);
	if( cvFindChessboardCorners( gray, chess.patternSize, points,
				     &detectedPointsNum,
				     CV_CALIB_CB_ADAPTIVE_THRESH ) ){
	    cvFindCornerSubPix(gray, points, detectedPointsNum,
			       cvSize(5, 5), cvSize(-1, -1),
			       cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1));
	    allPointsFound = 1;
	} else {
	    allPointsFound = 0;
	}

	cvDrawChessboardCorners( frame_copy, chess.patternSize, points,
				 detectedPointsNum, allPointsFound );
	
	if( allPointsFound ){
	    double cameraPosition[3];
	    double cameraOriVec[4];
	    store2DCoordinates( cornerPoints, points, chess, 0 );
	    store3DCoordinates( objectPoints, chess, 0 );
	    calibrate( cornerPoints, objectPoints,
		       intrinsic, distortion, rotation, translation );

	    getCameraPosition(rotation, translation, cameraPosition);
	    printf("cam pos relative to chess board: %.1f, %.1f, %.1f\n", 
		   cameraPosition[0],
		   cameraPosition[1],
		   cameraPosition[2]);
	    convertCv2Gl(cameraPosition, transGL);
	    getCameraOriVec(rotation, rotGL);
	}
	
	cvShowImage( captureWinName, frame_copy);
	if(cvWaitKey(10) == KEY_ESC){
	    exit(1);
	}
    }
    
    free(points);
    cvReleaseMat(&intrinsic);
    cvReleaseMat(&distortion);
    cvReleaseMat(&rotation);
    cvReleaseMat(&translation);
    
    cvReleaseMat(&cornerPoints);
    cvReleaseMat(&objectPoints);
    
    cvDestroyWindow(captureWinName);
    cvReleaseImage(&frame_copy);
    cvReleaseImage(&gray);
    cvReleaseCapture(&capture);
}
Code Example #9
int main(int argc, const char **argv)
{
  // Instantiate a ModelManager:
  ModelManager manager("Test wiimote");

  nub::ref<OutputFrameSeries> ofs(new OutputFrameSeries(manager));
  manager.addSubComponent(ofs);

  nub::ref<InputFrameSeries> ifs(new InputFrameSeries(manager));
  manager.addSubComponent(ifs);

  // Parse command-line:
  if (manager.parseCommandLine(argc, argv, "", 0, 0) == false) return(1);

  manager.start();



  //Init camera params
  itsIntrinsicMatrix = cvCreateMat( 3, 3, CV_32FC1);
  itsDistortionCoeffs = cvCreateMat( 4, 1, CV_32FC1);
  itsCameraRotation = cvCreateMat( 1, 3, CV_64FC1);
  itsCameraTranslation = cvCreateMat( 1, 3, CV_64FC1);

  //cvmSet(itsDistortionCoeffs, 0, 0, -0.2403274);
  //cvmSet(itsDistortionCoeffs, 1, 0, 2.5312502);
  //cvmSet(itsDistortionCoeffs, 2, 0, -0.0439848);
  //cvmSet(itsDistortionCoeffs, 3, 0, -0.0106820);
  cvmSet(itsDistortionCoeffs, 0, 0, 0);
  cvmSet(itsDistortionCoeffs, 1, 0, 0);
  cvmSet(itsDistortionCoeffs, 2, 0, 0);
  cvmSet(itsDistortionCoeffs, 3, 0, 0);

  cvmSet(itsCameraRotation, 0, 0, 2.391102);
  cvmSet(itsCameraRotation, 0, 1, 0);
  cvmSet(itsCameraRotation, 0, 2, 0);

  cvmSet(itsCameraTranslation, 0, 0, 0);
  cvmSet(itsCameraTranslation, 0, 1, 0);
  cvmSet(itsCameraTranslation, 0, 2, 840.954432);


  //cvmSet(itsIntrinsicMatrix, 0, 0, 290.85342); cvmSet(itsIntrinsicMatrix, 0, 1, 0); cvmSet(itsIntrinsicMatrix, 0, 2, 320/2); //159.50000);
  //cvmSet(itsIntrinsicMatrix, 1, 0, 0); cvmSet(itsIntrinsicMatrix, 1, 1, 290.85342 ); cvmSet(itsIntrinsicMatrix, 1, 2, 240/2); // 119.5);
  //cvmSet(itsIntrinsicMatrix, 2, 0, 0); cvmSet(itsIntrinsicMatrix, 2, 1, 0); cvmSet(itsIntrinsicMatrix, 2, 2, 1);

  cvmSet(itsIntrinsicMatrix, 0, 0, 415.5); cvmSet(itsIntrinsicMatrix, 0, 1, 0); cvmSet(itsIntrinsicMatrix, 0, 2, 320/2); //159.50000);
  cvmSet(itsIntrinsicMatrix, 1, 0, 0); cvmSet(itsIntrinsicMatrix, 1, 1, 436 ); cvmSet(itsIntrinsicMatrix, 1, 2, 240/2); // 119.5);
  cvmSet(itsIntrinsicMatrix, 2, 0, 0); cvmSet(itsIntrinsicMatrix, 2, 1, 0); cvmSet(itsIntrinsicMatrix, 2, 2, 1);
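  // The intrinsic matrix set above is [[fx 0 cx],[0 fy cy],[0 0 1]]:
  // fx=415.5, fy=436, principal point at the image centre (320/2, 240/2).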

  bool drawGrid = true;
  bool saveCorners = false;
  bool calibrate = false;

  std::vector<CvPoint2D32f> allCorners;

  while(1)
  {

    GenericFrame input = ifs->readFrame();
    Image<PixRGB<byte> > img = input.asRgb();


    int rows = 4, cols = 3;

    std::vector<CvPoint2D32f> corners = findCorners(img, rows, cols);

    if (corners.size() == (uint)(rows*cols))
    {
      if (saveCorners)
        for(uint i=0; i<corners.size(); i++)
          allCorners.push_back(corners[i]);
      saveCorners = false;

      cvDrawChessboardCorners(img2ipl(img), cvSize(rows,cols), &corners[0], corners.size(), 1);
    }

    if (calibrate)
    {
      calibrateViews(allCorners, rows, cols);
      if (corners.size() == (uint)(rows*cols))
        findExtrinsic(corners, rows, cols);
      calibrate = false;
    }

    if (drawGrid)
      projectGrid(img);

    projectRect(img, 216.5, 279.5);


    processUserInput(ofs, drawGrid, saveCorners, calibrate);

    ofs->writeRGB(img, "Output", FrameInfo("Output", SRC_POS));

    ofs->updateNext();
  }

  // stop all our ModelComponents
  manager.stop();

  // all done!
  return 0;
}
Code Example #10
File: main.cpp  Project: uprun/GraduateWork
int main()
{

	if(run_tests_only)
	{
		MyLine3D::runTest();
		return 0;
	}

	//CvMat *camera_inner_calibration_matrix; 
	bool show_surf_example=false;
	bool show_calibration_from_camera_and_undistortion=false;
	if(show_calibration_from_camera_and_undistortion)
	{
		CvMat *object_points_all=0;
		CvMat *image_points_all=0;
		CvMat *points_count_all=0;
		CvMat *camera_matr=0;
		CvMat *distor_coefs=0;
		CvMat *rotation_vecs=0;
		CvMat *transpose_vecs=0;
		vector<CvPoint2D32f> qu_calibr_points;
		IplImage* frameCam1;
		cvNamedWindow("WindowCam1",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam1=cvCreateCameraCapture(0);
		IplImage *quarterFrame;
		CvPoint2D32f *cornersFounded= new CvPoint2D32f[100];
		int cornersCount=0;
		int result_Found=0;
		// getting snapshots for inner camera calibration from video camera
		bool capture_flag=false;
		while(true)
		{
			frameCam1=cvQueryFrame(captureCam1);
			quarterFrame=cvCreateImage(cvSize((frameCam1->width),(frameCam1->height)),IPL_DEPTH_8U,3);
		
			cvCopy(frameCam1,quarterFrame);
			if(capture_flag)
			{
				result_Found=cvFindChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,&cornersCount);//,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS |CV_CALIB_CB_FAST_CHECK);
				cvDrawChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,cornersCount,result_Found);
				if(result_Found>0)
					AddPointsToInnerCalibrate(qu_calibr_points,cornersFounded,cornersCount);
				capture_flag=false;
				cvShowImage("WindowCam1",quarterFrame);
				if(result_Found>0)
					cvWaitKey(0);
			}
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c==32 || c=='y' || c=='Y')
				capture_flag=true;
			cvShowImage("WindowCam1",quarterFrame);
			cvReleaseImage(&quarterFrame);
		
		}
		cvReleaseImage(&quarterFrame);
	
		cvReleaseCapture(&captureCam1);
		cvDestroyWindow("WindowCam1");
	
		PrintAllPointsForInnerCalibrate(qu_calibr_points,chess_b_szW*chess_b_szH);
		InitCvMatPointsParametersForInnerCallibration_part1(qu_calibr_points,chess_b_szW*chess_b_szH,object_points_all,image_points_all,points_count_all,chess_b_szW,chess_b_szH);
		InitOtherCameraParametersForInnerCallibration_part2(qu_calibr_points.size()/(chess_b_szW*chess_b_szH),camera_matr,distor_coefs,rotation_vecs,transpose_vecs);
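		// The flags below fix the principal point at the image centre, lock the
		// fx/fy aspect ratio, and force the tangential distortion coefficients to zero.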
		double calibration_error_result=cvCalibrateCamera2(object_points_all,
													image_points_all,
													points_count_all,
													cvSize(imgW,imgH),
													camera_matr,
													distor_coefs,
													rotation_vecs,
													transpose_vecs,
													CV_CALIB_FIX_PRINCIPAL_POINT|CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_ZERO_TANGENT_DIST
													);
		WriteMatrixCoef(camera_matr);
		WriteMatrixCoef(distor_coefs);
		//camera_inner_calibration_matrix=cvCreateMat(3,3,CV_32FC1);
		//cvCopy(camera_matr,camera_inner_calibration_matrix);
		cvSave("camera_calibration_inner.txt",camera_matr,"camera_inner_calibration_matrix");
		cvSave("camera_calibration_dist.txt",distor_coefs,"distor_coefs","coeficients of distortions");
		cout<<"Total Error:"<<calibration_error_result<<endl;
		cout<<"Average Calibration Error :"<<(calibration_error_result)/qu_calibr_points.size()<<endl;
	//undistortion example
		IplImage *frame_cur;
		IplImage *undistor_image;
		cvNamedWindow("cameraUndistor",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam2=cvCreateCameraCapture(0);
		bool undist_flag=false;
		while(true)
		{
			frame_cur= cvQueryFrame(captureCam2);
			undistor_image=cvCreateImage(cvSize((frame_cur->width),(frame_cur->height)),IPL_DEPTH_8U,3);
			if(undist_flag)
			{
				cvUndistort2(frame_cur,undistor_image,camera_matr,distor_coefs);
			}
			else
			{
				cvCopy(frame_cur,undistor_image);
			}
			cvShowImage("cameraUndistor",undistor_image);
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c=='u'||c=='U')
				undist_flag=!undist_flag;

			cvReleaseImage(&undistor_image);

		}
		cvReleaseImage(&undistor_image);
		cvReleaseCapture(&captureCam2);
		cvDestroyWindow("cameraUndistor");
	}//ending undistortion_example
	
	if(show_surf_example)
	{
		//using SURF
		
		initModule_nonfree();// added at 16.04.2013
		CvCapture* capture_cam_3=cvCreateCameraCapture(0);
		cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO);
		cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1);
		IplImage* buf_frame_3=0;
		IplImage* gray_copy=0;
		IplImage* buf_frame_3_copy=0;
	
		CvSeq *kp1,*descr1;
		CvMemStorage *storage=cvCreateMemStorage(0);
	
		CvSURFPoint *surf_pt;
		bool surf_flag=false;
		while(true)
		{
			buf_frame_3=cvQueryFrame(capture_cam_3);
		
			if(surf_flag)
			{
				surf_flag=false;
				gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1);
				buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3);
			
				cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY);
				//cvSetImageROI(gray_copy,cvRect(280,200,40,40));
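				// cvSURFParams(0.0, 0): a Hessian threshold of 0 keeps essentially every
				// keypoint (basic 64-dimensional descriptors); the "Hessian Level"
				// trackbar later filters them by min_hessian when drawing.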
				cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0));
				cvReleaseImage(&gray_copy);
				re_draw=true;
			
				while(true)
				{
					if(re_draw)
					{
			
						cvCopy(buf_frame_3,buf_frame_3_copy);
						double pi=acos(-1.0);
						for(int i=0;i<kp1->total;i++)
						{
							surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i);
							if(surf_pt->hessian<min_hessian)
								continue;
							int pt_x,pt_y;
							pt_x=(int)(surf_pt->pt.x);
							pt_y=(int)(surf_pt->pt.y);
							int sz=surf_pt->size;
							int rad_angle=(surf_pt->dir*pi)/180;
				
							cvCircle(buf_frame_3_copy,cvPoint(pt_x,pt_y),1/*sz*/,CV_RGB(0,255,0));
							cvLine(buf_frame_3_copy,cvPoint(pt_x,pt_y),cvPoint(pt_x+sz*cosl(rad_angle),pt_y-sz*sinl(rad_angle)),CV_RGB(0,0,255));
						}
						cvShowImage("SURF from Cam",buf_frame_3_copy);
					
					}
					char c=cvWaitKey(33);
					if(c==27)
					{
					
					
						break;
					}
				}
				cvReleaseImage(&buf_frame_3_copy);
			}
			
			cvShowImage("SURF from Cam",buf_frame_3);
			char ch=cvWaitKey(33);
			if(ch==27)
				break;
			if(ch==32)
				surf_flag=true;
		
		}
		if(gray_copy!=0)
			cvReleaseImage(&gray_copy);
		cvReleaseCapture(&capture_cam_3);
		cvDestroyWindow("SURF from Cam");
	}//ending SURF_example

	CvFont my_font=cvFont(1,1);
	cvInitFont(&my_font,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0);

	cvNamedWindow("twoSnapshots",CV_WINDOW_KEEPRATIO);
	cvCreateTrackbar("Select LLine","twoSnapshots",0,1000,onTrackbarSlideSelectLine);
	CvCapture *capture_4 = 0;
	
	IplImage* left_img=0;
	IplImage* right_img=0;
	IplImage* cur_frame_buf=0;
	IplImage* gray_img_left=0;
	IplImage* gray_img_right=0;
	IplImage* merged_images=0;
	IplImage* merged_images_copy=0;
	CvMat *fundamentalMatrix = 0;
	vector<KeyPoint> key_points_left;
	Mat descriptors_left; 
	vector<KeyPoint> key_points_right;
	Mat descriptors_right;
	//CvMemStorage *mem_stor=cvCreateMemStorage(0);*/
	float min_hessian_value=1001.0f;

	double startValueOfFocus = 350;

	char* left_image_file_path = "camera_picture_left.png";
	char* right_image_file_path = "camera_picture_right.png";

	Array left_points, right_points;
	left_points.init(1,1);
	right_points.init(1,1);
	Array forReconstructionLeftPoints, forReconstructionRightPoints;
	forReconstructionLeftPoints.init(1,1);
	forReconstructionRightPoints.init(1,1);

	

	while(true)
	{
		char ch=cvWaitKey(33);
		if(ch==27)
			break;
		// open left and right images
		if(ch == 'o' || ch == 'O')
		{
			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			MergeTwoImages(left_img,right_img,merged_images);
		}
		// save both left and right images from camera
		if(ch == 's' || ch == 'S')
		{
			if( left_img != 0 )
				cvSaveImage(left_image_file_path, left_img);
			if( right_img != 0)
				cvSaveImage(right_image_file_path, right_img);
		}

		if(ch=='l'||ch=='L')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			
			cur_frame_buf=cvQueryFrame(capture_4);
			if(left_img==0)
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,left_img);

			if(right_img == 0)
			{
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,right_img);
			}

			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='r'||ch=='R')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			if(right_img==0)
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,right_img);

			if(left_img == 0)
			{
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,left_img);
			}
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='b'||ch=='B')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			cvCopy(cur_frame_buf,left_img);
			cvCopy(cur_frame_buf,right_img);
		}
		if((ch=='q'||ch=='Q') && left_img!=0)
		{
			//proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);

		}
		if((ch=='w'||ch=='W') && right_img!=0)
		{
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);			

		}
		if((ch=='m'||ch=='M') && left_img!=0 && right_img!=0)
		{
			//merge two images in to bigger one
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if((ch=='c'||ch=='C') && merged_images!=0)
		{
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);
		}

		if(ch == 'E' || ch == 'e')
		{
			//drawing lines for corresponding points
			KeyPoint *leftPoint,*rightPoint,*leftPoint2,*rightPoint2;
			int width_part=merged_images->width>>1;
			/*for(int iL=0;iL<left_to_right_corresponding_points.size();iL++)
			{
				leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left,left_to_right_corresponding_points[iL].first);
				rightPoint=(CvSURFPoint*)cvGetSeqElem(key_points_right,left_to_right_corresponding_points[iL].second);
				cvLine(merged_images,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(255,0,0));
			}*/
			
			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			
			while(true)
			{
				merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
				cvCopy(merged_images,merged_images_copy);
				int iL=selectedLeftLine;
				int iR=iL;
				if(iL>=left_to_right_corresponding_points.size())
					iL=left_to_right_corresponding_points.size()-1;
				if(iR>=right_to_left_corresponding_points.size())
					iR=right_to_left_corresponding_points.size()-1;
				char str[100]={0};
				if(iL >= 0 )
				{
					bool isLeftToRightLineIsAccepted = acceptedLeftToRightCorrespondings[iL];
				
					// difference value
					sprintf(str,"%f",left_to_right_corresponding_points[iL].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-40),&my_font,CV_RGB(0,255,0));
					// count of Matches
					sprintf(str,"%d",left_to_right_corresponding_points[iL].counterOfMatches);
					cvPutText(merged_images_copy,str,cvPoint(200,merged_images_copy->height-40),&my_font,CV_RGB(255,255,0));
					// median of compared values
					sprintf(str,"%lf",left_to_right_corresponding_points[iL].medianOfComparedMatches);
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));

					// Variance of compared values
					sprintf(str,"V=%lf",left_to_right_corresponding_points[iL].Variance());
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					// Standard deviation of compared values
					sprintf(str,"SD=%lf",sqrt( left_to_right_corresponding_points[iL].Variance() ));
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					double SD = sqrt( left_to_right_corresponding_points[iL].Variance() ) ;
					double median = left_to_right_corresponding_points[iL].medianOfComparedMatches;
					double compValue = left_to_right_corresponding_points[iL].comparer_value;
					double mark_1_5 = median - 1.5 * SD - compValue;

					// Mark 1.5
					sprintf(str,"m1.5=%lf", mark_1_5);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-120),&my_font,CV_RGB(0,255,0));

					sprintf(str,"angle=%lf", left_to_right_corresponding_points[iL].degreesBetweenDeltaVector);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-150),&my_font,CV_RGB(0,255,0));

					

					leftPoint= &(key_points_left[ left_to_right_corresponding_points[iL].comp_pair.first ]);
					rightPoint=&(key_points_right[ left_to_right_corresponding_points[iL].comp_pair.second ]);
				
					cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(0,255,0));

					drawEpipolarLinesOnLeftAndRightImages(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y),
						cvPoint(rightPoint->pt.x,rightPoint->pt.y), fundamentalMatrix);

					CvScalar color = CV_RGB(255, 0, 0);
					if(isLeftToRightLineIsAccepted)
					{
						color = CV_RGB(0,255,0);
					}

					cvCircle(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), 5, color);
					cvCircle(merged_images_copy, cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y), 5, color);
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x,rightPoint->pt.y),CV_RGB(255,0,255));
				if(iR >= 0 )
				{
					sprintf(str,"%f",right_to_left_corresponding_points[iR].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(width_part,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));
					rightPoint2= &(key_points_right [right_to_left_corresponding_points[iR].comp_pair.first]);
					leftPoint2= &(key_points_left [right_to_left_corresponding_points[iR].comp_pair.second]);
					cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,0));
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x+width_part,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,255));
				
				cvShowImage("twoSnapshots",merged_images_copy);
				cvReleaseImage(&merged_images_copy);
				char ch2=cvWaitKey(33);
				if(ch2==27)
					break;
				if(ch2=='z' && selectedLeftLine>0)
				{
					selectedLeftLine--;
				}
				if(ch2=='x' && selectedLeftLine<1000)
				{
					selectedLeftLine++;
				}
				if( ch2 == 'a' || ch2 == 'A')
				{
					acceptedLeftToRightCorrespondings[selectedLeftLine] = true;
				}
				if( ch2 == 'd' || ch2 == 'D')
				{
					acceptedLeftToRightCorrespondings[selectedLeftLine] = false;
				}
			}//end of while(true)

			SaveAcceptedCorresspondings(
					left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings
					);
			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);


			delete[] acceptedLeftToRightCorrespondings;
		}
		if( ch == 'T' || ch == 't')
		{
			clock_t startTime = clock();

			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			// proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);	
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);

			// searching fundamental matrix and corresponding points
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value, 450);
			// selecting points for finding model parameters

			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);

			delete[] acceptedLeftToRightCorrespondings;

			// start process of determination parameters of model and reconstruction of scene
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();


			cout << "Code execution time: "<< double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << endl;
		}
		if( ch == 'I' || ch == 'i')
		{	

			//-- Step 3: Matching descriptor vectors using FLANN matcher
			FlannBasedMatcher matcher;
			std::vector< DMatch > matches;
			matcher.match( descriptors_left, descriptors_right, matches );

			//double max_dist = 0; double min_dist = 100;

			////-- Quick calculation of max and min distances between keypoints
			//for( int i = 0; i < descriptors_left.rows; i++ )
			//{ double dist = matches[i].distance;
			//	if( dist < min_dist ) min_dist = dist;
			//	if( dist > max_dist ) max_dist = dist;
			//}

			//printf("-- Max dist : %f \n", max_dist );
			//printf("-- Min dist : %f \n", min_dist );

			//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
			//-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
			//-- small)
			//-- PS.- radiusMatch can also be used here.
			//std::vector< DMatch > good_matches;
			
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();

			for( int i = 0; i < descriptors_left.rows; i++ )
			{ 
				//if( matches[i].distance <= max(2*min_dist, 0.02) )
				{
					//good_matches.push_back( matches[i]); 
					left_to_right_corresponding_points.push_back( ComparedIndexes(matches[i].distance, pair<int, int> (i, matches[i].trainIdx)) );
				}
			}
			
			cout<< "Count of good matches :" << left_to_right_corresponding_points.size() << endl;

			stable_sort(left_to_right_corresponding_points.begin(),left_to_right_corresponding_points.end(),my_comparator_for_stable_sort);
		}

		//if( ch == 'K' || ch == 'k')
		//{
		//	CvSURFPoint *leftPoint;
		//	//proceed left
		//	gray_img_left=cvCreateImage(cvSize((left_img->width),(left_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(left_img,gray_img_left,CV_RGB2GRAY);
		//	cvExtractSURF(gray_img_left,NULL,&key_points_left,&descriptors_left,mem_stor,cvSURFParams(min_hessian_value,0));

		//	cv::Mat mat_gray_leftImage(gray_img_left, true);
		//	cvReleaseImage(&gray_img_left);
		//	// proceed right
		//	gray_img_right=cvCreateImage(cvSize((right_img->width),(right_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(right_img,gray_img_right,CV_RGB2GRAY);
		//	cv::Mat mat_gray_rightImage(gray_img_right, true);
		//	cvReleaseImage(&gray_img_right);
		//	vector<Point2f> LK_left_points;
		//	vector<Point2f> LK_right_points;

		//	LK_right_points.resize(key_points_left->total);

		//	for( int i = 0; i < key_points_left->total; i++)
		//	{
		//		leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left, i);
		//		LK_left_points.push_back(Point2f( leftPoint->pt.x, leftPoint->pt.y));
		//	}
		//	
		//	vector<uchar> status;
  //          vector<float> err;

		//	cv::calcOpticalFlowPyrLK(
		//		mat_gray_leftImage,
		//		mat_gray_rightImage, 
		//		LK_left_points,
		//		LK_right_points, 
		//		status,
		//		err);
		//	int width_part=merged_images->width>>1;
		//	
		//	float minErr = err[0];

		//	for(int k = 0; k < err.size(); k++)
		//	{
		//		if(status[k] && err[k] < minErr) 
		//		{
		//			minErr = err[k];
		//		}
		//	}

		//	cout<< "Lucass Kanade min error: " << minErr<< endl;

		//	int i = 0;
		//	merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
		//	cvCopy(merged_images,merged_images_copy);
		//	for(; i < LK_left_points.size(); ++i)
		//	{
		//		if(err[i] < 5 * minErr && status[i])
		//		{
		//			cvLine(merged_images_copy,cvPoint(LK_left_points[i].x,LK_left_points[i].y),cvPoint(LK_right_points[i].x+width_part,LK_right_points[i].y),
		//					CV_RGB(100 + (( i *3) % 155), 100+ ((i*7)%155), 100+ ((i*13)%155)));
		//		}
		//	}

		//	cvShowImage("twoSnapshots",merged_images_copy);
		//		
		//	while(true)
		//	{

		//		char ch2=cvWaitKey(33);
		//		if(ch2==27)
		//			break;
		//		
		//	}
		//	
		//	cvReleaseImage(&merged_images_copy);

		//	status.clear();
		//	err.clear();
		//	LK_left_points.clear();
		//	LK_right_points.clear();
		//	mat_gray_leftImage.release();
		//	mat_gray_rightImage.release();
		//}

		if( ch == 'F' || ch == 'f')
		{
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value);


		}
		if( ch == 'P' || ch == 'p')
		{
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();
		}
		if(merged_images!=0)
		{
			cvShowImage("twoSnapshots",merged_images);
		}
		
	}
Code Example #11
File: birds_eye_calib.c  Project: ehan1990/snowbots
int main(int argc, char* argv[]) {
	
int board_n = 0;
CvMat* image_points;
CvMat* object_points;
CvMat* point_counts;
CvMat* intrinsic_matrix;
CvMat* distortion_coeffs;
CvMat image_pointsz[4];
CvMat object_pointsz[4];
CvMat point_countsz[4];
CvMat intrinsic_matrixz[4];
CvMat distortion_coeffsz[4];
CvSize board_sz;
CvCapture* capture = 0;
CvPoint2D32f* corners =0;
CvPoint2D32f cornersz[4];
int corner_count;
int successes = 0;
int step, frame = 0;
IplImage *image;
IplImage *gray_image;
int found;
int i,j,c;
CvMat* object_points2;
CvMat* image_points2;
CvMat* point_counts2;
CvMat *intrinsic;
CvMat *distortion;
CvMat object_points2z[4];
CvMat image_points2z[4];
CvMat point_counts2z[4];
CvMat intrinsicz[4];
CvMat distortionz[4];
IplImage* mapx;
IplImage* mapy;
IplImage* t;


if(argc != 4){
printf("ERROR: Wrong number of input parameters\n");
return -1;
}

board_w = atoi(argv[1]);
board_h = atoi(argv[2]);
n_boards = atoi(argv[3]);


capture = cvCreateCameraCapture( 0 );
assert( capture );
board_sz = cvSize( board_w, board_h );
board_n = board_w * board_h;


cvNamedWindow( "Calibration", 1 );

printf("\n Calibration   \n");

//ALLOCATE STORAGE
// cornersz[4] is far too small to hold board_n detected corners, so allocate a
// buffer with one slot per interior corner instead of aliasing the stack array.
corners = (CvPoint2D32f*)malloc(sizeof(CvPoint2D32f) * board_n);

object_points2  = &object_points2z[0];
image_points2  = &image_points2z[0];
point_counts2  = &point_counts2z[0];
intrinsic  = &intrinsicz[0];
distortion  = &distortionz[0];

image_points = cvCreateMat(n_boards*board_n,2,CV_32FC1);
object_points = cvCreateMat(n_boards*board_n,3,CV_32FC1);
point_counts = cvCreateMat(n_boards,1,CV_32SC1);
intrinsic_matrix = cvCreateMat(3,3,CV_32FC1);
distortion_coeffs = cvCreateMat(4,1,CV_32FC1);   //5,1 crashes the code
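// With a 4x1 matrix cvCalibrateCamera2 estimates k1, k2, p1, p2 and skips the
// higher-order radial term k3.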


image = cvQueryFrame( capture );
gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel

// CAPTURE CORNER VIEWS LOOP UNTIL WE'VE GOT n_boards
// SUCCESSFUL CAPTURES (ALL CORNERS ON THE BOARD ARE FOUND)
//
while(successes < n_boards) {
//Skip every board_dt frames to allow user to move chessboard

	
if(frame++ % board_dt == 0) {

//Find chessboard corners:
found = cvFindChessboardCorners(
	image, 
	board_sz, 
	corners, 
	&corner_count,
	CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
);
	
printf(".");
	
//Get Subpixel accuracy on those corners
cvCvtColor(image, gray_image, CV_BGR2GRAY);
	
cvFindCornerSubPix(
	gray_image, 
	corners, 
	corner_count,
	cvSize(11,11),
	cvSize(-1,-1), 
	cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
	
//Draw it
cvDrawChessboardCorners(
	image, 
	board_sz, 
	corners,
	corner_count, 
	found);
	
cvShowImage( "Calibration", image );

	// If we got a good board, add it to our data
	if( corner_count == board_n ) {

		printf("\n Success: good board was seen \n");
			
			step = successes*board_n;
		
			for(i=step, j=0; j<board_n; ++i,++j ) {
				CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
				CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
				CV_MAT_ELEM(*object_points,float,i,0) = j/board_w;
				CV_MAT_ELEM(*object_points,float,i,1) = j%board_w;
				CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
			}
		
			CV_MAT_ELEM(*point_counts, int,successes,0) = board_n;
		successes++;
	}
	
} //end skip board_dt between chessboard capture
Code Example #12
int main(int argc, char* argv[]) {
  
  CvCapture* capture;// = cvCreateCameraCapture( 0 );
  // assert( capture );

  if(argc != 4){
    help();
    return -1;
  }
  board_w = atoi(argv[1]);
  board_h = atoi(argv[2]);
  int board_n  = board_w * board_h;
  CvSize board_sz = cvSize( board_w, board_h );
  FILE *fptr = fopen(argv[3],"r");
  char names[2048];
  //COUNT THE NUMBER OF IMAGES:
  while(fscanf(fptr,"%s ",names)==1){
    n_boards++;
  }
  rewind(fptr);

  cvNamedWindow( "Calibration" );

  //ALLOCATE STORAGE
  CvMat* image_points      = cvCreateMat(n_boards*board_n,2,CV_32FC1);
  CvMat* object_points     = cvCreateMat(n_boards*board_n,3,CV_32FC1);
  CvMat* point_counts      = cvCreateMat(n_boards,1,CV_32SC1);

  CvMat* intrinsic_matrix  = cvCreateMat(3,3,CV_32FC1);
  CvMat* distortion_coeffs = cvCreateMat(4,1,CV_32FC1);
  CvMat* rotation = cvCreateMat(n_boards,3,CV_32FC1);
  CvMat* translation   = cvCreateMat(n_boards,3,CV_32FC1);
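  // rotation/translation receive one row per board view: a Rodrigues rotation
  // vector and a translation vector for each successfully captured pose.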

  IplImage* image = 0;// = cvQueryFrame( capture );
  IplImage* gray_image = 0; //for subpixel
  CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
  int corner_count;
  int successes = 0;
  int step;

  for( int frame=0; frame<n_boards; frame++ ) {
    fscanf(fptr,"%s ",names);

    if(image){
      cvReleaseImage(&image);
      image = 0;
    }
    image = cvLoadImage( names);
    if(gray_image == 0  && image) //We'll need this for subpixel accurate stuff
      gray_image = cvCreateImage(cvGetSize(image),8,1);

    if(!image)
      printf("null image\n");

    int found = cvFindChessboardCorners(
                                        image,
                                        board_sz,
                                        corners,
                                        &corner_count, 
                                        CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
                                        );

    //Get Subpixel accuracy on those corners
    cvCvtColor(image, gray_image, CV_BGR2GRAY);
    cvFindCornerSubPix(gray_image, corners, corner_count, 
                       cvSize(11,11),cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
    //Draw it

    cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
    cvShowImage( "Calibration", image );

    // If we got a good board, add it to our data
    //
    if( corner_count == board_n ) {
      step = successes*board_n;
      //	printf("Found = %d for %s\n",found,names);
      for( int i=step, j=0; j<board_n; ++i,++j ) {
        CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
        CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
        CV_MAT_ELEM(*object_points,float,i,0) = j/board_w;
        CV_MAT_ELEM(*object_points,float,i,1) = j%board_w;
        CV_MAT_ELEM(*object_points,float,i,2) = 0.0f; 		
        
      }
      //        CV_MAT_ELEM(*point_counts, int,0,successes) = board_n;
      CV_MAT_ELEM(*point_counts, int,successes,0) = board_n;		
      successes++;
    }

    int c = cvWaitKey(15);
    if(c == 'p') {
      c = 0;
      while(c != 'p' && c != 27){
        c = cvWaitKey(250);
      }
    }
    if(c == 27)
      return 0;
  }
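The listing is cut off before the calibration itself. Once intrinsic_matrix and distortion_coeffs have been estimated from the collected points (with cvCalibrateCamera2, as in the neighbouring examples), the usual follow-up is to save them and build remap tables; a minimal sketch, reusing the variables declared above:

// Sketch (assumption): calibration has already filled intrinsic_matrix and
// distortion_coeffs. Save them and undistort the last loaded image.
cvSave("Intrinsics.xml", intrinsic_matrix);
cvSave("Distortion.xml", distortion_coeffs);

IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
cvInitUndistortMap(intrinsic_matrix, distortion_coeffs, mapx, mapy);

IplImage* undistorted = cvCloneImage(image);
cvRemap(image, undistorted, mapx, mapy);   // undistorted now holds the corrected view
cvShowImage("Calibration", undistorted);
cvWaitKey(0);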
コード例 #13
0
int CalibrateCamera(CvCapture *capture, RImageWindow *camWnd, T_CalibInput *cInput, T_CalibOutput *cOutput)
{
    // Set camera window to display its label
    camWnd->SetDrawLabel(true);

    // Some additional variables
    int board_n = cInput->camBoardWidth * cInput->camBoardHeight;
    CvSize board_sz = cvSize(cInput->camBoardWidth, cInput->camBoardHeight);

    // Allocate storage
    CvMat* image_points = cvCreateMat(cInput->numberBoards * board_n, 2, CV_32FC1);
    CvMat* object_points = cvCreateMat(cInput->numberBoards * board_n, 3, CV_32FC1);
    CvMat* point_counts = cvCreateMat(cInput->numberBoards, 1, CV_32SC1);
    cOutput->camIntrinsic = cvCreateMat(3, 3, CV_32FC1);
    cOutput->camDistortion = cvCreateMat(5, 1, CV_32FC1);

    // Prepare some other variables
    CvPoint2D32f* corners = (CvPoint2D32f*)malloc(sizeof(CvPoint2D32f) * board_n);  // Board corners

    int corner_count;
    int successes = 0;
    int step, frame = 0;

    // Get image from the camera capture
    IplImage *image = cvQueryFrame(capture);
    IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);   // Subpixel

    // Capture corner views loop until we've got n_boards
    // Successful captures = all corners on the board are found
    while(successes < cInput->numberBoards)
    {
        // Process only every 5th frame (cInput->camSnapshotRate appears to be the intended value) so the user can move the chessboard
        if(frame++ % 5 == 0)
        {
            // Find chessboard corners
            int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

            // Get subpixel accuracy on those corners
            cvCvtColor(image, gray_image, CV_BGR2GRAY);
            cvFindCornerSubPix(gray_image, corners, corner_count,
                    cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS
                    + CV_TERMCRIT_ITER, 30, 0.1));

            // Draw chessboard corners
            cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
            //cvShowImage("Calibration", image);
            camWnd->ShowPattern(image);

            // If we got a good board, add it to our data
            if(corner_count == board_n)
            {
                step = successes * board_n;
                for(int i = step, j = 0; j < board_n; ++i, ++j)
                {
                    CV_MAT_ELEM(*image_points, float, i, 0) = corners[j].x;
                    CV_MAT_ELEM(*image_points, float, i, 1) = corners[j].y;
                    CV_MAT_ELEM(*object_points, float, i, 0) = cInput->camBoardSqMmW * (float)(j / cInput->camBoardWidth);
                    CV_MAT_ELEM(*object_points, float, i, 1) = cInput->camBoardSqMmH * (float)(j % cInput->camBoardHeight);
                    CV_MAT_ELEM(*object_points, float, i, 2) = 0.0f;
                }
                CV_MAT_ELEM(*point_counts, int, successes, 0) = board_n;
                successes++;
                printf("+ Captured frame %d of %d.\n", successes, cInput->numberBoards);
                camWnd->SetLabelText(wxString::Format("+ Captured frame %d of %d.\n", successes, cInput->numberBoards));
            }
        } // End skip cInput->camSnapshotRate between chessboard capture
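One detail worth flagging in the snippet above: the object-point row index is formed with j / camBoardWidth while the column index uses j % camBoardHeight, which only lines up when the board is square. A hedged sketch of the conventional indexing (an assumption about the intent, not a verified fix for this project):

// Sketch (assumption): index columns and rows consistently by the board width,
// scaling each axis by its own square size in millimetres.
CV_MAT_ELEM(*object_points, float, i, 0) = cInput->camBoardSqMmW * (float)(j % cInput->camBoardWidth);
CV_MAT_ELEM(*object_points, float, i, 1) = cInput->camBoardSqMmH * (float)(j / cInput->camBoardWidth);
CV_MAT_ELEM(*object_points, float, i, 2) = 0.0f;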
コード例 #14
0
int main(int argc, char *argv[])
{

	CvCapture *capture;			// = cvCreateCameraCapture( 0 );
	// assert( capture );

	if (argc != 5) {
		printf("\nERROR: Wrong number of input parameters");
		help();
		return -1;
	}
	help();
	board_w = atoi(argv[1]);
	board_h = atoi(argv[2]);
	n_boards = atoi(argv[3]);
	board_dt = atoi(argv[4]);

	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	capture = cvCreateCameraCapture(0);
	if (!capture) {
		printf("\nCouldn't open the camera\n");
		help();
		return -1;
	}

	cvNamedWindow("Calibration");
	cvNamedWindow("Raw Video");
	//ALLOCATE STORAGE
	CvMat *image_points = cvCreateMat(n_boards * board_n, 2, CV_32FC1);
	CvMat *object_points = cvCreateMat(n_boards * board_n, 3, CV_32FC1);
	CvMat *point_counts = cvCreateMat(n_boards, 1, CV_32SC1);
	CvMat *intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);
	CvMat *distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);

	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count;
	int successes = 0;
	int step, frame = 0;

	IplImage *image = cvQueryFrame(capture);
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);	//subpixel

	// CAPTURE CORNER VIEWS LOOP UNTIL WE’VE GOT n_boards 
	// SUCCESSFUL CAPTURES (ALL CORNERS ON THE BOARD ARE FOUND)
	//
	help();
	while (successes < n_boards) {
		//Skip every board_dt frames to allow user to move chessboard
		if ((frame++ % board_dt) == 0) {
			//Find chessboard corners:
			int found =
				cvFindChessboardCorners(image, board_sz, corners, &corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);

			//Get Subpixel accuracy on those corners
			cvCvtColor(image, gray_image, CV_BGR2GRAY);
			cvFindCornerSubPix(gray_image, corners, corner_count,
							   cvSize(11, 11), cvSize(-1, -1),
							   cvTermCriteria(CV_TERMCRIT_EPS +
											  CV_TERMCRIT_ITER, 30, 0.1));

			//Draw it
			cvDrawChessboardCorners(image, board_sz, corners,
									corner_count, found);
			//      cvShowImage( "Calibration", image );

			// If we got a good board, add it to our data
			if (corner_count == board_n) {
				cvShowImage("Calibration", image);	//show in color if we did collect the image
				step = successes * board_n;
				for (int i = step, j = 0; j < board_n; ++i, ++j) {
					CV_MAT_ELEM(*image_points, float, i, 0) = corners[j].x;
					CV_MAT_ELEM(*image_points, float, i, 1) = corners[j].y;
					CV_MAT_ELEM(*object_points, float, i, 0) = j / board_w;
					CV_MAT_ELEM(*object_points, float, i, 1) = j % board_w;
					CV_MAT_ELEM(*object_points, float, i, 2) = 0.0f;
				}
				CV_MAT_ELEM(*point_counts, int, successes, 0) = board_n;
				successes++;
				printf("Collected our %d of %d needed chessboard images\n",
					   successes, n_boards);
			} else
コード例 #15
0
ファイル: rtbivroi.c プロジェクト: Ur-Ideas-LabS/hamuju
int main(int argc, char*argv[]) {

	int device;
	cvNamedWindow(camwindow,CV_WINDOW_AUTOSIZE);
	CvCapture* capture;
	CvMat* intrinsic ;
	CvMat* distortion;
	if ((argc == 4) && (strcmp(argv[1],"-nocalib") == 0)){
		MODE = 1;
		H = (CvMat*)cvLoad(argv[2],NULL,NULL,NULL);
		device = atoi(argv[3]);
		capture = cvCaptureFromCAM( device) ; 
		Z=28;
			printf("\nUsage:\nReset: 'r'\nCrop-ROI: 'c'\nZoom: 'u' +/- 'd'\nSave: 's'\n Quit: 'q' | ESC key\n");
	}
	else if ((argc == 7) && (strcmp(argv[1],"-calib") == 0)) {
		MODE = 2;
		board_w = atoi(argv[2]);
		board_h = atoi(argv[3]);
		intrinsic = (CvMat*)cvLoad(argv[4],NULL,NULL,NULL);  
		distortion = (CvMat*)cvLoad(argv[5],NULL,NULL,NULL);
		device = atoi(argv[6]);
		capture = cvCaptureFromCAM( device) ;
			printf("\nUsage:\nZoom: 'u' +/- 'd'\nBird-I-View: 't'\n Quit: 'q' | ESC key\n");
	}else {
		printf("Error:Wrong numbers of input parameters\n");
		printf("* if -option == -nocalib then only first 2 parameters are required \
 			 	\n Homography matrix \
 			  	\n usb-camera device driver \
 			* if -option == -calib then only 5 addition parameter are required \
 				\n  #inner checkerboard corners generally it is 7x7  \
 				\n  Intrinsic (xml) from Camera Calibration \
 				\n  Distortion (xml) from Camera Calibration \
 				\n  usb-camera device driver\n");
		return -1;		
	}	
	
	if (capture == NULL ){
		perror("\nFailure to access camera device\n");
		return -1;
	}
	

	CvSize board_sz = cvSize( board_w, board_h );
	int board_n = board_w*board_h;
	int frame=0, found = 0,corner_count = 0;
	CvPoint2D32f corners[board_n];

	cvNamedWindow(BVwindow, CV_WINDOW_AUTOSIZE);	
	cvSetMouseCallback(BVwindow, on_mouse, 0);
	CvMat stub;
	IplImage *image = cvQueryFrame( capture ); 
	IplImage *gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel
	frame++;

	//Bird Eye View with ROI
	birdsview_image =  cvCreateImage( cvGetSize(image), image->depth,3  );
	Mbvimg = cvGetMat(birdsview_image,&stub,NULL,0);				
	while((MODE == 1 )){
		// Capture bird's view image every 10 frames
		if (frame % board_dt == 0) {
			cvWarpPerspective( 
				image,
				birdsview_image,
				H,
				CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS |CV_WARP_INVERSE_MAP  ,
				cvScalarAll(0) );
			cvShowImage( BVwindow, birdsview_image );
			updateImage();
			frame=1;	
		}

		char key = (char) cvWaitKey(2);
		switch( (char) key )
		{
			case 'r':
				reset();
				if (frame % board_dt != 0) // synchronized update
					updateImage();
				break;
			case 'c':
				BirdEyeROI();
				break;
			case 'u':
				Z+=0.5;
				CV_MAT_ELEM(*H,float,2,2) = Z;
				printf("\n%f",Z);
				break;
			case 'd':
				Z-=0.5;
				CV_MAT_ELEM(*H,float,2,2) = Z;
				printf("\n%f",Z);
				break;
			case 's':
				cvSaveImage("birdviewROIimg.bmp",birdsview_image,0);
				printf("\nImage Saved! Name: birdviewROIimg\n");
				break;
			case 27:
			case 'q':
				return 0;
				break;
		}				
		cvShowImage(camwindow, image); //overlay points in web cam stream window
		image = cvQueryFrame(capture); //Get next image	
		frame++;
	}
	//Bird Eye View to extract Homography matrix			
	while((MODE == 2 )){

		//Skip every board_dt frames to allow user to move chessboard
		if (frame % board_dt == 0) {
			found = cvFindChessboardCorners(image,board_sz,corners,&corner_count,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
			if (found){
				cvCvtColor( image,gray_image,CV_BGR2GRAY ); 
				cvFindCornerSubPix( gray_image,corners,corner_count,cvSize(11,11),cvSize(-1,-1),cvTermCriteria( CV_TERMCRIT_EPS |CV_TERMCRIT_ITER,30,0.1));
				frame=1;
			}				
		}
	
		char key = (char)cvWaitKey(2);
		switch (key)
		{
			case 'u':
				Z += 0.5;
				printf("\n%f",Z);
				break;
			case 'd':
				Z -= 0.5;
				printf("\n%f",Z);
				break;
			case 't':
				BirdsIview(intrinsic,distortion,cvCloneImage(image),corners);
				break;
			case 27:
			case 'q':
				if (H != NULL){
					cvSave("H.xml",H,NULL,NULL,cvAttrList(0,0));
					printf("\nHomography Saved! Name: H.xml\n");
				}
				return 0;
				break;
		}
		cvDrawChessboardCorners(image, board_sz, corners, corner_count,found);
		cvShowImage(camwindow, image); //overlay points in web cam stream window
		
		frame++;
		image = cvQueryFrame(capture); //Get next image			
	}

	return 0;

}
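BirdsIview() is not shown in this excerpt. Judging from the other bird's-eye-view examples on this page, it very likely undistorts the frame and derives the ground-plane homography from the four outermost chessboard corners; the sketch below is that assumed core step, reusing this file's globals, and is not the project's actual implementation:

// Sketch (assumption): compute H from the four outer corners, as in the other
// bird's-eye examples on this page. board_w, board_h, corners, H and Z are the
// variables already used above.
CvPoint2D32f objPts[4], imgPts[4];
objPts[0] = cvPoint2D32f(0.0f,           0.0f);
objPts[1] = cvPoint2D32f(board_w - 1.0f, 0.0f);
objPts[2] = cvPoint2D32f(0.0f,           board_h - 1.0f);
objPts[3] = cvPoint2D32f(board_w - 1.0f, board_h - 1.0f);
imgPts[0] = corners[0];
imgPts[1] = corners[board_w - 1];
imgPts[2] = corners[(board_h - 1) * board_w];
imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

if (H == NULL)
	H = cvCreateMat(3, 3, CV_32FC1);
cvGetPerspectiveTransform(objPts, imgPts, H);
CV_MAT_ELEM(*H, float, 2, 2) = Z;   // Z is the viewing height adjusted by the 'u'/'d' keys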
コード例 #16
0
ファイル: calibration.cpp プロジェクト: drewm1980/planepower
    void Calibrator::batchCalibrate()
    {
        log(Debug) << "(Calibrator) batchCalibrate entered" << endlog();
		

        int image_width = 0;
        int image_height = 0;
        log(Debug) << "calibration with " << _imageFiles.size() << " images" << endlog();
        for (unsigned int i = 0; i < _imageFiles.size(); ++i) {

            log(Debug) << "load image " << _imageFiles[i].c_str() <<endlog();
            cv::WImageBuffer1_b image( cvLoadImage(_imageFiles[i].c_str(), CV_LOAD_IMAGE_GRAYSCALE) );
            
            image_width = image.Width();
            image_height = image.Height();

            log(Debug) << "working on image name: " << _imageFiles[i].c_str() << endlog();

            int ncorners = 0;
            bool success = _detector->findCorners(image.Ipl(), &_corners[0], &ncorners);
            if (success) {
                log(Debug) << "found corners " << endlog();
                _cal->addView(&_corners[0], _detector->objectPoints(), _corners.size());
            } else {
                // no corners found in this image; it simply contributes no view
            }

            cv::WImageBuffer3_b display(image.Width(), image.Height());
            cvCvtColor(image.Ipl(), display.Ipl(), CV_GRAY2BGR);
            cvDrawChessboardCorners(display.Ipl(), cvSize(boardWidth, boardHeight),&_corners[0], ncorners, success);
            cvShowImage("Calibration", display.Ipl());
            cvWaitKey(0);
        }
        /// Calibrate the model
        _cal->calibrate(image_width, image_height);
        /// Save the intrinsic camera parameters to file
        string fileName(imageDirectory);
        fileName.append("Intrinsics.ini");
        _cal->model().save(fileName.c_str());


        fileName = imageDirectory;
        fileName.append("K_cv.xml");
        cvSave(fileName.c_str(),&_cal->model().K_cv() );
        log(Debug) << "intrisics saved to " << fileName << endlog();

        /// Copy the extrinsic camera parameters out of the object
        //cvCopy( _cal.extrinsics_ , _extrinsics);
        _cal->getExtrinsics(*_rot,*_trans);
        
        /// Save the extrinsic data as a test TODO: put it on a bufferport
        //cvSave("extrinsic.xml", &_extrinsics);

        log(Debug) << "before cvSave " << endlog();
        fileName = imageDirectory;
        fileName.append("rot.xml");
        cvSave(fileName.c_str(),_rot);
        fileName = imageDirectory;
        fileName.append("trans.xml");
        cvSave(fileName.c_str(), _trans);
        log(Debug) << "after cvSave " << endlog();

        log(Debug) << "(Calibrator) batchCalibrate finished" << endlog();
    }
コード例 #17
0
ファイル: calibration.cpp プロジェクト: janfrs/kwc-ros-pkg
int main( int argc, char** argv )
{
    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help = 
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
  // calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]
      printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
	    "     [-di <disk_images>       # Number of disk images before triggering undistortion\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
	else if( strcmp( s, "-di") == 0)
	{
	    if( sscanf( argv[++i], "%d", &images_from_file) != 1 || images_from_file < 3)
		return printf("Invalid di, must be >= 3\n"), -1;
	}
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( input_filename )
    {
        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );
    cvNamedWindow( "Undistort",1);
    int disk_image_cnt = 0;

    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key = 0;
        
        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
                disk_image_cnt++;
           }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view || (disk_image_cnt == images_from_file))
        {
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%03d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        if( blink )
            cvNot( view, view );
        //Rectify or Undistort the image
        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvShowImage("Image View", view);
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
 	    cvShowImage( "Undistort", view );
            cvWaitKey(0);
       }
	else{
	        cvShowImage( "Image View", view );
        	key = cvWaitKey(capture ? 50 : 500);
	}
        if( key == 27 )
            break;
        
        if( key == 'u' && mode == CALIBRATED ){
            undistort_image = !undistort_image;
	}

        if( capture && key == 'g' )
        {
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            if(disk_image_cnt == images_from_file)
                 undistort_image = !undistort_image;
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    return 0;
}
コード例 #18
0
ファイル: calibr.cpp プロジェクト: awg21/sikle850_win
int calibrCam( CvCapture* capture, int board_w, int board_h, int n_boards, CvMat* intrinsic_matrix, CvMat* distortion_coeffs ) {

	/*if(argc != 4){
		printf(“ERROR: Wrong number of input parameters\n”);
		return -1;
	}*/

	/*board_w = atoi(argv[1]);
	board_h = atoi(argv[2]);
	n_boards = atoi(argv[3]);*/
	
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );
	//CvCapture* capture = cvCreateCameraCapture( 0 );
	//assert( capture );
	cvNamedWindow( "Calibration" );
	
	//ALLOCATE STORAGE
	CvMat* image_points = cvCreateMat( 
		n_boards*board_n, 
		2, 
		CV_32FC1 );
	CvMat* object_points = cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
	CvMat* point_counts = cvCreateMat( n_boards, 1, CV_32SC1 );
	//CvMat* intrinsic_matrix = cvCreateMat( 3, 3, CV_32FC1 );
	//CvMat* distortion_coeffs = cvCreateMat( 5, 1, CV_32FC1 );
	CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
	int corner_count;
	int successes = 0;
	int step, frame = 0;
	IplImage *image = cvQueryFrame( capture );
	IplImage *gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel

	// CAPTURE CORNER VIEWS LOOP UNTIL WE’VE GOT n_boards
	// SUCCESSFUL CAPTURES (ALL CORNERS ON THE BOARD ARE FOUND)
	//
	int a = 0;

	while(/*successes < n_boards*/  (a+2) < n_boards ) {
		//Skip every board_dt frames to allow user to move chessboard
		a = (int)(cvGetCaptureProperty( capture, CV_CAP_PROP_POS_FRAMES)/1000);
		int b = (int)cvGetCaptureProperty( capture, CV_CAP_PROP_POS_MSEC);
		if(/*frame++*/a % board_dt == 0) {
			//Find chessboard corners:
			printf( "Frame # %d\n", a );
			printf( "Frame  %d ms\n", b );
			int found = cvFindChessboardCorners(
				image, board_sz, corners, &corner_count,
				CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
				);
			//Get Subpixel accuracy on those corners
			cvCvtColor(image, gray_image, CV_BGR2GRAY);
			cvFindCornerSubPix(gray_image, corners, corner_count,
				cvSize(11,11),cvSize(-1,-1), cvTermCriteria(
					CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
			//Draw it
			cvDrawChessboardCorners(image, board_sz, corners,
				corner_count, found);
			cvShowImage( "Calibration", image );
			// If we got a good board, add it to our data
			if( corner_count == board_n ) {
				step = successes*board_n;
				for( int i=step, j=0; j<board_n; ++i, ++j ) {
					CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
					CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
					CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
					CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
					CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
				}
				CV_MAT_ELEM( *point_counts, int, successes, 0) = board_n;
				successes++;
			}
		} 	//end skip board_dt between chessboard capture
コード例 #19
0
/* this handles computation of the distortion matrices of the camera */
void calibration(int board_w, int board_h, int n_boards, float square_width,
				 float square_height, CvSize resolution, int device_id) {

	int board_n = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );

	VideoCapture capture(device_id,resolution.width,resolution.height);
	IplImage *image = capture.CreateCaptureImage();

	cvNamedWindow( "Calibration" );

	//ALLOCATE STORAGE
	CvMat* image_points = cvCreateMat(n_boards*board_n,2,CV_32FC1);
	CvMat* object_points = cvCreateMat(n_boards*board_n,3,CV_32FC1);
	CvMat* point_counts = cvCreateMat(n_boards,1,CV_32SC1);
	CvMat* intrinsic_matrix = cvCreateMat(3,3,CV_32FC1);
	CvMat* distortion_coeffs = cvCreateMat(5,1,CV_32FC1);

	CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
	int corner_count;
	int successes = 0;
	int step, frame = 0;

	IplImage *gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel
	
	// CAPTURE CORNER VIEWS LOOP UNTIL WE'VE GOT n_boards
	// SUCCESSFUL CAPTURES (ALL CORNERS ON THE BOARD ARE FOUND)
	//
	cvNamedWindow("Live View");
	while(successes < n_boards) {
		char c=0;
		while(c!='c'){
			capture.waitFrame(image);
			cvShowImage("Live View",image);
			c=cvWaitKey(1);
		}

		//==============
		//Find chessboard corners:
		int found = cvFindChessboardCorners(
			image, board_sz, corners, &corner_count,
			CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
			);

		//Get Subpixel accuracy on those corners
		cvCvtColor(image, gray_image, CV_BGR2GRAY);
		cvFindCornerSubPix(gray_image, corners, corner_count,
			cvSize(11,11),cvSize(-1,-1), cvTermCriteria(
			CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

		//Draw it
		cvDrawChessboardCorners(image, board_sz, corners,
			corner_count, found);
		cvShowImage( "Calibration", image );

		// If we got a good board, add it to our data
		if( corner_count == board_n ) {
			step = successes*board_n;
			for( int i=step, j=0; j<board_n; ++i,++j ) {
				CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
				CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
				CV_MAT_ELEM(*object_points,float,i,0) = square_height*(j/board_w);
				CV_MAT_ELEM(*object_points,float,i,1) = square_width*(j%board_w);
				CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
			}
			CV_MAT_ELEM(*point_counts, int,successes,0) = board_n;
			successes++;
		} else {
コード例 #20
0
int CCalibration::CalibrateFromCAMorAVI( int nImages, int nChessRow, int nChessCol, int nSquareSize, int nSkip, CvMat* intrinsic, CvMat* distortion, int fromCamera, char* fName)
{
    int nChessSize = nChessRow*nChessCol;
    int nAllPoints = nImages*nChessSize;
    int i, j, k;
    int corner_count, found;
    int* p_count = new int[nImages];
    IplImage **src_img = new IplImage*[nImages];
    CvSize pattern_size = cvSize (nChessCol, nChessRow);
    CvPoint3D32f* objects = new CvPoint3D32f[nAllPoints];
    CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * nAllPoints);
    CvMat object_points;
    CvMat image_points;
    CvMat point_counts;
    CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
    CvMat *translation = cvCreateMat (1, 3, CV_32FC1);

    // (1)
    for (i = 0; i < nImages; i++) {
        for (j = 0; j < nChessRow; j++) {
            for (k = 0; k < nChessCol; k++) {
                objects[i * nChessSize + j * nChessCol + k].x = (float)j * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].y = (float)k * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].z = 0.0;
            }
        }
    }
    cvInitMatHeader (&object_points, nAllPoints, 3, CV_32FC1, objects);

    // (2)
    CvCapture *capture = NULL;
    if (fromCamera)
        capture = cvCaptureFromCAM(0);
    else
        capture = cvCaptureFromAVI(fName);
    assert(capture);

    int found_num = 0;
    cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
    cvNamedWindow ("Webcam", CV_WINDOW_AUTOSIZE);
    int c = 0;
    for (i = 0; i < nImages; i++)
    {
        IplImage * frame;
        while (true)
        {
            frame = cvQueryFrame(capture);
            cvShowImage("Webcam", frame);

            if (c++ % nSkip == 0)
            {
                found = cvFindChessboardCorners (frame, pattern_size, &corners[i * nChessSize], &corner_count);
                if (found)
                {
                    char s[100];
                    sprintf(s, "%d.png", i);
                    cvSaveImage(s, frame);
                    src_img[i] = cvCloneImage(frame);
                    fprintf (stderr, "ok\n");
                    found_num++;
                    break;
                }
            }
            cvWaitKey(5);
        }
        fprintf (stderr, "%02d...", i);

        // (4)
        IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
        cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
        cvFindCornerSubPix (src_gray, &corners[i * nChessSize], corner_count,
                            cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
        cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * nChessSize], corner_count, found);
        p_count[i] = corner_count;
        cvShowImage ("Calibration", src_img[i]);
        //cvWaitKey (0);
    }
    cvDestroyWindow ("Calibration");

    if (found_num != nImages)
        return -1;
    cvInitMatHeader (&image_points, nAllPoints, 1, CV_32FC2, corners);
    cvInitMatHeader (&point_counts, nImages, 1, CV_32SC1, p_count);
    // (5)
    cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

    // (6)
    /*
    CvMat sub_image_points, sub_object_points;
    int base = 0;
    cvGetRows (&image_points, &sub_image_points, base * nChessSize, (base + 1) * nChessSize);
    cvGetRows (&object_points, &sub_object_points, base * nChessSize, (base + 1) * nChessSize);
    cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
    */
    // (7)ToXML
    /*
    CvFileStorage *fs;
    fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
    cvWrite (fs, "intrinsic", intrinsic);
    cvWrite (fs, "rotation", rotation);
    cvWrite (fs, "translation", translation);
    cvWrite (fs, "distortion", distortion);
    cvReleaseFileStorage (&fs);*/

    for (i = 0; i < nImages; i++)
        cvReleaseImage (&src_img[i]);
    delete [] p_count;
    delete [] src_img;
    delete [] objects;
    return 1;
}
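A note on the call above: cvCalibrateCamera2 is given a hard-coded cvSize(640, 480); if the capture delivers a different resolution the intrinsics will be slightly off. A minimal sketch of the safer variant, taking the size from the captured frames themselves (a suggested adjustment, not part of the original code):

// Sketch (assumption): use the real frame size for calibration instead of 640x480.
CvSize calib_size = cvGetSize(src_img[0]);
cvCalibrateCamera2(&object_points, &image_points, &point_counts,
                   calib_size, intrinsic, distortion);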
コード例 #21
0
int
main (int argc, char *argv[])
{
	// IMAGE_NUM, PAT_ROW, PAT_COL,PAT_SIZE, ALL_POINTS, CHESS_SIZE

	/*
	if (argc < 6)
	{
		std::cout<< "ERROR : augment is incorrect" << std::endl;
		return -1;
	}
	*/

	//int PAT_ROW   	= atoi(argv[3]);
	//int PAT_COL   	= atoi(argv[4]);
	//int CHESS_SIZE	= atoi(argv[5]);
	//int PAT_SIZE = PAT_ROW*PAT_COL;
	char* NAME_IMG_IN		= argv[1];
	char* NAME_XML_OUT	= argv[2];	// uncommented: used below when writing the result with cvOpenFileStorage

	int i,j;
	int corner_count, found;
	IplImage *src_img;
	CvSize pattern_size = cvSize(PAT_COL, PAT_ROW);
	CvMat image_points;
	CvMat object_points;
	CvMat *intrinsic, *distortion;
	CvMat *rotation = cvCreateMat(1, 3, CV_32FC1);
	CvMat *rotationConv = cvCreateMat(3, 3, CV_32FC1);
	CvMat *translation = cvCreateMat(1, 3, CV_32FC1);
	CvPoint3D32f objects[PAT_SIZE];
	CvFileStorage *fs;
	CvFileNode *param;
	CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * PAT_SIZE);

	// (1) Load the target image for calibration
	if ( ( src_img = cvLoadImage(NAME_IMG_IN, CV_LOAD_IMAGE_COLOR) ) == 0)
	//if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
	{
		std::cout<< "ERROR : input image is not exist  or  augment is incorrect" << std::endl;
		return -1;
	}

	// Set up the 3D world coordinates of the board corners
	for (i = 0; i < PAT_ROW; i++) {
		for (j = 0; j < PAT_COL; j++) {
			objects[i * PAT_COL + j].x = i * CHESS_SIZE;
			objects[i * PAT_COL + j].y = j * CHESS_SIZE;
			objects[i * PAT_COL + j].z = 0.0;
		}
	}
	cvInitMatHeader(&object_points, PAT_SIZE, 3, CV_32FC1, objects);

	// Detect the corners of the chessboard (calibration pattern)
	int found_num = 0;
//	cvNamedWindow("Calibration", CV_WINDOW_AUTOSIZE);
	found = cvFindChessboardCorners(src_img, pattern_size, &corners[0], &corner_count);
	fprintf(stderr, "corner:%02d...\n", corner_count);
	if (found) {
		fprintf(stderr, "ok\n");
	} else {
		fprintf(stderr, "fail\n");
	}

	// (4) Refine the corner positions to subpixel accuracy and draw them
	IplImage *src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);
	cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
	cvFindCornerSubPix (src_gray, &corners[0], corner_count,
			cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	cvDrawChessboardCorners (src_img, pattern_size, &corners[0], corner_count, found);

//	cvShowImage ("Calibration", src_img);
//	cvWaitKey (0);
//	cvDestroyWindow("Calibration");
	cvShowImage ("Calibration", src_img);

	cvInitMatHeader(&image_points, PAT_SIZE, 1, CV_32FC2, corners);


	// (2) Load the camera parameter file
	fs = cvOpenFileStorage ("xml/rgb.xml", 0, CV_STORAGE_READ);
	param = cvGetFileNodeByName (fs, NULL, "intrinsic");
	intrinsic = (CvMat *) cvRead (fs, param);
	param = cvGetFileNodeByName (fs, NULL, "distortion");
	distortion = (CvMat *) cvRead (fs, param);
	cvReleaseFileStorage (&fs);

	// (3) Estimate the extrinsic parameters
	CvMat sub_image_points, sub_object_points;
	int base = 0;
	cvGetRows(&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
	cvGetRows(&object_points, &sub_object_points, base * PAT_SIZE, (base + 1)* PAT_SIZE);
	cvFindExtrinsicCameraParams2(&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
	int ret = cvRodrigues2(rotation, rotationConv);

//	int cols = sub_object_points.rows;
//	printf("cols = %d\n", cols);
//	printf("%f\n",sub_object_points.data.fl[0]);

	// mm -> m
	for (i = 0; i < translation->cols; i++) { translation->data.fl[i] = translation->data.fl[i] / 1000;}

	// (4) Write the result out to an XML file
	//fs = cvOpenFileStorage(argv[2], 0, CV_STORAGE_WRITE);
	fs = cvOpenFileStorage(NAME_XML_OUT, 0, CV_STORAGE_WRITE);
	cvWrite(fs, "rotation", rotationConv);
	cvWrite(fs, "translation", translation);
	cvReleaseFileStorage(&fs);

	/////////////////////////////////////////////////
	// write out py
	if(1)
	{
		cv::Mat ttt(translation);
		cv::Mat rrr(rotationConv);


		char data2Write[1024];
		char textFileName[256];
		sprintf( textFileName , "cbCoord/cbOneShot.py");
		std::ofstream outPy(textFileName);

		outPy << "import sys"						<<std::endl;
		outPy << "sys.path.append('../')"			<<std::endl;
		outPy << "from numpy import *"				<<std::endl;
		outPy << "from numpy.linalg import svd"	<<std::endl;
		outPy << "from numpy.linalg import inv"	<<std::endl;
		outPy << "from chessboard_points import *"<<std::endl;
		outPy << "sys.path.append('../geo')"		<<std::endl;
		outPy << "from geo import *"				<<std::endl;

		/*
		///////////////////////////////////////////////////////////////////////////////////
		// out translation and rotation as xyzabc list
		outPy << "xyzabc = []"	<<std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation and rotation as xyzabc list
		///////////////////////////////////////////////////////////////////////////////////
		 */

		///////////////////////////////////////////////////////////////////////////////////
		// out translation
		outPy << "ttt = []"	<<std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation
		//////////////////////////////////////////////////////////////////////////////////////

		///////////////////////////////////////////////////////////////////////////////////
		// out rotation
		outPy << "rrr = []"	<<std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(0), rrr.at<float>(1), rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(3), rrr.at<float>(4), rrr.at<float>(5) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(6), rrr.at<float>(7), rrr.at<float>(8) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out rotation
		//////////////////////////////////////////////////////////////////////////////////////

	
		/////////////////////////////////////////////////////////////////
		outPy<< "_T = FRAME( vec=ttt, mat=rrr )" << std::endl;
		/////////////////////////////////////////////////////////////////

	}
	// write out py
	/////////////////////////////////////////////////
	
	std::cout<< "press any key..."<< std::endl;
	cvWaitKey (0);
	cvDestroyWindow("Calibration");


	cvReleaseImage(&src_img);
	cvReleaseMat(&intrinsic);
	cvReleaseMat(&distortion);

	return 0;
}
コード例 #22
0
void bird_eye() {
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");

	IplImage* image = cvLoadImage("./Resource/bird-eye.jpg", 1);
	IplImage* gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	IplImage* t = cvCloneImage(image);

	cvRemap(t, image, mapx, mapy);
	
	cvNamedWindow("Chessboard");
	cvShowImage("Chessboard", image);
	int c = cvWaitKey(-1);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	
	int found = cvFindChessboardCorners(
			image, 
			board_sz, 
			corners, 
			&corner_count, 
			CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
	);
	
	if(!found){
		printf("couldn't aquire chessboard!\n");
		return;
	}
	
	cvFindCornerSubPix(
			gray_image, 
			corners, 
			corner_count, 
			cvSize(11, 11), 
			cvSize(-1, -1), 
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
	);

	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;			objPts[0].y = 0;
	objPts[1].x = board_w - 1;	objPts[1].y = 0;
	objPts[2].x = 0;			objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;	objPts[3].y = board_h - 1;
	imgPts[0]   = corners[0];
	imgPts[1]	= corners[board_w - 1];
	imgPts[2]	= corners[(board_h - 1) * board_w];
	imgPts[3]	= corners[(board_h - 1) * board_w + board_w - 1];

	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	cvDrawChessboardCorners(
		image,
		board_sz,
		corners,
		corner_count,
		found
	);

	cvShowImage("Chessboard", image);

	CvMat *H = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	float z = 25;
	int key = 0;
	IplImage * birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");

	while(key != 27) {
		CV_MAT_ELEM(*H, float, 2, 2) = z;

		cvWarpPerspective(
			image,
			birds_image,
			H,
			CV_INTER_LINEAR| CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
		);

		cvShowImage("Birds_Eye", birds_image);

		key = cvWaitKey();
		if(key == 'u') z += 0.5;
		if(key == 'd') z -= 0.5;
	}

	cvSave("H.xml", H);
}
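The homography saved above maps chessboard-plane coordinates (in units of squares, scaled by the final value of H(2,2)) into pixels of the undistorted image. As a hedged illustration of how it can be used for a single point, a board coordinate can be pushed through cvPerspectiveTransform; the point (2, 3) below is purely illustrative:

// Sketch (assumption): map one chessboard-plane point into the undistorted
// image using the homography H computed above.
CvMat* src_pt = cvCreateMat(1, 1, CV_32FC2);
CvMat* dst_pt = cvCreateMat(1, 1, CV_32FC2);
CV_MAT_ELEM(*src_pt, CvPoint2D32f, 0, 0) = cvPoint2D32f(2.0f, 3.0f); // a point in board-square units
cvPerspectiveTransform(src_pt, dst_pt, H);
CvPoint2D32f px = CV_MAT_ELEM(*dst_pt, CvPoint2D32f, 0, 0);
printf("board (2,3) -> pixel (%.1f, %.1f)\n", px.x, px.y);
cvReleaseMat(&src_pt);
cvReleaseMat(&dst_pt);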
コード例 #23
0
ファイル: calib.cpp プロジェクト: fkanehiro/etc
int
main (int argc, char *argv[])
{
  int i, j, k;
  int corner_count, found;
  int p_count[IMAGE_NUM];
  IplImage *src_img[IMAGE_NUM];
  CvSize pattern_size = cvSize (PAT_COL, PAT_ROW);
  CvPoint3D32f objects[ALL_POINTS];
  CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * ALL_POINTS);
  CvMat object_points;
  CvMat image_points;
  CvMat point_counts;
  CvMat *intrinsic = cvCreateMat (3, 3, CV_32FC1);
  CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *translation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *distortion = cvCreateMat (1, 4, CV_32FC1);

  // (1) Load the calibration images
  for (i = 0; i < IMAGE_NUM; i++) {
    char buf[32];
    sprintf (buf, "calib_img/%02d.png", i);
    if ((src_img[i] = cvLoadImage (buf, CV_LOAD_IMAGE_COLOR)) == NULL) {
      fprintf (stderr, "cannot load image file : %s\n", buf);
    }
  }

  // (2) Set up the 3D world coordinates
  for (i = 0; i < IMAGE_NUM; i++) {
    for (j = 0; j < PAT_ROW; j++) {
      for (k = 0; k < PAT_COL; k++) {
        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;
      }
    }
  }
  cvInitMatHeader (&object_points, ALL_POINTS, 3, CV_32FC1, objects);

  // (3) Detect the corners of the chessboard (calibration pattern)
  int found_num = 0;
  cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
  for (i = 0; i < IMAGE_NUM; i++) {
    found = cvFindChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], &corner_count);
    fprintf (stderr, "%02d...", i);
    if (found) {
      fprintf (stderr, "ok\n");
      found_num++;
    }
    else {
      fprintf (stderr, "fail\n");
    }
    // (4) Refine corner positions to subpixel accuracy and draw them
    IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
    cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
    cvFindCornerSubPix (src_gray, &corners[i * PAT_SIZE], corner_count,
                        cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
    cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], corner_count, found);
    p_count[i] = corner_count;
    cvShowImage ("Calibration", src_img[i]);
    cvWaitKey (0);
  }
  cvDestroyWindow ("Calibration");

  if (found_num != IMAGE_NUM)
    return -1;
  cvInitMatHeader (&image_points, ALL_POINTS, 1, CV_32FC2, corners);
  cvInitMatHeader (&point_counts, IMAGE_NUM, 1, CV_32SC1, p_count);

  // (5) Estimate the intrinsic parameters and distortion coefficients
  cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

  // (6) Estimate the extrinsic parameters
  CvMat sub_image_points, sub_object_points;
  int base = 0;
  cvGetRows (&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvGetRows (&object_points, &sub_object_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);

  // (7) Write the results out to an XML file
  CvFileStorage *fs;
  fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
  cvWrite (fs, "intrinsic", intrinsic);
  cvWrite (fs, "rotation", rotation);
  cvWrite (fs, "translation", translation);
  cvWrite (fs, "distortion", distortion);
  cvReleaseFileStorage (&fs);

  for (i = 0; i < IMAGE_NUM; i++) {
    cvReleaseImage (&src_img[i]);
  }

  return 0;
}
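The program above writes the intrinsic matrix, distortion coefficients, rotation and translation into camera.xml. As a hedged follow-up sketch (file names follow the example's own conventions but are assumptions), the parameters can be read back and used to undistort one of the calibration images:

// Sketch (assumption): load camera.xml written above and undistort one image.
CvFileStorage* fs = cvOpenFileStorage("camera.xml", 0, CV_STORAGE_READ);
CvMat* intrinsic  = (CvMat*)cvReadByName(fs, NULL, "intrinsic");
CvMat* distortion = (CvMat*)cvReadByName(fs, NULL, "distortion");
cvReleaseFileStorage(&fs);

IplImage* src = cvLoadImage("calib_img/00.png", CV_LOAD_IMAGE_COLOR);
IplImage* dst = cvCloneImage(src);
cvUndistort2(src, dst, intrinsic, distortion);   // remove lens distortion
cvSaveImage("undistorted_00.png", dst);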
コード例 #24
0
ファイル: calibrator.cpp プロジェクト: goretkin/kwc-ros-pkg
  void process_image()
  {

    //    std::cout << "Checking publish count: " << image_in->publish_count << std::endl;

    //    image_in->lock_atom();

    if (image_in->publish_count > 0) {

      cvSetData(cvimage_in, codec_in->get_raster(), 3*704);
      cvConvertImage(cvimage_in, cvimage_bgr, CV_CVTIMG_SWAP_RB);

      //      image_in->unlock_atom();

      CvSize board_sz = cvSize(12, 12);
      CvPoint2D32f* corners = new CvPoint2D32f[12*12];
      int corner_count = 0;
    
      //This function has a memory leak in the current version of opencv!
      int found = cvFindChessboardCorners(cvimage_bgr, board_sz, corners, &corner_count, 
      					  CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);



      IplImage* gray = cvCreateImage(cvSize(cvimage_bgr->width, cvimage_bgr->height), IPL_DEPTH_8U, 1);
      cvCvtColor(cvimage_bgr, gray, CV_BGR2GRAY);
      cvFindCornerSubPix(gray, corners, corner_count, 
      			 cvSize(5, 5), cvSize(-1, -1),
      			 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f ));
      cvReleaseImage(&gray);


      if (take_pic && corner_count == 144) {
	std::stringstream ss;
	img_cnt++;
	ss << dir_name << "/Image" << img_cnt << ".jpg";
	//	std::ofstream imgfile(ss.str().c_str());
	//	imgfile.write((char*)image_in->jpeg_buffer, image_in->compressed_size);
	//	imgfile.close();

	cvSaveImage(ss.str().c_str(), cvimage_bgr);
	
	ss.str("");
	ss << dir_name << "/Position" << img_cnt << ".txt";

	std::ofstream posfile(ss.str().c_str());
	observe->lock_atom();
	posfile << "P: " << observe->pan_val << std::endl
		<< "T: " << observe->tilt_val << std::endl
		<< "Z: " << observe->lens_zoom_val << std::endl
		<< "F: " << observe->lens_focus_val;
	observe->unlock_atom();

	posfile.close();

	take_pic = false;
      }

      float maxdiff = 0;

      for(int c=0; c<12*12; c++) {
	float diff = sqrt( pow(corners[c].x - last_corners[c].x, 2.0) + 
		     pow(corners[c].y - last_corners[c].y, 2.0));
	last_corners[c].x = corners[c].x;
	last_corners[c].y = corners[c].y;

	if (diff > maxdiff) {
	  maxdiff = diff;
	}
      }

      printf("Max diff: %g\n", maxdiff);


      cvDrawChessboardCorners(cvimage_bgr, board_sz, corners, corner_count, found);

      if (undistort) {
	cvUndistort2(cvimage_bgr, cvimage_undistort, intrinsic_matrix, distortion_coeffs);
      } else {
	cvCopy(cvimage_bgr, cvimage_undistort);
      }

      CvFont font;
      cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);    
      std::stringstream ss;

      observe->lock_atom();
      ss << "P: " << observe->pan_val;
      ss << " T: " << observe->tilt_val;
      ss << " Z: " << observe->lens_zoom_val;
      ss << " F: " << observe->lens_focus_val;
      observe->unlock_atom();
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,30), &font, CV_RGB(255,0,0));

      ss.str("");

      ss << "Found " << corner_count << " corners";
      if (centering) {
	ss << " -- Autocentering";
      }
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,60), &font, CV_RGB(255,0,0));

      image_out->width = 704;
      image_out->height = 480;
      image_out->compression = "raw";
      image_out->colorspace = "rgb24";

      //      codec_out->realloc_raster_if_needed();
      cvSetData(cvimage_out, codec_out->get_raster(), 3*image_out->width);      
      cvConvertImage(cvimage_undistort, cvimage_out, CV_CVTIMG_SWAP_RB);

      codec_out->set_flow_data();

      image_out->publish();


      CvPoint2D32f COM = cvPoint2D32f(0,0);
    
      if (centering && corner_count > 20) {
	//average corners:
	for (int i = 0; i < corner_count; i++) {
	  COM.x += corners[i].x / corner_count;
	  COM.y += corners[i].y / corner_count;
	}
      
	if ( (fabs(COM.x - 354.0) > 10) || (fabs(COM.y - 240.0) > 10) ) {
	  float rel_pan,rel_tilt;

	  rel_pan = (COM.x - 354.0) * .001;
	  rel_tilt = -(COM.y - 240.0) * .001;

	  control->pan_val = rel_pan;      
	  control->pan_rel = true;
	  control->pan_valid = true;

	  control->tilt_val = rel_tilt;
	  control->tilt_rel = true;
	  control->tilt_valid = true;

	  control->publish();
	}

      }

      delete[] corners;
      
    } else {
      //      image_in->unlock_atom();
    }
  }
コード例 #25
0
void get_bird_eye(CvCapture* capture) {
	printf("haha\n");
	//get bird_eye picture
	cvNamedWindow("Get_birdeye");
	
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");
	
	IplImage *image = cvQueryFrame(capture);
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	
	int board_n = board_w * board_h;
	
	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	CvSize board_sz = cvSize(board_w, board_h);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	
	int frame = 0;
	int corner_count;
	bool catch_bird = false;


	while(!catch_bird) {
		IplImage *t = cvCloneImage(image); // clone of the raw frame: remap source, and the image saved on 's'
		if(frame++ % board_dt == 0) {
			cvRemap(t, image, mapx, mapy); // undistort into image, using the clone as the source
			
			int found = cvFindChessboardCorners(
					image, 
					board_sz, 
					corners, 
					&corner_count,
					CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
			);

			cvCvtColor(
					image, 
					gray_image, 
					CV_RGB2GRAY
			);
			
			cvFindCornerSubPix(
					gray_image, 
					corners, 
					corner_count, 
					cvSize(11,11), 
					cvSize(-1, -1), 
					cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)
			);
			
			cvDrawChessboardCorners(
					image, 
					board_sz, 
					corners, 
					corner_count, 
					found
			);
			
			cvShowImage("Get_birdeye", image);
			//get a good board, add to data
			if(corner_count == board_n) {
				catch_bird = true;
				printf("That's it!\n");
			}
		}
		int c;
		if(catch_bird) c = cvWaitKey(-1);
		else c = cvWaitKey(15);
		if(catch_bird && c == 's') {
			cvSaveImage("./Resource/bird-eye.jpg", t, 0);
			printf("save at ./Resource/bird-eye.jpg\n");
		}
		else catch_bird = false;
		if(c == 'p') {
			c = 0;
			while(c!='p' && c!= 27){
				c = cvWaitKey(250);
			}
		}
		cvReleaseImage(&t); // release the per-frame clone to avoid leaking it every iteration
		image = cvQueryFrame(capture);
	}
}
コード例 #26
0
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);
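	// H maps chessboard-plane coordinates (objPts) to image coordinates (imgPts);
	// it is used below with CV_WARP_INVERSE_MAP so the warp resamples the image
	// back onto the ground plane.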

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
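		// Writing Z into H(2,2) scales the homogeneous coordinate and acts as the
		// apparent height of the virtual overhead view; 'u'/'d' adjust it below.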
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
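	// Solve for the chessboard pose relative to the camera: a Rodrigues rotation
	// vector plus a translation vector, then expand the rotation into a 3x3 matrix.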
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
コード例 #27
0
void cameraCalibration() {
	board_w = 5; // Board width in squares
	board_h = 8; // Board height
	n_boards = 8; // Number of boards
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CameraControl* cc = camera_control_new(0);


	//cvNamedWindow("Calibration", 0);
	// Allocate storage
	CvMat* image_points = cvCreateMat(n_boards * board_n, 2, CV_32FC1);
	CvMat* object_points = cvCreateMat(n_boards * board_n, 3, CV_32FC1);
	CvMat* point_counts = cvCreateMat(n_boards, 1, CV_32SC1);
	CvMat* intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);
	CvMat* distortion_coeffs = cvCreateMat(5, 1, CV_32FC1);
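	// image_points collects one (x,y) row per corner across all views, object_points
	// the matching chessboard coordinates, and point_counts the corners found per view.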
	IplImage *image;

	CvPoint2D32f corners[board_n];
	int i = 0;
	int j = 0;

	for (i = 0; i < board_n; i++)
		corners[i] = cvPoint2D32f(0, 0);

	int corner_count;
	int successes = 0;
	int step = 0;



	while (1) {
		cvWaitKey(10);
		image = camera_control_query_frame(cc);
		if (image)
			break;
	}
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);

	// Capture corner views: loop until we've got n_boards
	// successful captures (all corners on the board are found)
	while (successes < n_boards) {
		// Skip every board_dt frames to give the user time to move the chessboard
		// (the frame-skipping check below is currently commented out)
		image = camera_control_query_frame(cc); // Get next image
		//if (frame++ % board_dt == 0)
		{
			// Find chessboard corners:
			int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
			cvWaitKey(1);

			// Get subpixel accuracy on those corners
			cvCvtColor(image, gray_image, CV_BGR2GRAY);
			cvFindCornerSubPix(gray_image, corners, corner_count, cvSize(11, 11), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

			// Draw it
			cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
			char text[222];
			sprintf(text, "calibration image %d/%d", successes, n_boards);
			th_put_text(image, text, cvPoint(20, 20), th_white, 1.0);
			cvShowImage("Calibration", image);

			// If we got a good board, add it to our data
			if (corner_count == board_n ) {
				step = successes * board_n;
				for (i = step, j = 0; j < board_n; ++i, ++j) {
					CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
					CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
					CV_MAT_ELEM( *object_points, float, i, 0 ) = j / board_w;
					CV_MAT_ELEM( *object_points, float, i, 1 ) = j % board_w;
					CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
				}
				CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
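				// Each accepted view appends board_n rows of point data; the calibration
				// call that consumes them is not shown in this excerpt.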
				successes++;
			}

		}
	}
コード例 #28
0
ファイル: camera.cpp プロジェクト: Vorago/iwb
    void Camera::calibrate(Capture* cpt, Presentation* prs){
#ifdef NO_CALIBRATION
        printf("DEBUG: camera calibration disabled!\n");

        IplImage* cf = cvQueryFrame(cpt->getCapture());

        this->width = cf->width;
        this->height = cf->height;
        this->projectorWidth = this->width;
        this->projectorHeight = this->height;
        this->projectorOrigin = cvPoint(0,0);

//        cvReleaseImage (&cf);

        return;
#endif

        printf("DEBUG: calibrating camera\n");

        CvSize nsquares = cvSize(6,4);
        CvPoint2D32f* corners = new CvPoint2D32f[ 6*4 ];
        IplImage *cb = cvLoadImage("res/chessboard.png",1);
        prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), NULL, NULL, cb);
        prs->applyBuffer();
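        // Show the chessboard image full-screen through the projector so the camera
        // can locate the projected area in its own image coordinates.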

        //IplImage *fake = cvLoadImage("fake.jpg", 1);
        //IplImage *src = cvCreateImage(cvGetSize(fake), IPL_DEPTH_8U, 1);
        //cvCvtColor(fake, src, CV_RGB2GRAY);

        IplImage *frame;
        bool patternFound = false;
        int cc;

        while (!patternFound) {
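            // Keep grabbing frames until the projected 6x4 board is detected;
            // CV_CALIB_CB_FAST_CHECK lets frames without a board be rejected quickly.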
            printf("trying to find chessboard\n");
            frame = cvQueryFrame(cpt->getCapture());
            patternFound = cvFindChessboardCorners(frame, nsquares, corners, &cc,
                                                   CV_CALIB_CB_ADAPTIVE_THRESH | 
                                                   CV_CALIB_CB_FILTER_QUADS | 
                                                   CV_CALIB_CB_FAST_CHECK |
                                                   CV_CALIB_CB_NORMALIZE_IMAGE);
//            prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), frame);

//            prs->applyBuffer();
            cvWaitKey(5);
        }
        this->width = frame->width;
        this->height = frame->height;
        printf("\n");
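        // Extrapolate 1.5 square widths/heights beyond the outermost detected inner
        // corners to estimate where the projected image begins and ends.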
        //float x = 2*corners[0].x-corners[1].x,
        //      y = 2*corners[0].y-corners[7].y;
        float x = corners[0].x-1.5*(corners[1].x-corners[0].x),
              y = corners[0].y-1.5*(corners[6].y-corners[0].y);

        this->projectorOrigin = cvPoint((int)x, (int)y);

        //x = 2*corners[34].x-corners[33].x;
        //y = 2*corners[34].y-corners[27].y;
        x = corners[23].x+1.5*(corners[23].x-corners[22].x);
        y = corners[23].y+1.5*(corners[23].y-corners[17].y);


        this->projectorWidth = (int)(x-this->projectorOrigin.x);
        this->projectorHeight = (int)(y-this->projectorOrigin.y);

        printf("Projector: (%d, %d), %dx%d\n", this->projectorOrigin.x, this->projectorOrigin.y, this->projectorWidth, this->projectorHeight);

        //cpt->saveFrame("cbfound.jpg", frame);

        // improve result (though through testing, it seems to make it worse - disabling)
        // could use simple linear regression to get a "better" calibration.
//        IplImage *frame_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
//        cvFindCornerSubPix( frame_gray, corners, cc, cvSize( 11, 11 ), 
//                        cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

        // draw calibration result
        cvDrawChessboardCorners( frame, nsquares , corners, cc, patternFound );
        IplImage* bg;
        bg = cvLoadImage("res/bg.jpg", CV_LOAD_IMAGE_UNCHANGED);
        prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), NULL, NULL, bg);
        prs->applyBuffer();
        // display result longer
        cvWaitKey(2000);
        prs->clearArea(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()));
        prs->applyBuffer();
        cvWaitKey(1000);
        
        for (int i=0; i<100; i++)
            cvQueryFrame(cpt->getCapture());
        //cvNamedWindow("Foobar", CV_WINDOW_AUTOSIZE);
        //cvShowImage("Foobar", cvQueryFrame(cpt->getCapture()));
        //cvWaitKey(500);

//        cvReleaseImage(&frame);


// generate the chessboard programmatically (unclear how cvFillPoly should be used here - using an image for now)
//        IplImage *cb = cvCreateImage(cvSize(prs->getScreenWidth(), prs->getScreenHeight()), IPL_DEPTH_8U, 1);
//        int x,y, dx = prs->getScreenWidth()/nsquares.width, dy = prs->getScreenHeight()/nsquares.height;
//        CvPoint *corners[4];
//        for (x=0; x<prs->getScreenWidth(); x+=dx) {
//            for (y=0; y<prs->getScreenHeight(); y+=dy) {
//                corners[0] = cvPoint(x,y);
//                corners[1] = cvPoint(x+dx,y);
//                corners[2] = cvPoint(x+dx,y+dy);
//                corners[3] = cvPoint(x,y+dy);
//                cvFillPoly(...)
//            }
//        }
    }