Esempio n. 1
0
// Calibrates the camera from the previously collected object/image point
// correspondences, saves the results to XML, and rebuilds the undistortion
// maps for later use with cvRemap.
//
// Fix: the intrinsic matrix is seeded with a focal-length ratio of 1.0, but
// cvCalibrateCamera2 only honors that seed when CV_CALIB_FIX_ASPECT_RATIO is
// passed; with flags == 0 (as before) the two assignments were dead code.
void CamCalib::CalibrateCamera(CvSize &image_size)
{
	// Drop any results from a previous calibration run.
	if (_intrinsic_matrix)  cvReleaseMat(&_intrinsic_matrix);
	if (_distortion_coeffs) cvReleaseMat(&_distortion_coeffs);

	if (_mapx) cvReleaseImage(&_mapx);
	if (_mapy) cvReleaseImage(&_mapy);

	_intrinsic_matrix  = cvCreateMat(3, 3, CV_32FC1);
	_distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);

	// Initialize the intrinsic matrix with a focal-length ratio of 1.0;
	// CV_CALIB_FIX_ASPECT_RATIO below makes the solver keep fx/fy at this ratio.
	CV_MAT_ELEM( *_intrinsic_matrix, float, 0, 0 ) = 1.0f;
	CV_MAT_ELEM( *_intrinsic_matrix, float, 1, 1 ) = 1.0f;

	// Actual camera calibration (per-view rotations/translations not needed).
	cvCalibrateCamera2 (_object_points, _image_points, _point_counts, image_size,
	                    _intrinsic_matrix, _distortion_coeffs, NULL, NULL,
	                    CV_CALIB_FIX_ASPECT_RATIO);

	// Persist the intrinsic matrix and distortion coefficients.
	cvSave("Intrinsics.xml", _intrinsic_matrix);
	cvSave("Distortion.xml", _distortion_coeffs);

	// Build the x/y undistortion maps (one 32-bit float channel each).
	_mapx = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );
	_mapy = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );

	cvInitUndistortMap (_intrinsic_matrix, _distortion_coeffs, _mapx, _mapy);
}
Esempio n. 2
0
// Estimates the intrinsic matrix and distortion coefficients from the
// chessboard detections accumulated in `finder`.
// Returns false when no chessboard images are available; true otherwise.
bool CamIntrinsics::calibrateCam(ChessboardFinder* finder)
{
	// Nothing to calibrate against without at least one detected board.
	if (finder->numFound() == 0) {
		fprintf(stderr, "Cannot calibrate camera with no chessboard images!\n");
		return false;
	}

	// Build the object/image point matrices from the collected detections.
	finder->generateMatrices();

	const int calibFlags = 0;
	cvCalibrateCamera2(
		finder->objectPoints,   // in: 3D board coordinates
		finder->imagePoints,    // in: detected 2D corners
		finder->pointCounts,    // in: corners per view
		imageSize,
		intrinsic,              // out: camera matrix
		distortion,             // out: distortion coefficients
		rotation,               // out — TODO: Optional
		translation,            // out — TODO: Optional
		calibFlags);

	return true; // XXX: TEMP--No real status here
}
Esempio n. 3
0
// Legacy double-precision wrapper: builds CvMat headers over the caller's
// raw buffers and forwards everything to cvCalibrateCamera2.
// All output buffers (_distortion_coeffs, _camera_matrix, _rotation_matrices,
// _translation_vectors) are wrapped unconditionally, so they must be non-NULL
// and sized for image_count views.
void cvCalibrateCamera_64d( int image_count, int* _point_counts,
    CvSize image_size, CvPoint2D64f* _image_points, CvPoint3D64f* _object_points,
    double* _distortion_coeffs, double* _camera_matrix, double* _translation_vectors,
    double* _rotation_matrices, int flags )
{
    int i, total = 0;
    // One corner count per calibration image.
    CvMat point_counts = cvMat( image_count, 1, CV_32SC1, _point_counts );
    CvMat image_points, object_points;
    // Four distortion coefficients (k1, k2, p1, p2).
    CvMat dist_coeffs = cvMat( 4, 1, CV_64FC1, _distortion_coeffs );
    CvMat camera_matrix = cvMat( 3, 3, CV_64FC1, _camera_matrix );
    // Per view: a full 3x3 rotation matrix (9 doubles) and a 3-vector translation.
    CvMat rotation_matrices = cvMat( image_count, 9, CV_64FC1, _rotation_matrices );
    CvMat translation_vectors = cvMat( image_count, 3, CV_64FC1, _translation_vectors );

    // Total corner count across all views sizes the packed point matrices.
    for( i = 0; i < image_count; i++ )
        total += _point_counts[i];

    // Points are packed as a single multi-channel column (total x 1).
    image_points = cvMat( total, 1, CV_64FC2, _image_points );
    object_points = cvMat( total, 1, CV_64FC3, _object_points );

    cvCalibrateCamera2( &object_points, &image_points, &point_counts, image_size,
        &camera_matrix, &dist_coeffs, &rotation_matrices, &translation_vectors,
        flags );
}
int CCalibration::CalibrateFromCAMorAVI( int nImages, int nChessRow, int nChessCol, int nSquareSize, int nSkip, CvMat* intrinsic, CvMat* distortion, int fromCamera, char* fName)
{
    int nChessSize = nChessRow*nChessCol;
    int nAllPoints = nImages*nChessSize;
    int i, j, k;
    int corner_count, found;
    int* p_count = new int[nImages];
    IplImage **src_img = new IplImage*[nImages];
    CvSize pattern_size = cvSize (nChessCol, nChessRow);
    CvPoint3D32f* objects = new CvPoint3D32f[nAllPoints];
    CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * nAllPoints);
    CvMat object_points;
    CvMat image_points;
    CvMat point_counts;
    CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
    CvMat *translation = cvCreateMat (1, 3, CV_32FC1);

    // (1)
    for (i = 0; i < nImages; i++) {
        for (j = 0; j < nChessRow; j++) {
            for (k = 0; k < nChessCol; k++) {
                objects[i * nChessSize + j * nChessCol + k].x = (float)j * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].y = (float)k * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].z = 0.0;
            }
        }
    }
    cvInitMatHeader (&object_points, nAllPoints, 3, CV_32FC1, objects);

    // (2)
    CvCapture *capture = NULL;
    if (fromCamera)
        capture = cvCaptureFromCAM(0);
    else
        capture = cvCaptureFromAVI(fName);
    assert(capture);

    int found_num = 0;
    cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
    cvNamedWindow ("Webcam", CV_WINDOW_AUTOSIZE);
    int c = 0;
    for (i = 0; i < nImages; i++)
    {
        IplImage * frame;
        while (true)
        {
            frame = cvQueryFrame(capture);
            cvShowImage("Webcam", frame);

            if (c++ % nSkip == 0)
            {
                found = cvFindChessboardCorners (frame, pattern_size, &corners[i * nChessSize], &corner_count);
                if (found)
                {
                    char s[100];
                    sprintf(s, "%d.png", i);
                    cvSaveImage(s, frame);
                    src_img[i] = cvCloneImage(frame);
                    fprintf (stderr, "ok\n");
                    found_num++;
                    break;
                }
            }
            cvWaitKey(5);
        }
        fprintf (stderr, "%02d...", i);

        // (4)
        IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
        cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
        cvFindCornerSubPix (src_gray, &corners[i * nChessSize], corner_count,
                            cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
        cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * nChessSize], corner_count, found);
        p_count[i] = corner_count;
        cvShowImage ("Calibration", src_img[i]);
        //cvWaitKey (0);
    }
    cvDestroyWindow ("Calibration");

    if (found_num != nImages)
        return -1;
    cvInitMatHeader (&image_points, nAllPoints, 1, CV_32FC2, corners);
    cvInitMatHeader (&point_counts, nImages, 1, CV_32SC1, p_count);
    // (5)
    cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

    // (6)
    /*
    CvMat sub_image_points, sub_object_points;
    int base = 0;
    cvGetRows (&image_points, &sub_image_points, base * nChessSize, (base + 1) * nChessSize);
    cvGetRows (&object_points, &sub_object_points, base * nChessSize, (base + 1) * nChessSize);
    cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
    */
    // (7)ToXML
    /*
    CvFileStorage *fs;
    fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
    cvWrite (fs, "intrinsic", intrinsic);
    cvWrite (fs, "rotation", rotation);
    cvWrite (fs, "translation", translation);
    cvWrite (fs, "distortion", distortion);
    cvReleaseFileStorage (&fs);*/

    for (i = 0; i < nImages; i++)
        cvReleaseImage (&src_img[i]);
    delete p_count;
    delete src_img;
    delete objects;
    return 1;
}
int CCalibration::CalibrateFromFiles(int nImages, int nChessRow, int nChessCol, float nSquareSize, CvMat* intrinsic, CvMat* distortion)
{
    int nChessSize = nChessRow*nChessCol;
    int nAllPoints = nImages*nChessSize;
    int i, j, k;
    int corner_count, found;
    int* p_count = new int[nImages];
    IplImage **src_img = new IplImage*[nImages];
    CvSize pattern_size = cvSize (nChessCol, nChessRow);
    CvPoint3D32f* objects = new CvPoint3D32f[nAllPoints];
    CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * nAllPoints);
    CvMat object_points;
    CvMat image_points;
    CvMat point_counts;
    //CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
    //CvMat *translation = cvCreateMat (1, 3, CV_32FC1);

    CvMat *PMatrix = cvCreateMat (3, 4, CV_32FC1);
    CvMat *tmp33 = cvCreateMat(3,3,CV_32FC1);
    CvMat *tmp31 = cvCreateMat(3,1,CV_32FC1);
    CvMat *roTrans = cvCreateMat(3,3,CV_32FC1);
    CvMat *translationTrans = cvCreateMat(3,1,CV_32FC1);

    // (1)
    for (i = 0; i < nImages; i++) {
        char buf[32];
        sprintf (buf, "%d.png", i);
        if ((src_img[i] = cvLoadImage (buf, CV_LOAD_IMAGE_COLOR)) == NULL) {
            fprintf (stderr, "cannot load image file : %s\n", buf);
        }
    }

    // (2)
    for (i = 0; i < nImages; i++) {
        for (j = 0; j < nChessRow; j++) {
            for (k = 0; k < nChessCol; k++) {
                objects[i * nChessSize + j * nChessCol + k].x = j * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].y = k * nSquareSize;
                objects[i * nChessSize + j * nChessCol + k].z = 0.0;
            }
        }
    }
    cvInitMatHeader (&object_points, nAllPoints, 3, CV_32FC1, objects);

    // (3)
    int found_num = 0;
    //cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
    for (i = 0; i < nImages; i++) {
        found = cvFindChessboardCorners (src_img[i], pattern_size, &corners[i * nChessSize], &corner_count);
        fprintf (stderr, "%02d...", i);
        if (found) {
            fprintf (stderr, "ok\n");
            found_num++;
        }
        else {
            fprintf (stderr, "fail\n");
        }
        // (4)
        IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
        cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
        cvFindCornerSubPix (src_gray, &corners[i * nChessSize], corner_count,
                            cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
        p_count[i] = corner_count;
        cvWaitKey (0);
    }

    if (found_num != nImages)
        return -1;
    cvInitMatHeader (&image_points, nAllPoints, 1, CV_32FC2, corners);
    cvInitMatHeader (&point_counts, nImages, 1, CV_32SC1, p_count);

    // (5)
    cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

    /*  (6) ***************** TEST EXTRINSIC PARAMETERS ******************
    CvMat sub_image_points, sub_object_points;
    int base = 0;
    cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
    CModel myBox("box.txt");
    for (i=0; i<nImages; i++)
    {
    	cvGetRows (&image_points, &sub_image_points, base * nChessSize, (base + 1) * nChessSize);
    	cvGetRows (&object_points, &sub_object_points, base * nChessSize, (base + 1) * nChessSize);
    	cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);

    	CvMat *rotMat = cvCreateMat(3, 3, CV_32FC1);
    	cvRodrigues2(rotation, rotMat);

    	//cvTranspose(rotMat,roTrans);
    	cvTranspose(translation,translationTrans);
    	CvMat* tmp34 = cvCreateMat(3,4,CV_32FC1);
    	for (int i = 0; i < 3; i++)
    		for (int j=0; j<3; j++)
    			cvmSet(tmp34,i,j,cvmGet(rotMat,i,j));
    	for (int i = 0; i < 3; i++)
    		cvmSet(tmp34,i,3,cvmGet(translationTrans,i,0));
    	cvMatMul(intrinsic,tmp34,PMatrix);

    	myBox.Draw(src_img[i], PMatrix, cvScalar(0, 255, 0), 2);

    	cvShowImage ("Calibration", src_img[i]);
    	cvWaitKey(0);
    	base++;
    }*/
    cvDestroyWindow("Calibration");
    // Luu ket qua
    /*	CvFileStorage *fs;
    	fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
    	cvWrite (fs, "intrinsic", intrinsic);
    	cvWrite (fs, "rotation", rotation);
    	cvWrite (fs, "translation", translation);
    	cvWrite (fs, "distortion", distortion);
    	cvReleaseFileStorage (&fs);*/

    for (i = 0; i < nImages; i++) {
        cvReleaseImage (&src_img[i]);
    }
    delete p_count;
    delete src_img;
    delete objects;
    return 1;
}
Esempio n. 6
0
// Runs cvCalibrateCamera2 over all chessboard views stored in
// image_points_seq (one sequence element per view, each assumed to hold
// board_size.width * board_size.height CvPoint2D32f corners).
// Outputs: camera_matrix, dist_coeffs, newly allocated *extr_params
// (image_count x 6: rotation vector | translation vector per row) and
// *reproj_errs (1 x image_count) — the caller owns both allocations —
// plus the average reprojection error.
// Returns nonzero iff all computed parameters pass cvCheckArr (finite values).
int CybCamCalibration::calibration(CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
                                   float square_size, float aspect_ratio, int flags, CvMat* camera_matrix,
                                   CvMat* dist_coeffs, CvMat** extr_params, CvMat** reproj_errs,
                                   double* avg_reproj_err) {
    int code;
    int image_count = image_points_seq->total;
    int point_count = board_size.width*board_size.height;
    // Packed point matrices: one multi-channel row spanning every view.
    CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2);
    CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3);
    CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1);
    CvMat rot_vects, trans_vects;
    int i, j, k;
    CvSeqReader reader;
    cvStartReadSeq(image_points_seq, &reader);

    // initialize arrays of points: copy each view's detected corners and
    // generate the matching planar board coordinates (z = 0, square_size grid).
    for (i = 0; i < image_count; i++) {
        CvPoint2D32f* src_img_pt = (CvPoint2D32f*)reader.ptr;
        CvPoint2D32f* dst_img_pt = ((CvPoint2D32f*)image_points->data.fl) + i
                                   *point_count;
        CvPoint3D32f* obj_pt = ((CvPoint3D32f*)object_points->data.fl) + i
                               *point_count;

        for (j = 0; j < board_size.height; j++)
            for (k = 0; k < board_size.width; k++) {
                *obj_pt++ = cvPoint3D32f(j*square_size, k*square_size, 0);
                *dst_img_pt++ = *src_img_pt++;
            }
        CV_NEXT_SEQ_ELEM( image_points_seq->elem_size, reader );
    }

    // Every view contributes the full board of corners.
    cvSet(point_counts, cvScalar(point_count) );

    // Per-view extrinsics: columns 0-2 rotation vector, 3-5 translation.
    *extr_params = cvCreateMat(image_count, 6, CV_32FC1);
    cvGetCols( *extr_params, &rot_vects, 0, 3);
    cvGetCols( *extr_params, &trans_vects, 3, 6);

    cvZero( camera_matrix );
    cvZero( dist_coeffs );

    // Seed fx/fy with the requested aspect ratio (only honored by
    // cvCalibrateCamera2 when CV_CALIB_FIX_ASPECT_RATIO is set).
    // NOTE(review): data.db indexes double elements — this assumes
    // camera_matrix was allocated as CV_64FC1; confirm against the caller.
    if (flags & CV_CALIB_FIX_ASPECT_RATIO) {
        camera_matrix->data.db[0] = aspect_ratio;
        camera_matrix->data.db[4] = 1.;
    }

    cvCalibrateCamera2(object_points, image_points, point_counts, img_size,
                       camera_matrix, dist_coeffs, &rot_vects, &trans_vects, flags);

    // Sanity-check all outputs for NaN/Inf.
    code = cvCheckArr(camera_matrix, CV_CHECK_QUIET) && cvCheckArr(dist_coeffs,
            CV_CHECK_QUIET) && cvCheckArr( *extr_params, CV_CHECK_QUIET);

    *reproj_errs = cvCreateMat( 1, image_count, CV_64FC1);
    *avg_reproj_err = compute_error(object_points, &rot_vects,
                                    &trans_vects, camera_matrix, dist_coeffs, image_points,
                                    point_counts, *reproj_errs);

    cvReleaseMat( &object_points);
    cvReleaseMat( &image_points);
    cvReleaseMat( &point_counts);

    return code;
}
Esempio n. 7
0
// Offline chessboard calibration: loads IMAGE_NUM images from calib_img/,
// detects and refines the corners, estimates intrinsics + distortion,
// estimates extrinsics for the first view, and writes camera.xml.
// Returns 0 on success, -1 on a missing image or an undetected chessboard.
//
// Fixes vs. the previous version:
//  - a failed cvLoadImage only printed a warning; the NULL pointer was then
//    dereferenced by cvFindChessboardCorners — now exits immediately
//  - cvOpenFileStorage result is checked before writing
//  - the per-view gray image is released (was leaked each iteration)
int
main (int argc, char *argv[])
{
  int i, j, k;
  int corner_count, found;
  int p_count[IMAGE_NUM];
  IplImage *src_img[IMAGE_NUM];
  CvSize pattern_size = cvSize (PAT_COL, PAT_ROW);
  CvPoint3D32f objects[ALL_POINTS];
  CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * ALL_POINTS);
  CvMat object_points;
  CvMat image_points;
  CvMat point_counts;
  CvMat *intrinsic = cvCreateMat (3, 3, CV_32FC1);
  CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *translation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *distortion = cvCreateMat (1, 4, CV_32FC1);

  // (1) Load the calibration images; abort on a missing file.
  for (i = 0; i < IMAGE_NUM; i++) {
    char buf[32];
    sprintf (buf, "calib_img/%02d.png", i);
    if ((src_img[i] = cvLoadImage (buf, CV_LOAD_IMAGE_COLOR)) == NULL) {
      fprintf (stderr, "cannot load image file : %s\n", buf);
      return -1;
    }
  }

  // (2) World coordinates of the board corners: planar grid (z = 0)
  //     with CHESS_SIZE spacing, replicated once per view.
  for (i = 0; i < IMAGE_NUM; i++) {
    for (j = 0; j < PAT_ROW; j++) {
      for (k = 0; k < PAT_COL; k++) {
        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;
      }
    }
  }
  cvInitMatHeader (&object_points, ALL_POINTS, 3, CV_32FC1, objects);

  // (3) Detect the chessboard (calibration pattern) corners.
  int found_num = 0;
  cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
  for (i = 0; i < IMAGE_NUM; i++) {
    found = cvFindChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], &corner_count);
    fprintf (stderr, "%02d...", i);
    if (found) {
      fprintf (stderr, "ok\n");
      found_num++;
    }
    else {
      fprintf (stderr, "fail\n");
    }
    // (4) Refine the corners to sub-pixel accuracy and draw them.
    IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
    cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
    cvFindCornerSubPix (src_gray, &corners[i * PAT_SIZE], corner_count,
                        cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
    cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], corner_count, found);
    p_count[i] = corner_count;
    cvShowImage ("Calibration", src_img[i]);
    cvReleaseImage (&src_gray);   // was leaked once per view
    cvWaitKey (0);
  }
  cvDestroyWindow ("Calibration");

  if (found_num != IMAGE_NUM)
    return -1;
  cvInitMatHeader (&image_points, ALL_POINTS, 1, CV_32FC2, corners);
  cvInitMatHeader (&point_counts, IMAGE_NUM, 1, CV_32SC1, p_count);

  // (5) Estimate the intrinsic parameters and distortion coefficients.
  cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

  // (6) Estimate the extrinsic parameters for the first view.
  CvMat sub_image_points, sub_object_points;
  int base = 0;
  cvGetRows (&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvGetRows (&object_points, &sub_object_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);

  // (7) Write the results to an XML file.
  CvFileStorage *fs;
  fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
  if (fs == NULL) {   // was unchecked: cvWrite on NULL would crash
    fprintf (stderr, "cannot open camera.xml for writing\n");
    return -1;
  }
  cvWrite (fs, "intrinsic", intrinsic);
  cvWrite (fs, "rotation", rotation);
  cvWrite (fs, "translation", translation);
  cvWrite (fs, "distortion", distortion);
  cvReleaseFileStorage (&fs);

  for (i = 0; i < IMAGE_NUM; i++) {
    cvReleaseImage (&src_img[i]);
  }

  return 0;
}
Esempio n. 8
0
// Calibrates the camera from the detected corners of one or more chessboard
// views, writing the result into itsIntrinsicMatrix / itsDistortionCoeffs.
// `corners` holds rows*cols points per view, concatenated view after view.
void calibrateViews(std::vector<CvPoint2D32f>& corners, int rows, int cols)
{
  const int pointsPerView = rows * cols;
  const int nViews = corners.size() / pointsPerView;
  LINFO("Calibrate: %i views", nViews);

  if (nViews <= 0)
  {
    LINFO("No corners avl");
    return;
  }

  // Object points: the board grid replicated once per view, lying in the
  // z = 0 plane with GRID_SIZE spacing (squares sized per the header defines).
  CvMat* object_points = cvCreateMat(corners.size(), 3, CV_32FC1);
  for (int view = 0; view < nViews; ++view)
  {
    for (int col = 0; col < cols; ++col)
    {
      for (int row = 0; row < rows; ++row)
      {
        const int idx = view * pointsPerView + col * rows + row;
        cvmSet(object_points, idx, 0, GRID_SIZE * row);   // x coordinate
        cvmSet(object_points, idx, 1, GRID_SIZE * col);   // y coordinate
        cvmSet(object_points, idx, 2, 0);                 // z (planar board)
      }
    }
  }

  // Every view contributes the full grid of points.
  CvMat* point_counts = cvCreateMat(1, nViews, CV_32SC1);
  for (int view = 0; view < nViews; ++view)
    cvSetReal1D(point_counts, view, pointsPerView);

  // Wrap the detected corners in a matrix header without copying.
  CvMat image_points = cvMat(corners.size(), 2, CV_32FC1, &corners[0]);

  // Seed the intrinsic matrix with a focal-length ratio of 1.0 and keep
  // that ratio fixed during the optimisation.
  cvmSet(itsIntrinsicMatrix, 0, 0, 1.0);
  cvmSet(itsIntrinsicMatrix, 1, 1, 1.0);
  const int flags = CV_CALIB_FIX_ASPECT_RATIO;

  cvCalibrateCamera2(object_points, &image_points, point_counts, cvSize(320, 240),
                     itsIntrinsicMatrix, itsDistortionCoeffs,
                     NULL, NULL,
                     flags);

  cvReleaseMat(&object_points);
  cvReleaseMat(&point_counts);
}
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
// Interactive AR.Drone calibration: streams camera frames, lets the user
// capture chessboard views with the space key (Esc quits), then detects the
// corners in the saved grayscale views, calibrates, and writes camera.xml.
// Returns 0 on success, -1 if the drone cannot be initialized.
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Captured grayscale views; each element is released after calibration.
    std::vector<IplImage*> images;
    printf("Press space key to take a sample picture !\n");

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(1);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        IplImage *image = ardrone.getImage();

        // Convert the camera image to grayscale
        IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // Detect the chessboard (corners found here are only used for the
        // on-screen preview; detection is redone on the saved views below).
        int corner_count = 0;
        CvSize size = cvSize(PAT_COL, PAT_ROW);
        CvPoint2D32f corners[PAT_SIZE];
        int found = cvFindChessboardCorners(gray, size, corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE|CV_CALIB_CB_FAST_CHECK);

        // Chessboard detected
        if (found) {
            // Draw corners
            cvDrawChessboardCorners(image, size, corners, corner_count, found);

            // If you push Space key
            if (key == ' ') {
                // Add to buffer — ownership of 'gray' moves to 'images'
                images.push_back(gray);
            }
            else {
                // Release the image
                cvReleaseImage(&gray);
            }
        }
        // Failed to detect
        else {
            // Release the image
            cvReleaseImage(&gray);
        }

        // Display the image
        cvDrawText(image, cvPoint(15, 20), "NUM = %d", (int)images.size());
        cvShowImage("camera", image);
    }

    // Destroy the window
    cvDestroyWindow("camera");

    // At least one image was taken
    if (!images.empty()) {
        // Total number of images
        const int num = (int)images.size();

        //// For debug
        //for (int i = 0; i < num; i++) {
        //    char name[256];
        //    sprintf(name, "images[%d/%d]", i+1, num);
        //    cvShowImage(name, images[i]);
        //    cvWaitKey(0);
        //    cvDestroyWindow(name);
        //}

        // Ask save parameters or not
        if (cvAsk("Do you save the camera parameters ? (y/n)\n")) {
            // Detect coners
            int *p_count = (int*)malloc(sizeof(int) * num);
            CvPoint2D32f *corners = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f) * num * PAT_SIZE);
            for (int i = 0; i < num; i++) {
                // Detect chessboard on the stored grayscale view.
                // NOTE(review): 'found' is not re-checked here; a failed
                // re-detection would feed garbage corners downstream — confirm.
                int corner_count = 0;
                CvSize size = cvSize(PAT_COL, PAT_ROW);
                int found = cvFindChessboardCorners(images[i], size, &corners[i * PAT_SIZE], &corner_count);

                // Convert the corners to sub-pixel
                cvFindCornerSubPix(images[i], &corners[i * PAT_SIZE], corner_count, cvSize(3, 3), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
                p_count[i] = corner_count;
            }

            // Set the 3D position of patterns: planar board (z = 0) with
            // CHESS_SIZE spacing, replicated once per captured view.
            CvPoint3D32f *objects = (CvPoint3D32f*)cvAlloc(sizeof(CvPoint3D32f) * num * PAT_SIZE);
            for (int i = 0; i < num; i++) {
                for (int j = 0; j < PAT_ROW; j++) {
                    for (int k = 0; k < PAT_COL; k++) {
                        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;
                        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;
                        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;
                    }
                }
            }

            // Create matrices (headers over the buffers above; no copies)
            CvMat object_points, image_points, point_counts;
            cvInitMatHeader(&object_points, num * PAT_SIZE, 3, CV_32FC1, objects);
            cvInitMatHeader(&image_points,  num * PAT_SIZE, 1, CV_32FC2, corners);
            cvInitMatHeader(&point_counts,  num,            1, CV_32SC1, p_count);

            // Estimate intrinsic parameters and distortion coefficients
            printf("Calicurating parameters...");
            CvMat *intrinsic   = cvCreateMat(3, 3, CV_32FC1);
            CvMat *distortion  = cvCreateMat(1, 4, CV_32FC1);
            cvCalibrateCamera2(&object_points, &image_points, &point_counts, cvGetSize(images[0]), intrinsic, distortion);
            printf("Finished !\n");

            // Output a file
            // NOTE(review): fs is not checked for NULL before cvWrite — a
            // non-writable working directory would crash here; confirm.
            printf("Generating a XML file...");
            CvFileStorage *fs = cvOpenFileStorage("camera.xml", 0, CV_STORAGE_WRITE);
            cvWrite(fs, "intrinsic", intrinsic);
            cvWrite(fs, "distortion", distortion);
            cvReleaseFileStorage(&fs);    
            printf("Finished !\n");

            // Release the matrices
            free(p_count);
            cvFree(&corners);
            cvFree(&objects);
            cvReleaseMat(&intrinsic);
            cvReleaseMat(&distortion);
        }

        // Release the images
        for (int i = 0; i < num; i++) cvReleaseImage(&images[i]);
    }

    // See you
    ardrone.close();

    return 0;
}
Esempio n. 10
0
bool CalibrationFilter::calibrateCamera() 
{
	int numImages = captures.size();
//	int numExpectedPoints = targetCorners.nx*targetCorners.ny;
	
//	int numPoints = numImages*numExpectedPoints;

	CvPoint2D32f* screenPoints = new CvPoint2D32f[numPoints];
    for( int i=0; i<numPoints; ++i ) {
        screenPoints[i].x = found_points[i].x;
        screenPoints[i].y = found_points[i].y;
		
    }
    
	CvPoint3D32f* worldPoints = new CvPoint3D32f[numPoints];
    for( int i=0; i<numPoints; ++i ) {
        screenPoints[i].x = calibObject.x;
        screenPoints[i].y = calibObject.y;
        screenPoints[i].z = calibObject.y;
    }
	
	
	CvVect32f translation_vectors = NULL;
	CvMatr32f rotation_matrices = NULL;
	
	if (settings.calc_obj_pos)
	{
		translation_vectors = new float[3*numImages];
		rotation_matrices = new float[9*numImages];
	}

	cvCalibrateCamera2(screenPoints,				//Pointer 2D points in screen space. 
					   worldPoints,					//Pointer 3D points in real space
					   numPoints,
					   cvSize(videoSize.x, videoSize.y),			//Size of the image.
					   camera.distCoeffs,			//output: 4 distortion coefficients
					   camera.intrinsics,			//output: intrinsic camera matrix
					   translation_vectors,			//output: Array of translations vectors
					   rotation_matrices,			//output: Array of rotation matrices
					   settings.guess_intrinsics);	//intrisic guess needed

	delete numFoundPoints;
	delete screenPoints;
	delete worldPoints;
	
	if (settings.calc_obj_pos)
	{
		delete translation_vectors;
		delete rotation_matrices;
	}
	
	if (settings.do_interpolate != camera.was_interpolated)
	{
		delete camera.undistMap;

		int mult = settings.do_interpolate? 3 : 1;
		camera.undistMap = cvCreateImage(cvSize(mult*videoSize.x, mult*videoSize.y), CV_32S, 1);
	}

	cvUnDistortInit(input.getCvImage(),
					camera.undistMap,				//undistortion map (calc once, use many times)
					camera.intrinsics,				//intrinsic camera matrix
					camera.distCoeffs,				//4 distortion coefficients
					settings.do_interpolate);		//
	
	camera.was_interpolated = settings.do_interpolate;
}
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{	
	
	board_sz = cvSize(m_board_w, m_board_h);
	
	//Calibrationパターンを計算する。
	if (m_inputImageIn.isNew()) {

		m_inputImageIn.read();

		if(m_keyIn.isNew()){
			m_keyIn.read();
			key = (int)m_key.data;
		}
		
		if(g_temp_w != m_inputImage.width || g_temp_h != m_inputImage.height){
		
			inputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			outputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			tempImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			undistortionImage = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			birds_image = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			
			intrinsicMatrix = cvCreateMat(3,3,CV_64FC1);
			distortionCoefficient = cvCreateMat(4,1,CV_64FC1);
			
			captureCount = 0;
			findFlag = 0;

			mapx = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);
			mapy = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);

			corners = new CvPoint2D32f[m_board_w * m_board_h];
			
			g_temp_w = m_inputImage.width;
			g_temp_h = m_inputImage.height;
		
		}

		//Capture開始する。
		memcpy(inputImage_buff->imageData,(void *)&(m_inputImage.pixels[0]), m_inputImage.pixels.length());

//		tempImage_buff = cvCloneImage(inputImage_buff);
		//OutPortに出力する。
		int len = inputImage_buff->nChannels * inputImage_buff->width * inputImage_buff->height;
		m_origImage.pixels.length(len);
		
		memcpy((void *)&(m_origImage.pixels[0]), inputImage_buff->imageData, len);
		m_origImage.width = inputImage_buff->width;
		m_origImage.height = inputImage_buff->height;

		m_origImageOut.write();
		
		//Capture確認用のWindowの生成
		//cvShowImage("Capture", inputImage_buff);
		cvWaitKey(1);
		
		//SpaceBarを押すとサンプル映像5枚を撮る
		if (key == ' ') {
			
			tempImage_buff = cvCloneImage(inputImage_buff);
			//映像を生成する
			IplImage *grayImage = cvCreateImage(cvGetSize(tempImage_buff), 8, 1);

			//行列の生成
			CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //世界座標用行列
			CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES ,2, CV_64FC1); //画像座標用行列
			CvMat *pointCounts = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 1, CV_32SC1); //コーナー数の行列
			CvMat *rotationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //回転ベクトル
			CvMat *translationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); 

			//世界座標を設定する
			for (int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				for ( int j = 0; j < (m_board_w * m_board_h); j++) {
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 0, (j % m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 1, (j / m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 2, 0.0);
				}
			}

			//コーナー数を設定
			for(int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				cvSetReal2D(pointCounts, i, 0, (m_board_w * m_board_h));
			}
			
			//コーナーを検出する。
			findFlag = findCorners(tempImage_buff, grayImage, corners);

			if (findFlag != 0) {
			
				//コーナーをすべて検出した場合
				//映像座標を設定する。
				for (;;){
					for (int i = 0; i < (m_board_w * m_board_h); i++){
 						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 0, corners[i].x);
						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 1, corners[i].y);
					}
				
					captureCount++;    

					printf("%d枚目キャプチャしました\n", captureCount);

					if (captureCount == NUM_OF_BACKGROUND_FRAMES) {
						//設定した回数チェックパターンを撮った場合
						//カメラパラメータを推定する。
						cvCalibrateCamera2(
							worldCoordinates,
							imageCoordinates,
							pointCounts,
							cvGetSize(inputImage_buff),
							intrinsicMatrix,
							distortionCoefficient,
							rotationVectors,
							translationVectors,
							CALIBRATE_CAMERA_FLAG
						);
						
						//情報をTextとして出力
						printf("\nレンズ歪み係数\n");
						saveRenseMatrix(distortionCoefficient);
						printMatrix("%lf", distortionCoefficient);
						
						//m_renseParameter.data = renseParameters;
												
						printf("\n内部パラメータ\n");
						saveInternalParameterMatrix(intrinsicMatrix);
						printMatrix("%lf ", intrinsicMatrix);

						//m_internalParameter.data = internalParameter;
						
						captureCount = 0;
						break;
						
					}
				}
			}

			if (findFlag != 0){
				InParameter = 1;
			}else if (findFlag == 0) {
				InParameter = 0;
			}
			
			//メモリ解除
			cvReleaseMat(&worldCoordinates);
			cvReleaseMat(&imageCoordinates);
			cvReleaseMat(&pointCounts);
			cvReleaseMat(&rotationVectors);
			cvReleaseMat(&translationVectors);
			cvReleaseImage(&grayImage);

		}
		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

	}
	//外部パターンを取得
	if (key == ' ' && m_inputImageIn.isNew() && InParameter == 1) {

		//行列の生成
		CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h), 3, CV_64FC1); //世界座標用行列
		CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h), 2, CV_64FC1); //画像座標用行列
		CvMat *rotationVectors = cvCreateMat(1, 3, CV_64FC1); //回転ベクトル
		CvMat *rotationMatrix = cvCreateMat(3, 3, CV_64FC1); //回転行列
		CvMat *translationVectors = cvCreateMat(1, 3, CV_64FC1); 

		//世界座標を設定する
		for (int i = 0; i < (m_board_w * m_board_h); i++){
			cvSetReal2D(worldCoordinates, i, 0, (i % m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 1, (i / m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 2, 0.0);
		}
	
		cvWaitKey( 1 );
	
		//	スペースキーが押されたら
		if ( findFlag != 0 ) {
			//	コーナーがすべて検出された場合
			//	画像座標を設定する
			for ( int i = 0; i < (m_board_w * m_board_h); i++ ){
				cvSetReal2D( imageCoordinates, i, 0, corners[i].x);
				cvSetReal2D( imageCoordinates, i, 1, corners[i].y);
			}

			//	外部パラメータを推定する
			cvFindExtrinsicCameraParams2(
				worldCoordinates,
				imageCoordinates,
				intrinsicMatrix,
				distortionCoefficient,
				rotationVectors,
				translationVectors
			);

			//	回転ベクトルを回転行列に変換する
			cvRodrigues2( rotationVectors, rotationMatrix, NULL );

			printf( "\n外部パラメータ\n" );
			printExtrinsicMatrix( rotationMatrix, translationVectors );
			saveExternalParameterMatrix(rotationMatrix, translationVectors);

			m_externalParameter.data = CORBA::string_dup(externalParameter);
			m_renseParameter.data = CORBA::string_dup(renseParameters);
			m_internalParameter.data = CORBA::string_dup(internalParameter);
						
		}
		//メモリを解放
		cvReleaseMat( &worldCoordinates );
		cvReleaseMat( &imageCoordinates );
		cvReleaseMat( &rotationVectors );
		cvReleaseMat( &rotationMatrix );
		cvReleaseMat( &translationVectors );
		
		//X,Y初期化
		cvInitUndistortMap(
			intrinsicMatrix,
			distortionCoefficient,
			mapx,
			mapy
		);
		//外部パラメータ確認フラグ
		outParameter = 1;
		key = 0;
				
	 }
	
	//内部外部パラメータの出力に成功したら
	if (InParameter == 1 && outParameter == 1) {

		//	レンズ歪みを補正した画像を生成する
		cvUndistort2(
			inputImage_buff,
			undistortionImage,
			intrinsicMatrix,
			distortionCoefficient
		);

		//cvShowImage("歪み補正", undistortionImage);

		//OutPortに補正映像を出力する。
		//int len = undistortionImage->nChannels * undistortionImage->width * undistortionImage->height;
		//m_calbImage.pixels.length(len);
		
		//歪み補正映像をOutPortとしてメモリコピーする。
		//memcpy((void *)&(m_calbImage.pixels[0]), undistortionImage->imageData, len);
		//m_calbImageOut.write();
		
		//鳥瞰図の座標設定
		objPts[0].x = 0;					objPts[0].y = 0;
		objPts[1].x = m_board_w-1;			objPts[1].y = 0;
		objPts[2].x = 0;					objPts[2].y = m_board_h-1;
		objPts[3].x = m_board_w-1;			objPts[3].y = m_board_h-1;
		
		//取得するCornerを設定
		imgPts[0] = corners[0];
		imgPts[1] = corners[m_board_w - 1];
		imgPts[2] = corners[(m_board_h - 1) * m_board_w];
		imgPts[3] = corners[(m_board_h - 1) * m_board_w + m_board_w - 1];
		
		//指定したCornerに○を作成する
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3);

		CvMat *H = cvCreateMat(3, 3, CV_32F);
		cvGetPerspectiveTransform(objPts, imgPts, H);
		
		//高さを設定する。
		CV_MAT_ELEM(*H, float, 2, 2) = m_camera_Height;
		
		//Warppingを実行
		cvWarpPerspective(inputImage_buff, birds_image, H, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		
		//鳥瞰図をOutPortに出力する。
		int len = birds_image->nChannels * birds_image->width * birds_image->height;
		m_birdImage.pixels.length(len);
		memcpy((void *)&(m_birdImage.pixels[0]), birds_image->imageData, len);

		m_birdImage.width = inputImage_buff->width;
		m_birdImage.height = inputImage_buff->height;

		m_birdImageOut.write();

		cvWaitKey(10);

		//cvShowImage("Bird_Eye", birds_image);
		cvReleaseMat(&H);

		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

		key = 0;

	}
// Esempio n. 12
// 0
/// Calibrate the camera from the chessboard corners captured so far.
///
/// Builds the object/image point matrices from the globals `capturedImageNum`,
/// `chessboardCornerNumX/Y`, `patternWidth` and `cornerSet`, runs
/// cvCalibrateCamera2, converts the resulting intrinsics + 4 distortion
/// coefficients to an ARToolKit ARParam (convParam), prints the per-frame
/// RMS reprojection error, and saves the parameter file (saveParam).
///
/// No parameters / no return value; all inputs and outputs are the
/// file-level globals and the saved parameter file.
static void calib(void)
{
    ARParam  param;
    CvMat    *objectPoints;
    CvMat    *imagePoints;
    CvMat    *pointCounts;
    CvMat    *intrinsics;
    CvMat    *distortionCoeff;
    CvMat    *rotationVectors;
    CvMat    *translationVectors;
    CvMat    *rotationVector;
    CvMat    *rotationMatrix;
    float    intr[3][4];
    float    dist[4];
    ARdouble trans[3][4];
    ARdouble cx, cy, cz, hx, hy, h, sx, sy, ox, oy, err;
    int      i, j, k, l;

    // One row per corner per captured frame; points are stored flat,
    // indexed by l below in the same order for object and image points.
    objectPoints       = cvCreateMat(capturedImageNum * chessboardCornerNumX * chessboardCornerNumY, 3, CV_32FC1);
    imagePoints        = cvCreateMat(capturedImageNum * chessboardCornerNumX * chessboardCornerNumY, 2, CV_32FC1);
    pointCounts        = cvCreateMat(capturedImageNum, 1, CV_32SC1);
    intrinsics         = cvCreateMat(3, 3, CV_32FC1);
    distortionCoeff    = cvCreateMat(1, 4, CV_32FC1);
    rotationVectors    = cvCreateMat(capturedImageNum, 3, CV_32FC1);
    translationVectors = cvCreateMat(capturedImageNum, 3, CV_32FC1);
    rotationVector     = cvCreateMat(1, 3, CV_32FC1);
    rotationMatrix     = cvCreateMat(3, 3, CV_32FC1);

    l = 0;

    for (k = 0; k < capturedImageNum; k++)
    {
        for (i = 0; i < chessboardCornerNumX; i++)
        {
            for (j = 0; j < chessboardCornerNumY; j++)
            {
                // Planar board: Z = 0, grid spacing = patternWidth.
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[0] = patternWidth * i;
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[1] = patternWidth * j;
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[2] = 0.0f;

                // cornerSet is assumed to hold detected corners in the same
                // (frame, x-major) order as the loops above.
                ((float*)(imagePoints->data.ptr + imagePoints->step * l))[0] = cornerSet[l].x;
                ((float*)(imagePoints->data.ptr + imagePoints->step * l))[1] = cornerSet[l].y;

                l++;
            }
        }

        ((int*)(pointCounts->data.ptr))[k] = chessboardCornerNumX * chessboardCornerNumY;
    }

    cvCalibrateCamera2(objectPoints, imagePoints, pointCounts, cvSize(xsize, ysize),
                       intrinsics, distortionCoeff, rotationVectors, translationVectors, 0);

    // Copy the 3x3 intrinsic matrix into a 3x4 matrix (last column zero),
    // the layout convParam/ARToolKit expects.
    for (j = 0; j < 3; j++)
    {
        for (i = 0; i < 3; i++)
        {
            intr[j][i] = ((float*)(intrinsics->data.ptr + intrinsics->step * j))[i];
        }

        intr[j][3] = 0.0f;
    }

    for (i = 0; i < 4; i++)
    {
        dist[i] = ((float*)(distortionCoeff->data.ptr))[i];
    }

    convParam(intr, dist, xsize, ysize, &param); // COVHI10434 ignored.
    arParamDisp(&param);

    // Reprojection-error report: for each captured frame, rebuild the
    // extrinsic transform and compare projected ideal points (converted to
    // observed/distorted coordinates) against the detected corners.
    l = 0;

    for (k = 0; k < capturedImageNum; k++)
    {
        for (i = 0; i < 3; i++)
        {
            ((float*)(rotationVector->data.ptr))[i] = ((float*)(rotationVectors->data.ptr + rotationVectors->step * k))[i];
        }

        cvRodrigues2(rotationVector, rotationMatrix);

        for (j = 0; j < 3; j++)
        {
            for (i = 0; i < 3; i++)
            {
                trans[j][i] = ((float*)(rotationMatrix->data.ptr + rotationMatrix->step * j))[i];
            }

            trans[j][3] = ((float*)(translationVectors->data.ptr + translationVectors->step * k))[j];
        }

        // arParamDispExt(trans);

        err = 0.0;

        for (i = 0; i < chessboardCornerNumX; i++)
        {
            for (j = 0; j < chessboardCornerNumY; j++)
            {
                cx = trans[0][0] * patternWidth * i + trans[0][1] * patternWidth * j + trans[0][3];
                cy = trans[1][0] * patternWidth * i + trans[1][1] * patternWidth * j + trans[1][3];
                cz = trans[2][0] * patternWidth * i + trans[2][1] * patternWidth * j + trans[2][3];
                hx = param.mat[0][0] * cx + param.mat[0][1] * cy + param.mat[0][2] * cz + param.mat[0][3];
                hy = param.mat[1][0] * cx + param.mat[1][1] * cy + param.mat[1][2] * cz + param.mat[1][3];
                h  = param.mat[2][0] * cx + param.mat[2][1] * cy + param.mat[2][2] * cz + param.mat[2][3];

                // BUGFIX: the original `if (h == 0.0) continue;` skipped the
                // l++ as well, desynchronizing the flat imagePoints index for
                // every subsequent corner. Skip only the error contribution
                // for the degenerate point; always advance l.
                if (h != 0.0)
                {
                    sx = hx / h;
                    sy = hy / h;
                    arParamIdeal2Observ(param.dist_factor, sx, sy, &ox, &oy, param.dist_function_version);
                    sx   = ((float*)(imagePoints->data.ptr + imagePoints->step * l))[0];
                    sy   = ((float*)(imagePoints->data.ptr + imagePoints->step * l))[1];
                    err += (ox - sx) * (ox - sx) + (oy - sy) * (oy - sy);
                }

                l++;
            }
        }

        err = sqrt(err / (chessboardCornerNumX * chessboardCornerNumY));
        ARLOG("Err[%2d]: %f[pixel]\n", k + 1, err);
    }

    saveParam(&param);

    cvReleaseMat(&objectPoints);
    cvReleaseMat(&imagePoints);
    cvReleaseMat(&pointCounts);
    cvReleaseMat(&intrinsics);
    cvReleaseMat(&distortionCoeff);
    cvReleaseMat(&rotationVectors);
    cvReleaseMat(&translationVectors);
    cvReleaseMat(&rotationVector);
    cvReleaseMat(&rotationMatrix);
}
// Esempio n. 13
// 0
int main()
{

	if(run_tests_only)
	{
		MyLine3D::runTest();
		return 0;
	}

	//CvMat *camera_inner_calibration_matrix; 
	bool show_surf_example=false;
	bool show_calibration_from_camera_and_undistortion=false;
	if(show_calibration_from_camera_and_undistortion)
	{
		CvMat *object_points_all=0;
		CvMat *image_points_all=0;
		CvMat *points_count_all=0;
		CvMat *camera_matr=0;
		CvMat *distor_coefs=0;
		CvMat *rotation_vecs=0;
		CvMat *transpose_vecs=0;
		vector<CvPoint2D32f> qu_calibr_points;
		IplImage* frameCam1;
		cvNamedWindow("WindowCam1",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam1=cvCreateCameraCapture(0);
		IplImage *quarterFrame;
		CvPoint2D32f *cornersFounded= new CvPoint2D32f[100];
		int cornersCount=0;
		int result_Found=0;
		// getting snapshots for inner camera calibration from video camera
		bool capture_flag=false;
		while(true)
		{
			frameCam1=cvQueryFrame(captureCam1);
			quarterFrame=cvCreateImage(cvSize((frameCam1->width),(frameCam1->height)),IPL_DEPTH_8U,3);
		
			cvCopy(frameCam1,quarterFrame);
			if(capture_flag)
			{
				result_Found=cvFindChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,&cornersCount);//,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS |CV_CALIB_CB_FAST_CHECK);
				cvDrawChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,cornersCount,result_Found);
				if(result_Found>0)
					AddPointsToInnerCalibrate(qu_calibr_points,cornersFounded,cornersCount);
				capture_flag=false;
				cvShowImage("WindowCam1",quarterFrame);
				if(result_Found>0)
					cvWaitKey(0);
			}
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c==32 || c=='y' || c=='Y')
				capture_flag=true;
			cvShowImage("WindowCam1",quarterFrame);
			cvReleaseImage(&quarterFrame);
		
		}
		cvReleaseImage(&quarterFrame);
	
		cvReleaseCapture(&captureCam1);
		cvDestroyWindow("WindowCam1");
	
		PrintAllPointsForInnerCalibrate(qu_calibr_points,chess_b_szW*chess_b_szH);
		InitCvMatPointsParametersForInnerCallibration_part1(qu_calibr_points,chess_b_szW*chess_b_szH,object_points_all,image_points_all,points_count_all,chess_b_szW,chess_b_szH);
		InitOtherCameraParametersForInnerCallibration_part2(qu_calibr_points.size()/(chess_b_szW*chess_b_szH),camera_matr,distor_coefs,rotation_vecs,transpose_vecs);
		double calibration_error_result=cvCalibrateCamera2(object_points_all,
													image_points_all,
													points_count_all,
													cvSize(imgW,imgH),
													camera_matr,
													distor_coefs,
													rotation_vecs,
													transpose_vecs,
													CV_CALIB_FIX_PRINCIPAL_POINT|CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_ZERO_TANGENT_DIST
													);
		WriteMatrixCoef(camera_matr);
		WriteMatrixCoef(distor_coefs);
		//camera_inner_calibration_matrix=cvCreateMat(3,3,CV_32FC1);
		//cvCopy(camera_matr,camera_inner_calibration_matrix);
		cvSave("camera_calibration_inner.txt",camera_matr,"camera_inner_calibration_matrix");
		cvSave("camera_calibration_dist.txt",distor_coefs,"distor_coefs","coeficients of distortions");
		cout<<"Total Error:"<<calibration_error_result<<endl;
		cout<<"Average Calibration Error :"<<(calibration_error_result)/qu_calibr_points.size()<<endl;
	//undistortion example
		IplImage *frame_cur;
		IplImage *undistor_image;
		cvNamedWindow("cameraUndistor",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam2=cvCreateCameraCapture(0);
		bool undist_flag=false;
		while(true)
		{
			frame_cur= cvQueryFrame(captureCam2);
			undistor_image=cvCreateImage(cvSize((frame_cur->width),(frame_cur->height)),IPL_DEPTH_8U,3);
			if(undist_flag)
			{
				cvUndistort2(frame_cur,undistor_image,camera_matr,distor_coefs);
			}
			else
			{
				cvCopy(frame_cur,undistor_image);
			}
			cvShowImage("cameraUndistor",undistor_image);
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c=='u'||c=='U')
				undist_flag=!undist_flag;

			cvReleaseImage(&undistor_image);

		}
		cvReleaseImage(&undistor_image);
		cvReleaseCapture(&captureCam2);
		cvDestroyWindow("cameraUndistor");
	}//ending undistortion_example
	
	if(show_surf_example)
	{
		//using SURF
		
		initModule_nonfree();// added at 16.04.2013
		CvCapture* capture_cam_3=cvCreateCameraCapture(0);
		cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO);
		cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1);
		IplImage* buf_frame_3=0;
		IplImage* gray_copy=0;
		IplImage* buf_frame_3_copy=0;
	
		CvSeq *kp1,*descr1;
		CvMemStorage *storage=cvCreateMemStorage(0);
	
		CvSURFPoint *surf_pt;
		bool surf_flag=false;
		while(true)
		{
			buf_frame_3=cvQueryFrame(capture_cam_3);
		
			if(surf_flag)
			{
				surf_flag=false;
				gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1);
				buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3);
			
				cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY);
				//cvSetImageROI(gray_copy,cvRect(280,200,40,40));
				cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0));
				cvReleaseImage(&gray_copy);
				re_draw=true;
			
				while(true)
				{
					if(re_draw)
					{
			
						cvCopy(buf_frame_3,buf_frame_3_copy);
						double pi=acos(-1.0);
						for(int i=0;i<kp1->total;i++)
						{
							surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i);
							if(surf_pt->hessian<min_hessian)
								continue;
							int pt_x,pt_y;
							pt_x=(int)(surf_pt->pt.x);
							pt_y=(int)(surf_pt->pt.y);
							int sz=surf_pt->size;
							int rad_angle=(surf_pt->dir*pi)/180;
				
							cvCircle(buf_frame_3_copy,cvPoint(pt_x,pt_y),1/*sz*/,CV_RGB(0,255,0));
							cvLine(buf_frame_3_copy,cvPoint(pt_x,pt_y),cvPoint(pt_x+sz*cosl(rad_angle),pt_y-sz*sinl(rad_angle)),CV_RGB(0,0,255));
						}
						cvShowImage("SURF from Cam",buf_frame_3_copy);
					
					}
					char c=cvWaitKey(33);
					if(c==27)
					{
					
					
						break;
					}
				}
				cvReleaseImage(&buf_frame_3_copy);
			}
			
			cvShowImage("SURF from Cam",buf_frame_3);
			char ch=cvWaitKey(33);
			if(ch==27)
				break;
			if(ch==32)
				surf_flag=true;
		
		}
		if(gray_copy!=0)
			cvReleaseImage(&gray_copy);
		cvReleaseCapture(&capture_cam_3);
		cvDestroyWindow("SURF from Cam");
	}//ending SURF_example

	CvFont my_font=cvFont(1,1);
	cvInitFont(&my_font,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0);

	cvNamedWindow("twoSnapshots",CV_WINDOW_KEEPRATIO);
	cvCreateTrackbar("Select LLine","twoSnapshots",0,1000,onTrackbarSlideSelectLine);
	CvCapture *capture_4 = 0;
	
	IplImage* left_img=0;
	IplImage* right_img=0;
	IplImage* cur_frame_buf=0;
	IplImage* gray_img_left=0;
	IplImage* gray_img_right=0;
	IplImage* merged_images=0;
	IplImage* merged_images_copy=0;
	CvMat *fundamentalMatrix = 0;
	vector<KeyPoint> key_points_left;
	Mat descriptors_left; 
	vector<KeyPoint> key_points_right;
	Mat descriptors_right;
	//CvMemStorage *mem_stor=cvCreateMemStorage(0);*/
	float min_hessian_value=1001.0f;

	double startValueOfFocus = 350;

	char* left_image_file_path = "camera_picture_left.png";
	char* right_image_file_path = "camera_picture_right.png";

	Array left_points, right_points;
	left_points.init(1,1);
	right_points.init(1,1);
	Array forReconstructionLeftPoints, forReconstructionRightPoints;
	forReconstructionLeftPoints.init(1,1);
	forReconstructionRightPoints.init(1,1);

	

	while(true)
	{
		char ch=cvWaitKey(33);
		if(ch==27)
			break;
		// open left and right images
		if(ch == 'o' || ch == 'O')
		{
			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			MergeTwoImages(left_img,right_img,merged_images);
		}
		// save both left and right images from camera
		if(ch == 's' || ch == 'S')
		{
			if( left_img != 0 )
				cvSaveImage(left_image_file_path, left_img);
			if( right_img != 0)
				cvSaveImage(right_image_file_path, right_img);
		}

		if(ch=='l'||ch=='L')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			
			cur_frame_buf=cvQueryFrame(capture_4);
			if(left_img==0)
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,left_img);

			if(right_img == 0)
			{
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,right_img);
			}

			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='r'||ch=='R')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			if(right_img==0)
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,right_img);

			if(left_img == 0)
			{
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,left_img);
			}
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='b'||ch=='B')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			cvCopy(cur_frame_buf,left_img);
			cvCopy(cur_frame_buf,right_img);
		}
		if(ch=='q'||ch=='Q' && left_img!=0)
		{
			//proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);

		}
		if(ch=='w'||ch=='W' && right_img!=0)
		{
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);			

		}
		if(ch=='m'||ch=='M' && left_img!=0 && right_img!=0)
		{
			//merge two images in to bigger one
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='c'||ch=='C' && merged_images!=0)
		{
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);
		}

		if(ch == 'E' || ch == 'e')
		{
			//drawing lines for corresponding points
			KeyPoint *leftPoint,*rightPoint,*leftPoint2,*rightPoint2;
			int width_part=merged_images->width>>1;
			/*for(int iL=0;iL<left_to_right_corresponding_points.size();iL++)
			{
				leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left,left_to_right_corresponding_points[iL].first);
				rightPoint=(CvSURFPoint*)cvGetSeqElem(key_points_right,left_to_right_corresponding_points[iL].second);
				cvLine(merged_images,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(255,0,0));
			}*/
			
			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			
			while(true)
			{
				merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
				cvCopy(merged_images,merged_images_copy);
				int iL=selectedLeftLine;
				int iR=iL;
				if(iL>=left_to_right_corresponding_points.size())
					iL=left_to_right_corresponding_points.size()-1;
				if(iR>=right_to_left_corresponding_points.size())
					iR=right_to_left_corresponding_points.size()-1;
				char str[100]={0};
				if(iL >= 0 )
				{
					bool isLeftToRightLineIsAccepted = acceptedLeftToRightCorrespondings[iL];
				
					// difference value
					sprintf(str,"%f",left_to_right_corresponding_points[iL].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-40),&my_font,CV_RGB(0,255,0));
					// count of Matches
					sprintf(str,"%d",left_to_right_corresponding_points[iL].counterOfMatches);
					cvPutText(merged_images_copy,str,cvPoint(200,merged_images_copy->height-40),&my_font,CV_RGB(255,255,0));
					// median of compared values
					sprintf(str,"%lf",left_to_right_corresponding_points[iL].medianOfComparedMatches);
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));

					// Variance of compared values
					sprintf(str,"V=%lf",left_to_right_corresponding_points[iL].Variance());
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					// Standard deviation of compared values
					sprintf(str,"SD=%lf",sqrt( left_to_right_corresponding_points[iL].Variance() ));
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					double SD = sqrt( left_to_right_corresponding_points[iL].Variance() ) ;
					double median = left_to_right_corresponding_points[iL].medianOfComparedMatches;
					double compValue = left_to_right_corresponding_points[iL].comparer_value;
					double mark_1_5 = median - 1.5 * SD - compValue;

					// Mark 1.5
					sprintf(str,"m1.5=%lf", mark_1_5);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-120),&my_font,CV_RGB(0,255,0));

					sprintf(str,"angle=%lf", left_to_right_corresponding_points[iL].degreesBetweenDeltaVector);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-150),&my_font,CV_RGB(0,255,0));

					

					leftPoint= &(key_points_left[ left_to_right_corresponding_points[iL].comp_pair.first ]);
					rightPoint=&(key_points_right[ left_to_right_corresponding_points[iL].comp_pair.second ]);
				
					cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(0,255,0));

					drawEpipolarLinesOnLeftAndRightImages(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y),
						cvPoint(rightPoint->pt.x,rightPoint->pt.y), fundamentalMatrix);

					CvScalar color = CV_RGB(255, 0, 0);
					if(isLeftToRightLineIsAccepted)
					{
						color = CV_RGB(0,255,0);
					}

					cvCircle(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), 5, color);
					cvCircle(merged_images_copy, cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y), 5, color);
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x,rightPoint->pt.y),CV_RGB(255,0,255));
				if(iR >= 0 )
				{
					sprintf(str,"%f",right_to_left_corresponding_points[iR].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(width_part,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));
					rightPoint2= &(key_points_right [right_to_left_corresponding_points[iR].comp_pair.first]);
					leftPoint2= &(key_points_left [right_to_left_corresponding_points[iR].comp_pair.second]);
					cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,0));
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x+width_part,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,255));
				
				cvShowImage("twoSnapshots",merged_images_copy);
				cvReleaseImage(&merged_images_copy);
				char ch2=cvWaitKey(33);
				if(ch2==27)
					break;
				if(ch2=='z' && selectedLeftLine>0)
				{
					selectedLeftLine--;
				}
				if(ch2=='x' && selectedLeftLine<1000)
				{
					selectedLeftLine++;
				}
				if( ch2 == 'a' || ch2 == 'A')
				{
					acceptedLeftToRightCorrespondings[selectedLeftLine] = true;
				}
				if( ch2 == 'd' || ch2 == 'D')
				{
					acceptedLeftToRightCorrespondings[selectedLeftLine] = false;
				}
			}//end of while(true)

			SaveAcceptedCorresspondings(
					left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings
					);
			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);


			delete[] acceptedLeftToRightCorrespondings;
		}
		if( ch == 'T' || ch == 't')
		{
			clock_t startTime = clock();

			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			// proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);	
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);

			// searching fundamental matrix and corresponding points
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value, 450);
			// selecting points for finding model parameters

			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);

			delete[] acceptedLeftToRightCorrespondings;

			// start process of determination parameters of model and reconstruction of scene
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();


			cout << "Code execution time: "<< double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << endl;
		}
		if( ch == 'I' || ch == 'i')
		{	

			//-- Step 3: Matching descriptor vectors using FLANN matcher
			FlannBasedMatcher matcher;
			std::vector< DMatch > matches;
			matcher.match( descriptors_left, descriptors_right, matches );

			//double max_dist = 0; double min_dist = 100;

			////-- Quick calculation of max and min distances between keypoints
			//for( int i = 0; i < descriptors_left.rows; i++ )
			//{ double dist = matches[i].distance;
			//	if( dist < min_dist ) min_dist = dist;
			//	if( dist > max_dist ) max_dist = dist;
			//}

			//printf("-- Max dist : %f \n", max_dist );
			//printf("-- Min dist : %f \n", min_dist );

			//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
			//-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
			//-- small)
			//-- PS.- radiusMatch can also be used here.
			//std::vector< DMatch > good_matches;
			
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();

			for( int i = 0; i < descriptors_left.rows; i++ )
			{ 
				//if( matches[i].distance <= max(2*min_dist, 0.02) )
				{
					//good_matches.push_back( matches[i]); 
					left_to_right_corresponding_points.push_back( ComparedIndexes(matches[i].distance, pair<int, int> (i, matches[i].trainIdx)) );
				}
			}
			
			cout<< "Count of good matches :" << left_to_right_corresponding_points.size() << endl;

			stable_sort(left_to_right_corresponding_points.begin(),left_to_right_corresponding_points.end(),my_comparator_for_stable_sort);
		}

		//if( ch == 'K' || ch == 'k')
		//{
		//	CvSURFPoint *leftPoint;
		//	//proceed left
		//	gray_img_left=cvCreateImage(cvSize((left_img->width),(left_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(left_img,gray_img_left,CV_RGB2GRAY);
		//	cvExtractSURF(gray_img_left,NULL,&key_points_left,&descriptors_left,mem_stor,cvSURFParams(min_hessian_value,0));

		//	cv::Mat mat_gray_leftImage(gray_img_left, true);
		//	cvReleaseImage(&gray_img_left);
		//	// proceed right
		//	gray_img_right=cvCreateImage(cvSize((right_img->width),(right_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(right_img,gray_img_right,CV_RGB2GRAY);
		//	cv::Mat mat_gray_rightImage(gray_img_right, true);
		//	cvReleaseImage(&gray_img_right);
		//	vector<Point2f> LK_left_points;
		//	vector<Point2f> LK_right_points;

		//	LK_right_points.resize(key_points_left->total);

		//	for( int i = 0; i < key_points_left->total; i++)
		//	{
		//		leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left, i);
		//		LK_left_points.push_back(Point2f( leftPoint->pt.x, leftPoint->pt.y));
		//	}
		//	
		//	vector<uchar> status;
  //          vector<float> err;

		//	cv::calcOpticalFlowPyrLK(
		//		mat_gray_leftImage,
		//		mat_gray_rightImage, 
		//		LK_left_points,
		//		LK_right_points, 
		//		status,
		//		err);
		//	int width_part=merged_images->width>>1;
		//	
		//	float minErr = err[0];

		//	for(int k = 0; k < err.size(); k++)
		//	{
		//		if(status[k] && err[k] < minErr) 
		//		{
		//			minErr = err[k];
		//		}
		//	}

		//	cout<< "Lucass Kanade min error: " << minErr<< endl;

		//	int i = 0;
		//	merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
		//	cvCopy(merged_images,merged_images_copy);
		//	for(; i < LK_left_points.size(); ++i)
		//	{
		//		if(err[i] < 5 * minErr && status[i])
		//		{
		//			cvLine(merged_images_copy,cvPoint(LK_left_points[i].x,LK_left_points[i].y),cvPoint(LK_right_points[i].x+width_part,LK_right_points[i].y),
		//					CV_RGB(100 + (( i *3) % 155), 100+ ((i*7)%155), 100+ ((i*13)%155)));
		//		}
		//	}

		//	cvShowImage("twoSnapshots",merged_images_copy);
		//		
		//	while(true)
		//	{

		//		char ch2=cvWaitKey(33);
		//		if(ch2==27)
		//			break;
		//		
		//	}
		//	
		//	cvReleaseImage(&merged_images_copy);

		//	status.clear();
		//	err.clear();
		//	LK_left_points.clear();
		//	LK_right_points.clear();
		//	mat_gray_leftImage.release();
		//	mat_gray_rightImage.release();
		//}

		if( ch == 'F' || ch == 'f')
		{
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value);


		}
		if( ch == 'P' || ch == 'p')
		{
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();
		}
		if(merged_images!=0)
		{
			cvShowImage("twoSnapshots",merged_images);
		}
		
	}
Esempio n. 14
0
// Per-frame matrix callback: accumulates chessboard views until enough are
// captured, then solves for the camera intrinsics / distortion and undistorts
// every subsequent input frame into the output matrix.
// Returns JIT_ERR_NONE on success or the appropriate Jitter error code.
t_jit_err cv_jit_calibration_matrix_calc(t_cv_jit_calibration *x, void *inputs, void *outputs)
{
	t_jit_err			err = JIT_ERR_NONE;
	long				in_lock_state;
	long				out_lock_state;
	t_jit_matrix_info	in_info;
	t_jit_matrix_info	out_info;
	char				*in_data;
	char				*out_data;
	void				*in_matrix;
	void				*out_matrix;
	CvMat				in_header, out_header;

	in_matrix  = jit_object_method(inputs,  _jit_sym_getindex, 0);
	out_matrix = jit_object_method(outputs, _jit_sym_getindex, 0);

	// Bail out before locking anything: nothing to undo on this path.
	if (!x || !in_matrix || !out_matrix)
		return JIT_ERR_INVALID_PTR;

	in_lock_state  = (long) jit_object_method(in_matrix,  _jit_sym_lock, 1);
	out_lock_state = (long) jit_object_method(out_matrix, _jit_sym_lock, 1);

	jit_object_method(in_matrix,  _jit_sym_getinfo, &in_info);
	jit_object_method(out_matrix, _jit_sym_getinfo, &out_info);
	jit_object_method(in_matrix,  _jit_sym_getdata, &in_data);
	jit_object_method(out_matrix, _jit_sym_getdata, &out_data);

	// Validate the matrices; any failure must still fall through to unlock.
	if (!in_data) {
		err = JIT_ERR_INVALID_INPUT;
		goto unlock;
	}
	if (!out_data) {
		err = JIT_ERR_INVALID_OUTPUT;
		goto unlock;
	}
	if (in_info.type != _jit_sym_char) {
		err = JIT_ERR_MISMATCH_TYPE;
		goto unlock;
	}
	if (in_info.planecount != 4 && in_info.planecount != 1) {
		err = JIT_ERR_MISMATCH_PLANE;
		goto unlock;
	}
	if (in_info.dimcount != 2) {
		err = JIT_ERR_MISMATCH_DIM;
		goto unlock;
	}

	// Incoming frame size changed: remember it and reload stored parameters.
	if (x->dim[0] != in_info.dim[0] || x->dim[1] != in_info.dim[1]) {
		x->dim[0] = in_info.dim[0];
		x->dim[1] = in_info.dim[1];
		cv_jit_calibration_load_param(x);
	}

	// Wrap the Jitter matrices as CvMat headers (no pixel copy).
	cvJitter2CvMat(in_matrix,  &in_header);
	cvJitter2CvMat(out_matrix, &out_header);

	// Still calibrating: keep collecting views until x->board_view_nb
	// chessboards with all corners visible have been found.
	if (x->success_count < x->board_view_nb && x->calibration != 0) {
		cv_jit_calibration_findcorners(x, in_info, out_info, in_matrix, out_matrix, in_header, out_data);
	}

	if (x->success_count >= x->board_view_nb && x->calibration != 0) {
		// Enough views gathered: solve for the intrinsic matrix and the
		// distortion coefficients (rotation/translation outputs not needed).
		cvCalibrateCamera2(x->object_points,
						   x->image_points,
						   x->point_counts,
						   cvSize(in_info.dim[0], in_info.dim[1]),
						   x->intrinsic_matrix,
						   x->distortion_coeffs,
						   NULL,
						   NULL,
						   0);

		//cv_jit_calibration_print_parameters(x);

		cv_jit_calibration_build_undistort_map(x);

		x->calibration = 0;	// calibration phase finished
	}

	// Calibration done: undistort the current frame into the output.
	if (x->calibration == 0) {
		cvRemap(&in_header, &out_header, x->mapx, x->mapy);
	}

unlock:
	jit_object_method(out_matrix, _jit_sym_lock, out_lock_state);
	jit_object_method(in_matrix,  _jit_sym_lock, in_lock_state);
	return err;
}