Example #1
void findExtrinsic(std::vector<CvPoint2D32f>& corners, int rows, int cols)
{

  CvMat *image_points_ex = cvCreateMat( corners.size(), 2, CV_64FC1);

  for (uint j = 0; j < corners.size(); j++){
    cvSetReal2D( image_points_ex, j, 0, corners[j].x);
    cvSetReal2D( image_points_ex, j, 1, corners[j].y);
  }

  //int views = 1;

  CvMat *object_points_ex = cvCreateMat( corners.size(), 3, CV_64FC1);
  for (uint j = 0; j < corners.size(); j++){
    cvSetReal2D( object_points_ex, j, 0, ( j % rows) * GRID_SIZE );
    cvSetReal2D( object_points_ex, j, 1, ( j / rows) * GRID_SIZE );
    cvSetReal2D( object_points_ex, j, 2, 0.0 );
  }

  //cvSetReal2D( itsCameraTranslation, 0, 2, 782.319961 );
  cvFindExtrinsicCameraParams2( object_points_ex,
      image_points_ex,
      itsIntrinsicMatrix,
      itsDistortionCoeffs,
      itsCameraRotation,
      itsCameraTranslation);

  cvReleaseMat( &image_points_ex);
  cvReleaseMat( &object_points_ex);

}
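A quick sanity check on the pose recovered above is to reproject the object points with cvProjectPoints2 and compare against the detected corners. A minimal sketch reusing the example's member names, and assuming itsCameraRotation holds the 3x1 rotation vector (a hypothetical check, placed before the cvReleaseMat calls):

  CvMat *reprojected = cvCreateMat( corners.size(), 2, CV_64FC1);
  cvProjectPoints2( object_points_ex, itsCameraRotation, itsCameraTranslation,
      itsIntrinsicMatrix, itsDistortionCoeffs, reprojected);
  double err = 0;
  for (uint j = 0; j < corners.size(); j++){
    double dx = cvGetReal2D( reprojected, j, 0) - corners[j].x;
    double dy = cvGetReal2D( reprojected, j, 1) - corners[j].y;
    err += dx*dx + dy*dy;   // accumulate squared pixel error
  }
  printf("RMS reprojection error: %f\n", sqrt(err / corners.size()));
  cvReleaseMat( &reprojected);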
Example #2
void calibrate ( CvMat* cornerPoints, CvMat* objectPoints,
		 CvMat* intrinsic, CvMat* distortion,
		 CvMat* rotation, CvMat* translation )
{

    CvMat* rotVector = cvCreateMat( 3, 1, CV_64F );
    cvFindExtrinsicCameraParams2( objectPoints, cornerPoints,
				  intrinsic, distortion, 
				  rotVector, translation );
    
    cvRodrigues2( rotVector, rotation, 0 );
    
    cvReleaseMat( &rotVector );
}
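For context, a hypothetical caller of calibrate() above; the matrix sizes are inferred from the signature and from Example #6 below, and rotation must be 3x3 because the internal 3x1 Rodrigues vector is converted with cvRodrigues2 before returning:

    CvMat* intrinsic   = cvCreateMat( 3, 3, CV_64F );
    CvMat* distortion  = cvCreateMat( 4, 1, CV_64F );
    CvMat* rotation    = cvCreateMat( 3, 3, CV_64F );
    CvMat* translation = cvCreateMat( 3, 1, CV_64F );
    // fill intrinsic/distortion and the Nx2 cornerPoints / Nx3 objectPoints, then:
    calibrate( cornerPoints, objectPoints, intrinsic, distortion, rotation, translation );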
Example #3
void Pattern::getExtrinsics(int patternSize, const Mat& cameraMatrix, const Mat& distortions)
{

	CvMat objectPts;//header for the 3D points in pat3DPts
	CvMat imagePts;//header for the 2D image points in pat2DPts
	CvMat intrinsics = cameraMatrix;
	CvMat distCoeff = distortions;
	CvMat rot = rotVec;
	CvMat tra = transVec;
	//		CvMat rotationMatrix = rotMat; // projectionMatrix = [rotMat tra];

	CvPoint2D32f pat2DPts[4];
	for (int i = 0; i<4; i++){
		pat2DPts[i].x = this->vertices.at(i).x;
		pat2DPts[i].y = this->vertices.at(i).y;
	}

	//3D points in pattern coordinate system
	CvPoint3D32f pat3DPts[4];
	pat3DPts[0].x = 0.0;
	pat3DPts[0].y = 0.0;
	pat3DPts[0].z = 0.0;
	pat3DPts[1].x = patternSize;
	pat3DPts[1].y = 0.0;
	pat3DPts[1].z = 0.0;
	pat3DPts[2].x = patternSize;
	pat3DPts[2].y = patternSize;
	pat3DPts[2].z = 0.0;
	pat3DPts[3].x = 0.0;
	pat3DPts[3].y = patternSize;
	pat3DPts[3].z = 0.0;

	cvInitMatHeader(&objectPts, 4, 3, CV_32FC1, pat3DPts);
	cvInitMatHeader(&imagePts, 4, 2, CV_32FC1, pat2DPts);

	//find extrinsic parameters
	/*cout << "objectPts is mat : " << CV_IS_MAT(&objectPts) << endl;
		cout << "imagePts is mat : " << CV_IS_MAT(&imagePts) << endl;
		cout << "intrinsics is mat : " << CV_IS_MAT(&intrinsics) << endl;
		cout << "distCoeff is mat : " << CV_IS_MAT(&distCoeff) << endl;
		cout << "rot is mat : " << CV_IS_MAT(&rot) << endl;
		cout << "tra is mat : " << CV_IS_MAT(&tra) << endl;*/
	cvFindExtrinsicCameraParams2(&objectPts, &imagePts, &intrinsics, &distCoeff, &rot, &tra);
}
Example #4
/**
 * Trying to figure out how this works based on
 * http://opencv.willowgarage.com/documentation/camera_calibration_and_3d_reconstruction.html#findextrinsiccameraparams2
 */
void test_cvFindExtrinsicCameraParams2()
{
	// The array of object points in the object coordinate space, 3xN or Nx3 1-channel, or 1xN or Nx1 3-channel, where N is the number of points.
	CvMat* object_points = cvCreateMat( 3, 3, CV_32FC1);
	// The array of corresponding image points, 2xN or Nx2 1-channel or 1xN or Nx1 2-channel, where N is the number of points.
	CvMat* image_points  = cvCreateMat( 3, 2, CV_32FC1); // Nx2 for N = 3 points (3x3 would not match the layout above)

	// camera and lens model
	CvMat* intrinsic_matrix  = cvCreateMat( 3, 3, CV_32FC1);
	CvMat* distortion_coeffs = NULL; // If it is NULL, all of the distortion coefficients are set to 0

	// output
	CvMat* rotation_vector    = cvCreateMat( 3, 1, CV_32F);
	CvMat* translation_vector = cvCreateMat( 3, 1, CV_32F);

	// Using the RGB camera intrinsics calculated at http://nicolas.burrus.name/index.php/Research/KinectCalibration
	float fx_rgb = 5.2921508098293293e+02;
	float fy_rgb = 5.2556393630057437e+02;
	float cx_rgb = 3.2894272028759258e+02;
	float cy_rgb = 2.6748068171871557e+02;

	// Camera intrinsic matrix:
	//     [ fx, 0,  cx ]
	// K = [ 0,  fy, cy ]
	//     [ 0,  0,  1  ]
	cvSetZero( intrinsic_matrix ); // cvCreateMat does not zero-initialize
	*( (float*)CV_MAT_ELEM_PTR( *intrinsic_matrix, 0, 0 ) ) = fx_rgb;
	*( (float*)CV_MAT_ELEM_PTR( *intrinsic_matrix, 1, 1 ) ) = fy_rgb;
	*( (float*)CV_MAT_ELEM_PTR( *intrinsic_matrix, 0, 2 ) ) = cx_rgb;
	*( (float*)CV_MAT_ELEM_PTR( *intrinsic_matrix, 1, 2 ) ) = cy_rgb;
	*( (float*)CV_MAT_ELEM_PTR( *intrinsic_matrix, 2, 2 ) ) = 1.0;

	cvFindExtrinsicCameraParams2( object_points, image_points, intrinsic_matrix, distortion_coeffs, rotation_vector, translation_vector, 0);

	// release the data
	cvReleaseMat(&object_points);
	cvReleaseMat(&image_points);
	cvReleaseMat(&intrinsic_matrix);
	cvReleaseMat(&distortion_coeffs);
	cvReleaseMat(&rotation_vector);
	cvReleaseMat(&translation_vector);
}
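Note that the test above never writes into object_points or image_points, so the call runs on uninitialized data. A minimal made-up filling with four coplanar points (the iterative solver generally wants at least four) might look like this, reusing the camera matrices from the test:

	CvMat* obj4 = cvCreateMat( 4, 3, CV_32FC1);
	CvMat* img4 = cvCreateMat( 4, 2, CV_32FC1);
	float obj[] = { 0,0,0,  1,0,0,  1,1,0,  0,1,0 };       // object coordinates
	float img[] = { 300,200, 400,210, 410,310, 295,305 };  // made-up pixel coordinates
	memcpy( obj4->data.fl, obj, sizeof(obj));
	memcpy( img4->data.fl, img, sizeof(img));
	cvFindExtrinsicCameraParams2( obj4, img4, intrinsic_matrix, NULL,
	                              rotation_vector, translation_vector, 0);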
Example #5
/* Variant of the previous function that takes double-precision parameters */
void cvFindExtrinsicCameraParams_64d( int point_count,
    CvSize, CvPoint2D64f* _image_points,
    CvPoint3D64f* _object_points, double* focal_length,
    CvPoint2D64f principal_point, double* _distortion_coeffs,
    double* _rotation_vector, double* _translation_vector )
{
    CvMat image_points = cvMat( point_count, 1, CV_64FC2, _image_points );
    CvMat object_points = cvMat( point_count, 1, CV_64FC3, _object_points );
    CvMat dist_coeffs = cvMat( 4, 1, CV_64FC1, _distortion_coeffs );
    double a[9];
    CvMat camera_matrix = cvMat( 3, 3, CV_64FC1, a );
    CvMat rotation_vector = cvMat( 1, 1, CV_64FC3, _rotation_vector );
    CvMat translation_vector = cvMat( 1, 1, CV_64FC3, _translation_vector );

    a[0] = focal_length[0]; a[4] = focal_length[1];
    a[2] = principal_point.x; a[5] = principal_point.y;
    a[1] = a[3] = a[6] = a[7] = 0.;
    a[8] = 1.;

    cvFindExtrinsicCameraParams2( &object_points, &image_points, &camera_matrix,
        &dist_coeffs, &rotation_vector, &translation_vector, 0 );
}
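A hypothetical call of this wrapper with four made-up coplanar points; note that the unnamed CvSize parameter is accepted but never used:

    CvPoint3D64f obj[4] = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    CvPoint2D64f img[4] = { {300,200}, {400,210}, {410,310}, {295,305} };
    double f[2] = { 525.0, 525.0 };    // fx, fy
    double dist[4] = { 0, 0, 0, 0 };   // no distortion
    double rvec[3], tvec[3];
    cvFindExtrinsicCameraParams_64d( 4, cvSize( 640, 480 ), img, obj, f,
        cvPoint2D64f( 320, 240 ), dist, rvec, tvec );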
Example #6
void calibrateCamera ( const char* intrinsicFileName, 
		      const char* extrinsicFileName,
		      CvMat* cornerPoints, CvMat* objectPoints ){

  CvMat* intrinsic = cvCreateMat( 3, 3, CV_64F );
  CvMat* distortion = cvCreateMat( 4, 1, CV_64F );

  CvMat* translation = cvCreateMat( 3, 1, CV_64F );

  CvMat* rotation = cvCreateMat( 3, 1, CV_64F );

  loadIntrinsicParams( intrinsicFileName, intrinsic, distortion );

  cvFindExtrinsicCameraParams2( objectPoints, cornerPoints, 
    intrinsic, distortion, rotation, translation, 0 );

  saveExtrinsicParams( extrinsicFileName, rotation, translation );
  
  cvReleaseMat( &intrinsic );
  cvReleaseMat( &distortion );
  cvReleaseMat( &rotation );
  cvReleaseMat( &translation );
}
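loadIntrinsicParams() is not shown here. Judging from the XML reading in Example #8 below, a plausible implementation would be the following (the "intrinsic"/"distortion" key names are an assumption):

void loadIntrinsicParams( const char* fileName, CvMat* intrinsic, CvMat* distortion )
{
  CvFileStorage* fs = cvOpenFileStorage( fileName, 0, CV_STORAGE_READ );
  CvMat* m = (CvMat*) cvRead( fs, cvGetFileNodeByName( fs, NULL, "intrinsic" ) );
  CvMat* d = (CvMat*) cvRead( fs, cvGetFileNodeByName( fs, NULL, "distortion" ) );
  cvConvert( m, intrinsic );    // copy into the caller's CV_64F matrices
  cvConvert( d, distortion );
  cvReleaseMat( &m );
  cvReleaseMat( &d );
  cvReleaseFileStorage( &fs );
}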
Example #7
int
main (int argc, char *argv[])
{
  int i, j, k;
  int corner_count, found;
  int p_count[IMAGE_NUM];
  IplImage *src_img[IMAGE_NUM];
  CvSize pattern_size = cvSize (PAT_COL, PAT_ROW);
  CvPoint3D32f objects[ALL_POINTS];
  CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * ALL_POINTS);
  CvMat object_points;
  CvMat image_points;
  CvMat point_counts;
  CvMat *intrinsic = cvCreateMat (3, 3, CV_32FC1);
  CvMat *rotation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *translation = cvCreateMat (1, 3, CV_32FC1);
  CvMat *distortion = cvCreateMat (1, 4, CV_32FC1);

  // (1) Load the calibration images
  for (i = 0; i < IMAGE_NUM; i++) {
    char buf[32];
    sprintf (buf, "calib_img/%02d.png", i);
    if ((src_img[i] = cvLoadImage (buf, CV_LOAD_IMAGE_COLOR)) == NULL) {
      fprintf (stderr, "cannot load image file : %s\n", buf);
    }
  }

  // (2) Set up the 3D object coordinates
  for (i = 0; i < IMAGE_NUM; i++) {
    for (j = 0; j < PAT_ROW; j++) {
      for (k = 0; k < PAT_COL; k++) {
        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;
        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;
      }
    }
  }
  cvInitMatHeader (&object_points, ALL_POINTS, 3, CV_32FC1, objects);

  // (3) Detect the corners of the chessboard (calibration pattern)
  int found_num = 0;
  cvNamedWindow ("Calibration", CV_WINDOW_AUTOSIZE);
  for (i = 0; i < IMAGE_NUM; i++) {
    found = cvFindChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], &corner_count);
    fprintf (stderr, "%02d...", i);
    if (found) {
      fprintf (stderr, "ok\n");
      found_num++;
    }
    else {
      fprintf (stderr, "fail\n");
    }
    // (4) Refine the corner positions to sub-pixel accuracy and draw them
    IplImage *src_gray = cvCreateImage (cvGetSize (src_img[i]), IPL_DEPTH_8U, 1);
    cvCvtColor (src_img[i], src_gray, CV_BGR2GRAY);
    cvFindCornerSubPix (src_gray, &corners[i * PAT_SIZE], corner_count,
                        cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
    cvDrawChessboardCorners (src_img[i], pattern_size, &corners[i * PAT_SIZE], corner_count, found);
    p_count[i] = corner_count;
    cvShowImage ("Calibration", src_img[i]);
    cvWaitKey (0);
  }
  cvDestroyWindow ("Calibration");

  if (found_num != IMAGE_NUM)
    return -1;
  cvInitMatHeader (&image_points, ALL_POINTS, 1, CV_32FC2, corners);
  cvInitMatHeader (&point_counts, IMAGE_NUM, 1, CV_32SC1, p_count);

  // (5) Estimate the intrinsic parameters and distortion coefficients
  cvCalibrateCamera2 (&object_points, &image_points, &point_counts, cvSize (640, 480), intrinsic, distortion);

  // (6) Estimate the extrinsic parameters
  CvMat sub_image_points, sub_object_points;
  int base = 0;
  cvGetRows (&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvGetRows (&object_points, &sub_object_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
  cvFindExtrinsicCameraParams2 (&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);

  // (7) Write the results to an XML file
  CvFileStorage *fs;
  fs = cvOpenFileStorage ("camera.xml", 0, CV_STORAGE_WRITE);
  cvWrite (fs, "intrinsic", intrinsic);
  cvWrite (fs, "rotation", rotation);
  cvWrite (fs, "translation", translation);
  cvWrite (fs, "distortion", distortion);
  cvReleaseFileStorage (&fs);

  for (i = 0; i < IMAGE_NUM; i++) {
    cvReleaseImage (&src_img[i]);
  }

  return 0;
}
Example #8
int
main (int argc, char *argv[])
{
	// IMAGE_NUM, PAT_ROW, PAT_COL,PAT_SIZE, ALL_POINTS, CHESS_SIZE

	/*
	if (argc < 6)
	{
		std::cout<< "ERROR : augment is incorrect" << std::endl;
		return -1;
	}
	*/

	//int PAT_ROW   	= atoi(argv[3]);
	//int PAT_COL   	= atoi(argv[4]);
	//int CHESS_SIZE	= atoi(argv[5]);
	//int PAT_SIZE = PAT_ROW*PAT_COL;
	char* NAME_IMG_IN		= argv[1];
	char* NAME_XML_OUT	= argv[2];	// needed below when writing the output XML

	int i,j;
	int corner_count, found;
	IplImage *src_img;
	CvSize pattern_size = cvSize(PAT_COL, PAT_ROW);
	CvMat image_points;
	CvMat object_points;
	CvMat *intrinsic, *distortion;
	CvMat *rotation = cvCreateMat(1, 3, CV_32FC1);
	CvMat *rotationConv = cvCreateMat(3, 3, CV_32FC1);
	CvMat *translation = cvCreateMat(1, 3, CV_32FC1);
	CvPoint3D32f objects[PAT_SIZE];
	CvFileStorage *fs;
	CvFileNode *param;
	CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * PAT_SIZE);

	// (1) Load the target image
	if ( ( src_img = cvLoadImage(NAME_IMG_IN, CV_LOAD_IMAGE_COLOR) ) == 0)
	//if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
	{
		std::cout<< "ERROR : input image is not exist  or  augment is incorrect" << std::endl;
		return -1;
	}

	// Set up the 3D object coordinates
	for (i = 0; i < PAT_ROW; i++) {
		for (j = 0; j < PAT_COL; j++) {
			objects[i * PAT_COL + j].x = i * CHESS_SIZE;
			objects[i * PAT_COL + j].y = j * CHESS_SIZE;
			objects[i * PAT_COL + j].z = 0.0;
		}
	}
	cvInitMatHeader(&object_points, PAT_SIZE, 3, CV_32FC1, objects);

	// Detect the corners of the chessboard (calibration pattern)
	int found_num = 0;
//	cvNamedWindow("Calibration", CV_WINDOW_AUTOSIZE);
	found = cvFindChessboardCorners(src_img, pattern_size, &corners[0], &corner_count);
	fprintf(stderr, "corner:%02d...\n", corner_count);
	if (found) {
		fprintf(stderr, "ok\n");
	} else {
		fprintf(stderr, "fail\n");
	}

	// (4) Refine the corner positions to sub-pixel accuracy
	IplImage *src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);
	cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
	cvFindCornerSubPix (src_gray, &corners[0], corner_count,
			cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	cvDrawChessboardCorners (src_img, pattern_size, &corners[0], corner_count, found);

//	cvShowImage ("Calibration", src_img);
//	cvWaitKey (0);
//	cvDestroyWindow("Calibration");
	cvShowImage ("Calibration", src_img);

	cvInitMatHeader(&image_points, PAT_SIZE, 1, CV_32FC2, corners);


	// (2) Load the parameter file
	fs = cvOpenFileStorage ("xml/rgb.xml", 0, CV_STORAGE_READ);
	param = cvGetFileNodeByName (fs, NULL, "intrinsic");
	intrinsic = (CvMat *) cvRead (fs, param);
	param = cvGetFileNodeByName (fs, NULL, "distortion");
	distortion = (CvMat *) cvRead (fs, param);
	cvReleaseFileStorage (&fs);

	// (3) Estimate the extrinsic parameters
	CvMat sub_image_points, sub_object_points;
	int base = 0;
	cvGetRows(&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
	cvGetRows(&object_points, &sub_object_points, base * PAT_SIZE, (base + 1)* PAT_SIZE);
	cvFindExtrinsicCameraParams2(&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
	int ret = cvRodrigues2(rotation, rotationConv);

//	int cols = sub_object_points.rows;
//	printf("cols = %d\n", cols);
//	printf("%f\n",sub_object_points.data.fl[0]);

	// mm -> m
	for (i = 0; i < translation->cols; i++) { translation->data.fl[i] = translation->data.fl[i] / 1000;}

	// (4) Write the results to an XML file
	//fs = cvOpenFileStorage(argv[2], 0, CV_STORAGE_WRITE);
	fs = cvOpenFileStorage(NAME_XML_OUT, 0, CV_STORAGE_WRITE);
	cvWrite(fs, "rotation", rotationConv);
	cvWrite(fs, "translation", translation);
	cvReleaseFileStorage(&fs);

	/////////////////////////////////////////////////
	// write out py
	if(1)
	{
		cv::Mat ttt(translation);
		cv::Mat rrr(rotationConv);


		char data2Write[1024];
		char textFileName[256];
		sprintf( textFileName , "cbCoord/cbOneShot.py");
		std::ofstream outPy(textFileName);

		outPy << "import sys"						<<std::endl;
		outPy << "sys.path.append('../')"			<<std::endl;
		outPy << "from numpy import *"				<<std::endl;
		outPy << "from numpy.linalg import svd"	<<std::endl;
		outPy << "from numpy.linalg import inv"	<<std::endl;
		outPy << "from chessboard_points import *"<<std::endl;
		outPy << "sys.path.append('../geo')"		<<std::endl;
		outPy << "from geo import *"				<<std::endl;

		/*
		///////////////////////////////////////////////////////////////////////////////////
		// out translation and rotation as xyzabc list
		outPy << "xyzabc = []"	<<std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation and rotation as xyzabc list
		///////////////////////////////////////////////////////////////////////////////////
		 */

		///////////////////////////////////////////////////////////////////////////////////
		// out translation
		outPy << "ttt = []"	<<std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation
		//////////////////////////////////////////////////////////////////////////////////////

		///////////////////////////////////////////////////////////////////////////////////
		// out rotation
		outPy << "rrr = []"	<<std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(0), rrr.at<float>(1), rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(3), rrr.at<float>(4), rrr.at<float>(5) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(6), rrr.at<float>(7), rrr.at<float>(8) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out rotation
		//////////////////////////////////////////////////////////////////////////////////////

	
		/////////////////////////////////////////////////////////////////
		outPy<< "_T = FRAME( vec=ttt, mat=rrr )" << std::endl;
		/////////////////////////////////////////////////////////////////

	}
	// write out py
	/////////////////////////////////////////////////
	
	std::cout<< "press any key..."<< std::endl;
	cvWaitKey (0);
	cvDestroyWindow("Calibration");


	cvReleaseImage(&src_img);
	cvReleaseMat(&intrinsic);
	cvReleaseMat(&distortion);

	return 0;
}
Example #9
bool solvePnP( InputArray _opoints, InputArray _ipoints,
               InputArray _cameraMatrix, InputArray _distCoeffs,
               OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    CV_INSTRUMENT_REGION()

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    Mat rvec, tvec;
    if( flags != SOLVEPNP_ITERATIVE )
        useExtrinsicGuess = false;

    if( useExtrinsicGuess )
    {
        int rtype = _rvec.type(), ttype = _tvec.type();
        Size rsize = _rvec.size(), tsize = _tvec.size();
        CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
                   (ttype == CV_32F || ttype == CV_64F) );
        CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
                   (tsize == Size(1, 3) || tsize == Size(3, 1)) );
    }
    else
    {
        int mtype = CV_64F;
        // use CV_32F if all PnP inputs are CV_32F and outputs are empty
        if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
            _rvec.empty() && _tvec.empty())
            mtype = _opoints.depth();

        _rvec.create(3, 1, mtype);
        _tvec.create(3, 1, mtype);
    }
    rvec = _rvec.getMat();
    tvec = _tvec.getMat();

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);
    bool result = false;

    if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        Mat R;
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        result = true;
    }
    else if (flags == SOLVEPNP_P3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        ap3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = rvec, c_tvec = tvec;
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        result = true;
    }
    /*else if (flags == SOLVEPNP_DLS)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

        dls PnP(opoints, undistortedPoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = PnP.compute_pose(R, tvec);
        if (result)
            Rodrigues(R, rvec);
        return result;
    }
    else if (flags == SOLVEPNP_UPNP)
    {
        upnp PnP(cameraMatrix, opoints, ipoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        return true;
    }*/
    else
        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
    return result;
}
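For reference, the modern C++ entry point for the branch above: a cv::solvePnP call with SOLVEPNP_ITERATIVE ends up in the cvFindExtrinsicCameraParams2 call shown in this example. The point containers and camera matrices below are placeholders:

    std::vector<cv::Point3f> objectPoints;  // fill with model points
    std::vector<cv::Point2f> imagePoints;   // fill with detected pixel points
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                 rvec, tvec, false, cv::SOLVEPNP_ITERATIVE);
    cv::Mat R;
    cv::Rodrigues(rvec, R);  // 3x1 rotation vector -> 3x3 rotation matrix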
Example #10
void ProjectionModel::calculateProjection()
{
    if(intrinsic_matrix != 0 && distortion_coeffs != 0)
    {
        int corner_count = Chessboard::board_total;

        cvCvtColor(sourceImg, gray_image, CV_RGB2GRAY);

        int found = cvFindChessboardCorners(gray_image,
                                            board_sz,
                                            corners,
                                            &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(!found)
        {
            return;
        }

        cvFindCornerSubPix(gray_image,
                           corners,
                           corner_count,
                           cvSize(11,11),
                           cvSize(-1,-1),
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        objPts[0].x = 0;
        objPts[0].y = 0;
        objPts[1].x = Chessboard::board_w - 1;
        objPts[1].y = 0;
        objPts[2].x = 0;
        objPts[2].y = Chessboard::board_h - 1;
        objPts[3].x = Chessboard::board_w - 1;
        objPts[3].y = Chessboard::board_h - 1;
        imgPts[3] = corners[0];
        imgPts[2] = corners[Chessboard::board_w - 1];
        imgPts[1] = corners[(Chessboard::board_h - 1) * Chessboard::board_w];
        imgPts[0] = corners[(Chessboard::board_h - 1) * Chessboard::board_w + Chessboard::board_w - 1];

        cvGetPerspectiveTransform(objPts, imgPts, H);

        for(int i = 0; i < 4; ++i)
        {
            CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
            CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
        }

        cvFindExtrinsicCameraParams2(object_points,
                                     image_points,
                                     intrinsic_matrix,
                                     distortion_coeffs,
                                     rotation_vector,
                                     translation_vector);

        cvRodrigues2(rotation_vector, rotation_matrix);

        for(int f = 0; f < 3; f++)
        {
            for(int c = 0; c < 3; c++)
            {
                fullMatrix[c * 4 + f] = rotation_matrix->data.fl[f * 3 + c];   //transposed
            }
        }

        fullMatrix[3] = 0.0;
        fullMatrix[7] = 0.0;
        fullMatrix[11] = 0.0;
        fullMatrix[12] = translation_vector->data.fl[0];
        fullMatrix[13] = translation_vector->data.fl[1];
        fullMatrix[14] = translation_vector->data.fl[2];
        fullMatrix[15] = 1.0;
    }
}
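The loop above writes the rotation into fullMatrix in column-major order (hence the index swap marked "transposed") with the translation in elements 12..14, which is the layout OpenGL expects for a modelview matrix. Assuming that is the intended consumer and that fullMatrix is a plain float[16] (both assumptions), it could be used as:

    glMatrixMode(GL_MODELVIEW);
    glLoadMatrixf(fullMatrix);  // object-to-camera transform from the extrinsics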
Example #11
void ImageOverlayer::initializeCamParams()
{
  int nframes=1;
  int n = 9; // number of points to process
  int N=nframes*n;
  
  vector<CvPoint2D32f> temp(n);
  vector<int> npoints;
  vector<CvPoint3D32f> objectPoints;
  vector<CvPoint2D32f> points[2];
  points[0].resize(n);
  points[1].resize(n);
  
  double R[3][3], T[3], E[3][3], F[3][3];
  double Q[4][4];
    

/*************************************************************/    
    
   _M1 = cvMat(3, 3, CV_64F, M1 );
   _M2 = cvMat(3, 3, CV_64F, M2 );
   _D1 = cvMat(1, 5, CV_64F, D1 );
   _D2 = cvMat(1, 5, CV_64F, D2 );
  CvMat _R = cvMat(3, 3, CV_64F, R );
  CvMat _T = cvMat(3, 1, CV_64F, T );
  CvMat _E = cvMat(3, 3, CV_64F, E );
  CvMat _F = cvMat(3, 3, CV_64F, F );
  CvMat _Q = cvMat(4,4, CV_64F, Q);
  
  vector<CvPoint2D32f>& pts = points[0];

  
  //XY pixel points, left image
  points[0][0].x=34.0;
  points[0][0].y=336.0;
  points[0][1].x=502.0;
  points[0][1].y=156.0;
  points[0][2].x=1280.0;
  points[0][2].y=279.0;
  points[0][3].x=664.0;
  points[0][3].y=174.0;
  points[0][4].x=914.0;
  points[0][4].y=209.0;
  points[0][5].x=248.0;
  points[0][5].y=300.0;
  points[0][6].x=663.0;
  points[0][6].y=482.0;
  points[0][7].x=185.0;
  points[0][7].y=364.0;
  points[0][8].x=400.0;
  points[0][8].y=507.0;  

  
  //XY pixel points, right image
  points[1][0].x=866.0;
  points[1][0].y=942.0;
  points[1][1].x=98.0;
  points[1][1].y=376.0;
  points[1][2].x=856.0;
  points[1][2].y=72.0;
  points[1][3].x=445.0;
  points[1][3].y=222.0;
  points[1][4].x=690.0;
  points[1][4].y=128.0;
  points[1][5].x=779.0;
  points[1][5].y=442.0;
  points[1][6].x=1162.0;
  points[1][6].y=161.0;
  points[1][7].x=1061.0;
  points[1][7].y=413.0;
  points[1][8].x=1244.0;
  points[1][8].y=215.0;
  
  
  npoints.resize(nframes,n);
  objectPoints.resize(nframes*n);
  
  /* 3D points (x,y,z) related to the 2D points from left and right image - minimum: 8 points*/
  
  objectPoints[0] = cvPoint3D32f(6.0,-4.5,0.0);
  objectPoints[1] = cvPoint3D32f(0.0,-4.5,0.0);
  objectPoints[2] = cvPoint3D32f(0.0,+4.5,0.0);
  objectPoints[3] = cvPoint3D32f(0.0,-1.5,0.0);
  objectPoints[4] = cvPoint3D32f(0.0,+1.5,0.0);
  objectPoints[5] = cvPoint3D32f(4.3,-2.7,0.0);
  objectPoints[6] = cvPoint3D32f(4.3,+2.7,0.0);
  objectPoints[7] = cvPoint3D32f(5.25,-1.75,0.0);
  objectPoints[8] = cvPoint3D32f(5.25,+1.75,0.0);  
  
  for( int i = 1; i < nframes; i++ )
      copy( objectPoints.begin(), objectPoints.begin() + n,
      objectPoints.begin() + i*n );
  
  CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
  CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
  CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
  CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
  
   
   /**************************************************************************/
  double R1[3][3], R2[3][3], P1[3][4], P2[3][4];

  CvMat _R1 = cvMat(3, 3, CV_64F, R1);
  CvMat _R2 = cvMat(3, 3, CV_64F, R2);
  CvMat _P1 = cvMat(3, 4, CV_64F, P1);
  CvMat _P2 = cvMat(3, 4, CV_64F, P2);

/************************************** StereoCalibration - returns R T R1 R2 T1 T2 **************************************/   
  CvSize imageSize = cvSize(1294,964);

  cvStereoCalibrate( &_objectPoints, &_imagePoints1,
      &_imagePoints2, &_npoints,
      &_M1, &_D1, &_M2, &_D2,
      imageSize, &_R, &_T, &_E, &_F,
      cvTermCriteria(CV_TERMCRIT_ITER+
      CV_TERMCRIT_EPS, 30, 1e-5),
      CV_CALIB_USE_INTRINSIC_GUESS+ CV_CALIB_FIX_ASPECT_RATIO);

     
/*************************************************************************************************************************/ 

/***************************************** Extrinsic Parameters **********************************************************/
  CvMat* Rvec_left = cvCreateMat( 3, 1, CV_64F );
  Tvec_left = cvCreateMat( 3, 1, CV_64F );
  CvMat* Rvec_right = cvCreateMat( 3, 1, CV_64F );
  Tvec_right = cvCreateMat( 3, 1, CV_64F ); 
  
  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints1,&_M1,&_D1,Rvec_left,Tvec_left);
  
  Rvec_left_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_left,Rvec_left_n,0);
    

  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints2,&_M2,&_D2,Rvec_right,Tvec_right);
  
  Rvec_right_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_right,Rvec_right_n,0);
    
/*************************************************************************************************************************/
}
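Since both extrinsic poses are expressed in the same object frame, they imply a left-to-right transform R_lr = R_right * R_left^T and T_lr = Tvec_right - R_lr * Tvec_left, which can be cross-checked against the R, T returned by cvStereoCalibrate above. A sketch:

  CvMat* R_lr = cvCreateMat( 3, 3, CV_64F );
  CvMat* T_lr = cvCreateMat( 3, 1, CV_64F );
  cvGEMM(Rvec_right_n, Rvec_left_n, 1.0, NULL, 0.0, R_lr, CV_GEMM_B_T); // R_r * R_l^T
  cvGEMM(R_lr, Tvec_left, -1.0, Tvec_right, 1.0, T_lr, 0);              // t_r - R_lr * t_l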
Example #12
int main(int argc, char* argv[]) {
  if(argc != 6){
	printf("usage: %s board_w board_h intrinsics.xml distortion.xml image\n", argv[0]);
	return -1;
  }
  // INPUT PARAMETERS:
  //
  int       board_w    = atoi(argv[1]);
  int       board_h    = atoi(argv[2]);
  int       board_n    = board_w * board_h;
  CvSize    board_sz   = cvSize( board_w, board_h );
  CvMat*    intrinsic  = (CvMat*)cvLoad(argv[3]);
  CvMat*    distortion = (CvMat*)cvLoad(argv[4]);
  IplImage* image      = 0;
  IplImage* gray_image = 0;
  if( (image = cvLoadImage(argv[5])) == 0 ) {
    printf("Error: Couldn't load %s\n",argv[5]);
    return -1;
  }
  
  CvMat* image_points      = cvCreateMat(1*board_n,2,CV_32FC1);
  CvMat* object_points     = cvCreateMat(1*board_n,3,CV_32FC1);
  
  CvMat* objdrawpoints = cvCreateMat(1,1,CV_32FC3);
  CvMat* imgdrawpoints = cvCreateMat(1,1,CV_32FC2);
  float x=0;
  float y=0;
  float z=0;
  
  double grid_width=2.85;
  gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
  cvCvtColor(image, gray_image, CV_BGR2GRAY );

  CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
  int corner_count = 0;
  int found = cvFindChessboardCorners(
	gray_image,
	board_sz,
	corners,
	&corner_count,
	CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
  );
  if(!found){
	printf("Couldn't aquire chessboard on %s, "
	  "only found %d of %d corners\n",
	  argv[5],corner_count,board_n
	);
	return -1;
  }
  //Get Subpixel accuracy on those corners:
  cvFindCornerSubPix(
	gray_image,
	corners,
	corner_count,
	cvSize(11,11),
	cvSize(-1,-1),
	cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
  );

// If we got a good board, add it to our data
  for( int i=0, j=0; j<board_n; ++i,++j ) {
	CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
	CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
	CV_MAT_ELEM(*object_points,float,i,0) =grid_width*( j/board_w);
	//  cout<<j/board_w<<" "<<j%board_w<<endl;
	CV_MAT_ELEM(*object_points,float,i,1) = grid_width*(j%board_w);
	CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
  }

  // DRAW THE FOUND CHESSBOARD
  //
  cvDrawChessboardCorners(
	image,
	board_sz,
	corners,
	corner_count,
	found
  );

  // ESTIMATE THE EXTRINSIC POSE
  //
  CvMat *trans = cvCreateMat( 1, 3, CV_32F);
  CvMat *rot = cvCreateMat( 1, 3, CV_32F);

  cvFindExtrinsicCameraParams2(object_points,image_points,intrinsic,distortion,rot,trans);
  
//  cvSave("trans.xml",trans); 
//  cvSave("rot.xml",rot); 
  int key = 0;
  IplImage *drawn_image = cvCloneImage(image);
  cvNamedWindow("translation");

  // LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
  //
  // escape key stops
  //
  
//  cvSetZero(trans);
//  cvSetZero(rot);
  while(key != 27) {
	cvCopy(image,drawn_image);
	
	// a/q adjust x, s/w adjust y, d/e adjust z (ASCII key codes)
	if(key==97)x--;        // 'a'
	else if(key==113)x++;  // 'q'
	else if(key==115)y--;  // 's'
	else if(key==119)y++;  // 'w'
	else if(key==100)z--;  // 'd'
	else if(key==101)z++;  // 'e'
	
	((float*)(objdrawpoints->data.ptr))[0]=x;
	((float*)(objdrawpoints->data.ptr))[1]=y;
	((float*)(objdrawpoints->data.ptr))[2]=z;
	printf("%f %f %f\n",x,y,z);
	cvProjectPoints2(objdrawpoints,rot,trans,intrinsic,distortion,imgdrawpoints);
	cvCircle(drawn_image,cvPoint(((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]),5,cvScalar(255,0,0),-1);
	printf("%f %f\n",((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]);
	cvShowImage( "translation", drawn_image );
	key = cvWaitKey(3);
  }
  cvDestroyWindow( "translation" );
  // release everything we allocated above
  delete [] corners;
  cvReleaseMat(&image_points);
  cvReleaseMat(&object_points);
  cvReleaseMat(&objdrawpoints);
  cvReleaseMat(&imgdrawpoints);
  cvReleaseMat(&trans);
  cvReleaseMat(&rot);
  cvReleaseMat(&intrinsic);
  cvReleaseMat(&distortion);
  cvReleaseImage(&drawn_image);
  cvReleaseImage(&gray_image);
  cvReleaseImage(&image);
  return 0;
}
Example #13
float BoardDetector::detect(const vector<Marker> &detectedMarkers,const  BoardConfiguration &BConf, Board &Bdetected, Mat camMatrix,Mat distCoeff,float markerSizeMeters)throw (cv::Exception)
{
// cout<<"markerSizeMeters="<<markerSizeMeters<<endl;
    ///find among detected markers these that belong to the board configuration
    Mat detected(BConf._markersId.size(),CV_32SC1); //stores the indices of the markers
    detected.setTo(Scalar(-1));//-1 means not detected
    int nMarkInBoard=0;//total number of markers detected
    for (unsigned int i=0;i<detectedMarkers.size();i++) {
        bool found=false;
        int id=detectedMarkers[i].id;
        //find it
        for (  int j=0;j<detected.size().height && ! found;j++)
            for (  int k=0;k<detected.size().width && ! found;k++)
                if ( BConf._markersId.at<int>(j,k)==id) {
                    detected.at<int>(j,k)=i;
                    nMarkInBoard++;
                    found=true;
                    Bdetected.push_back(detectedMarkers[i]);
                    if (markerSizeMeters>0)
                        Bdetected.back().ssize=markerSizeMeters;
                }
    }
    Bdetected.conf=BConf;
    if (markerSizeMeters!=-1)
        Bdetected.markerSizeMeters=markerSizeMeters;
//calculate extrinsic if there is information for that
    if (camMatrix.rows!=0 && markerSizeMeters>0 && detectedMarkers.size()>1) {
        // now, create the matrices for finding the extrinsics
        Mat objPoints(4*nMarkInBoard,3,CV_32FC1);
        Mat imagePoints(4*nMarkInBoard,2,CV_32FC1);
        //size in meters of inter-marker distance
        double markerDistanceMeters= double(BConf._markerDistancePix) * markerSizeMeters / double(BConf._markerSizePix);



        int currIndex=0;
        for (  int y=0;y<detected.size().height;y++)
            for (  int x=0;x<detected.size().width;x++) {
                if (  detected.at<int>(y,x)!=-1 ) {

                    vector<Point2f> points =detectedMarkers[ detected.at<int>(y,x) ];
                    //set first image points
                    for (int p=0;p<4;p++) {
                        imagePoints.at<float>(currIndex+p,0)=points[p].x;
                        imagePoints.at<float>(currIndex+p,1)=points[p].y;
                    }

                    //translation to put the reference system at the center
                    float TX=-(  ((detected.size().height-1)*(markerDistanceMeters+markerSizeMeters) +markerSizeMeters) /2) ;
                    float TY=-(  ((detected.size().width-1)*(markerDistanceMeters+markerSizeMeters) +markerSizeMeters)/2);
                    //points in the real reference system; we set the origin at the bottom-left corner
                    float AY=x*(markerDistanceMeters+markerSizeMeters ) +TY;
                    float AX=y*(markerDistanceMeters+markerSizeMeters )+TX;
                    objPoints.at<float>( currIndex,0)= AX;
                    objPoints.at<float>( currIndex,1)= AY;
                    objPoints.at<float>( currIndex,2)= 0;
                    objPoints.at<float>( currIndex+1,0)= AX;
                    objPoints.at<float>( currIndex+1,1)= AY+markerSizeMeters;
                    objPoints.at<float>( currIndex+1,2)= 0;
                    objPoints.at<float>( currIndex+2,0)= AX+markerSizeMeters;
                    objPoints.at<float>( currIndex+2,1)= AY+markerSizeMeters;
                    objPoints.at<float>( currIndex+2,2)= 0;
                    objPoints.at<float>( currIndex+3,0)= AX+markerSizeMeters;
                    objPoints.at<float>( currIndex+3,1)= AY;
                    objPoints.at<float>( currIndex+3,2)= 0;
                    currIndex+=4;
                }
            }

        CvMat cvCamMatrix=camMatrix;
        CvMat cvDistCoeffs;
        Mat zeros=Mat::zeros(4,1,CV_32FC1);
        if (distCoeff.rows>=4)  cvDistCoeffs=distCoeff;
        else  cvDistCoeffs=zeros;
        CvMat cvImgPoints=imagePoints;
        CvMat cvObjPoints=objPoints;

        CvMat cvRvec=Bdetected.Rvec;
        CvMat cvTvec=Bdetected.Tvec;
        cvFindExtrinsicCameraParams2(&cvObjPoints, &cvImgPoints, &cvCamMatrix, &cvDistCoeffs,&cvRvec,&cvTvec);
        //now, rotate 90 deg in X so that Y axis points up
        rotateXAxis(Bdetected.Rvec);
        //cout<<Bdetected.Rvec.at<float>(0,0)<<" "<<Bdetected.Rvec.at<float>(1,0)<<Bdetected.Rvec.at<float>(2,0)<<endl;
        //cout<<Bdetected.Tvec.at<float>(0,0)<<" "<<Bdetected.Tvec.at<float>(1,0)<<Bdetected.Tvec.at<float>(2,0)<<endl;
    }
    return double(nMarkInBoard)/double( BConf._markersId.size().width*BConf._markersId.size().height);
}
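rotateXAxis() is not listed in this example. Going by the comment, a plausible sketch converts the Rodrigues vector to a matrix, applies a 90-degree rotation about X, and converts back; whether the extra rotation is pre- or post-multiplied depends on the library's convention, so treat this as an assumption:

void rotateXAxis(Mat &rotation)
{
    Mat R(3,3,CV_32FC1);
    Rodrigues(rotation, R);
    // 90 degrees about X: Y -> Z, Z -> -Y
    float rx[9] = { 1,0,0,  0,0,-1,  0,1,0 };
    Mat RX(3,3,CV_32FC1, rx);
    R = R*RX;
    Rodrigues(R, rotation);
}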
Example #14
bool PnPSolverOpenCV::pnpSolve(CorrespondenceSet correspondences, bool useLast)
{
	if (correspondences.size() < 4)
	{
		return false;
	}
    //create cvmat structures
    CvMat *modelPoints = cvCreateMat(correspondences.size(), 3, CV_32FC1);
    CvMat *projectedPoints = cvCreateMat(correspondences.size(), 2, CV_32FC1);
    CvMat *pointsCount = cvCreateMat(1, 1, CV_32SC1);

    CvMat *cameraMatrixCV = cvCreateMat(3, 3, CV_32FC1);
    cvSet(cameraMatrixCV, cvScalarAll(0));
    cameraMatrixCV->data.fl[2] = static_cast<float> (this->cameraMatrix(0,2));
    cameraMatrixCV->data.fl[5] = static_cast<float> (this->cameraMatrix(1,2));
    cameraMatrixCV->data.fl[0] = static_cast<float> (this->cameraMatrix(0,0));
    cameraMatrixCV->data.fl[4] = static_cast<float> (this->cameraMatrix(1,1));
    cameraMatrixCV->data.fl[8] = 1;

	CvMat *distMatrixCV = cvCreateMat(8, 1, CV_32FC1);
	cvSet(distMatrixCV, cvScalarAll(0));
	for(int i = 0; i < this->distCoeffs.cols()*this->distCoeffs.rows(); i++)
	{
		distMatrixCV->data.fl[i] = this->distCoeffs[i];
	}

    int counter = 0;
    for(CorrespondenceSet::iterator cit = correspondences.begin(); cit != correspondences.end(); cit++, counter++)
    {
        cvSet2D(modelPoints, counter, 0, cvScalar(cit->mx));
        cvSet2D(modelPoints, counter, 1, cvScalar(cit->my));
        cvSet2D(modelPoints, counter, 2, cvScalar(cit->mz));
        cvSet2D(projectedPoints, counter, 0, cvScalar(cit->px));
        cvSet2D(projectedPoints, counter, 1, cvScalar(cit->py));
    }
    cvSet2D(pointsCount, 0, 0, cvScalar(counter));
	
    CvMat *rotationMatrix = cvCreateMat(3, 3, CV_32FC1);
    CvMat *rotationVec = cvCreateMat(1, 3, CV_32FC1);
    cvSet(rotationVec, cvScalarAll(0));
    CvMat *translationVec = cvCreateMat(1, 3, CV_32FC1);
    cvSet(translationVec, cvScalarAll(0));

	if(useLast)
	{
		for(int ri = 0; ri < 3; ri++)
		{
			for(int ci = 0; ci < 3; ci++)
			{
				rotationMatrix->data.fl[ri*3 + ci] = static_cast<float>(this->worldTransformMatrix(ri, ci));
			}
			translationVec->data.fl[ri] = static_cast<float>(this->worldTransformMatrix(ri, 3));
		}
		
		cvRodrigues2(rotationMatrix, rotationVec);
	}

    //we do distortion stuff elsewhere
	cvFindExtrinsicCameraParams2(modelPoints, projectedPoints, cameraMatrixCV, distMatrixCV, rotationVec, translationVec, (useLast) ? 1 : 0);


	//cv::Mat mmodelPoints(modelPoints, true);
	//cv::Mat mprojectedPoints(projectedPoints, true);
	//cv::Mat mcameraMatrixCV(cameraMatrixCV, true);
	//cv::Mat mdistortion(8, 1, CV_32FC1); mdistortion.setTo(0);
	//cv::Mat mrotationVec(rotationVec);
	//cv::Mat mtranslationVec(translationVec);
	//cv::solvePnPRansac(mmodelPoints, mprojectedPoints, mcameraMatrixCV, mdistortion,
	//	mrotationVec, mtranslationVec, (useLast)?1:0,
	//	100, 3.0f, 13, cv::noArray(), 0);

    double rotMat[3][3];
    double transVec[3];

    cvRodrigues2(rotationVec, rotationMatrix);

	

    for(int ri = 0; ri < 3; ri++)
    {
        for(int ci = 0; ci < 3; ci++)
        {
            rotMat[ri][ci] = rotationMatrix->data.fl[ri*3 + ci];
        }
        transVec[ri] = translationVec->data.fl[ri];
    }

	
	if (transVec[2] < 0) //we are behind the marker - this shouldn't happen - invert it!
	{	
		//inverse rotation and translation
		for (int ri = 0; ri < 3; ri++)
		{
			for (int ci = 0; ci < 3; ci++)
			{
				rotationMatrix->data.fl[ri * 3 + ci] = rotMat[ci][ri];
			}
			translationVec->data.fl[ri] = -transVec[ri];
		}

		cvRodrigues2(rotationMatrix, rotationVec);

		cvFindExtrinsicCameraParams2(modelPoints, projectedPoints, cameraMatrixCV, distMatrixCV, rotationVec, translationVec, 1);

		cvRodrigues2(rotationVec, rotationMatrix);

		for (int ri = 0; ri < 3; ri++)
		{
			for (int ci = 0; ci < 3; ci++)
			{
				rotMat[ri][ci] = rotationMatrix->data.fl[ri * 3 + ci];
			}
			transVec[ri] = translationVec->data.fl[ri];
		}
	}

    cvReleaseMat(&rotationMatrix);
    cvReleaseMat(&modelPoints);
    cvReleaseMat(&projectedPoints);
    cvReleaseMat(&pointsCount);
    cvReleaseMat(&rotationVec);
    cvReleaseMat(&translationVec);
    cvReleaseMat(&cameraMatrixCV);

    this->worldTransformMatrix << rotMat[0][0], rotMat[0][1], rotMat[0][2], transVec[0],
            rotMat[1][0], rotMat[1][1], rotMat[1][2], transVec[1],
            rotMat[2][0], rotMat[2][1], rotMat[2][2], transVec[2],
            0.0, 0.0, 0.0, 1.0;

    this->worldPos = Eigen::Vector3d(transVec[0], transVec[1], transVec[2]);
    this->worldQuat = Eigen::Quaterniond(worldTransformMatrix.block<3,3>(0, 0));

    return true;
}
Example #15
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{	
	
	board_sz = cvSize(m_board_w, m_board_h);
	
	//Compute the calibration pattern.
	if (m_inputImageIn.isNew()) {

		m_inputImageIn.read();

		if(m_keyIn.isNew()){
			m_keyIn.read();
			key = (int)m_key.data;
		}
		
		if(g_temp_w != m_inputImage.width || g_temp_h != m_inputImage.height){
		
			inputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			outputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			tempImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			undistortionImage = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			birds_image = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			
			intrinsicMatrix = cvCreateMat(3,3,CV_64FC1);
			distortionCoefficient = cvCreateMat(4,1,CV_64FC1);
			
			captureCount = 0;
			findFlag = 0;

			mapx = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);
			mapy = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);

			corners = new CvPoint2D32f[m_board_w * m_board_h];
			
			g_temp_w = m_inputImage.width;
			g_temp_h = m_inputImage.height;
		
		}

		//Start capturing.
		memcpy(inputImage_buff->imageData,(void *)&(m_inputImage.pixels[0]), m_inputImage.pixels.length());

//		tempImage_buff = cvCloneImage(inputImage_buff);
		//Write the image to the OutPort.
		int len = inputImage_buff->nChannels * inputImage_buff->width * inputImage_buff->height;
		m_origImage.pixels.length(len);
		
		memcpy((void *)&(m_origImage.pixels[0]), inputImage_buff->imageData, len);
		m_origImage.width = inputImage_buff->width;
		m_origImage.height = inputImage_buff->height;

		m_origImageOut.write();
		
		//Create a window to check the capture
		//cvShowImage("Capture", inputImage_buff);
		cvWaitKey(1);
		
		//Pressing the space bar captures five sample frames
		if (key == ' ') {
			
			tempImage_buff = cvCloneImage(inputImage_buff);
			//Create the image
			IplImage *grayImage = cvCreateImage(cvGetSize(tempImage_buff), 8, 1);

			//Create the matrices
			CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //world-coordinate matrix
			CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES ,2, CV_64FC1); //image-coordinate matrix
			CvMat *pointCounts = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 1, CV_32SC1); //corner-count matrix
			CvMat *rotationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //rotation vectors
			CvMat *translationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //translation vectors

			//Set the world coordinates
			for (int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				for ( int j = 0; j < (m_board_w * m_board_h); j++) {
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 0, (j % m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 1, (j / m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 2, 0.0);
				}
			}

			//Set the corner counts
			for(int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				cvSetReal2D(pointCounts, i, 0, (m_board_w * m_board_h));
			}
			
			//Detect the corners.
			findFlag = findCorners(tempImage_buff, grayImage, corners);

			if (findFlag != 0) {
			
				//All corners were detected:
				//set the image coordinates.
				for (;;){
					for (int i = 0; i < (m_board_w * m_board_h); i++){
 						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 0, corners[i].x);
						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 1, corners[i].y);
					}
				
					captureCount++;    

					printf("%d枚目キャプチャしました\n", captureCount);

					if (captureCount == NUM_OF_BACKGROUND_FRAMES) {
						//The check pattern has been captured the configured number of times:
						//estimate the camera parameters.
						cvCalibrateCamera2(
							worldCoordinates,
							imageCoordinates,
							pointCounts,
							cvGetSize(inputImage_buff),
							intrinsicMatrix,
							distortionCoefficient,
							rotationVectors,
							translationVectors,
							CALIBRATE_CAMERA_FLAG
						);
						
						//Output the information as text
						printf("\nLens distortion coefficients\n");
						saveRenseMatrix(distortionCoefficient);
						printMatrix("%lf", distortionCoefficient);
						
						//m_renseParameter.data = renseParameters;
												
						printf("\n内部パラメータ\n");
						saveInternalParameterMatrix(intrinsicMatrix);
						printMatrix("%lf ", intrinsicMatrix);

						//m_internalParameter.data = internalParameter;
						
						captureCount = 0;
						break;
						
					}
				}
			}

			if (findFlag != 0){
				InParameter = 1;
			}else if (findFlag == 0) {
				InParameter = 0;
			}
			
			//Release memory
			cvReleaseMat(&worldCoordinates);
			cvReleaseMat(&imageCoordinates);
			cvReleaseMat(&pointCounts);
			cvReleaseMat(&rotationVectors);
			cvReleaseMat(&translationVectors);
			cvReleaseImage(&grayImage);

		}
		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

	}
	//Acquire the extrinsic calibration pattern
	if (key == ' ' && m_inputImageIn.isNew() && InParameter == 1) {

		//Create the matrices
		CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h), 3, CV_64FC1); //world-coordinate matrix
		CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h), 2, CV_64FC1); //image-coordinate matrix
		CvMat *rotationVectors = cvCreateMat(1, 3, CV_64FC1); //rotation vector
		CvMat *rotationMatrix = cvCreateMat(3, 3, CV_64FC1); //rotation matrix
		CvMat *translationVectors = cvCreateMat(1, 3, CV_64FC1); //translation vector

		//Set the world coordinates
		for (int i = 0; i < (m_board_w * m_board_h); i++){
			cvSetReal2D(worldCoordinates, i, 0, (i % m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 1, (i / m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 2, 0.0);
		}
	
		cvWaitKey( 1 );
	
		//	When the space key was pressed
		if ( findFlag != 0 ) {
			//	all corners were detected:
			//	set the image coordinates
			for ( int i = 0; i < (m_board_w * m_board_h); i++ ){
				cvSetReal2D( imageCoordinates, i, 0, corners[i].x);
				cvSetReal2D( imageCoordinates, i, 1, corners[i].y);
			}

			//	Estimate the extrinsic parameters
			cvFindExtrinsicCameraParams2(
				worldCoordinates,
				imageCoordinates,
				intrinsicMatrix,
				distortionCoefficient,
				rotationVectors,
				translationVectors
			);

			//	Convert the rotation vector to a rotation matrix
			cvRodrigues2( rotationVectors, rotationMatrix, NULL );

			printf( "\n外部パラメータ\n" );
			printExtrinsicMatrix( rotationMatrix, translationVectors );
			saveExternalParameterMatrix(rotationMatrix, translationVectors);

			m_externalParameter.data = CORBA::string_dup(externalParameter);
			m_renseParameter.data = CORBA::string_dup(renseParameters);
			m_internalParameter.data = CORBA::string_dup(internalParameter);
						
		}
		//Free the matrices
		cvReleaseMat( &worldCoordinates );
		cvReleaseMat( &imageCoordinates );
		cvReleaseMat( &rotationVectors );
		cvReleaseMat( &rotationMatrix );
		cvReleaseMat( &translationVectors );
		
		//Initialize the X,Y undistortion maps
		cvInitUndistortMap(
			intrinsicMatrix,
			distortionCoefficient,
			mapx,
			mapy
		);
		//Flag: extrinsic parameters available
		outParameter = 1;
		key = 0;
				
	 }
	
	//Once both intrinsic and extrinsic parameters are available
	if (InParameter == 1 && outParameter == 1) {

		//	Generate a lens-distortion-corrected image
		cvUndistort2(
			inputImage_buff,
			undistortionImage,
			intrinsicMatrix,
			distortionCoefficient
		);

		//cvShowImage("歪み補正", undistortionImage);

		//Output the corrected image to the OutPort.
		//int len = undistortionImage->nChannels * undistortionImage->width * undistortionImage->height;
		//m_calbImage.pixels.length(len);
		
		//Copy the undistorted image into the OutPort buffer.
		//memcpy((void *)&(m_calbImage.pixels[0]), undistortionImage->imageData, len);
		//m_calbImageOut.write();
		
		//Set the coordinates for the bird's-eye view
		objPts[0].x = 0;					objPts[0].y = 0;
		objPts[1].x = m_board_w-1;			objPts[1].y = 0;
		objPts[2].x = 0;					objPts[2].y = m_board_h-1;
		objPts[3].x = m_board_w-1;			objPts[3].y = m_board_h-1;
		
		//Select the corners to use
		imgPts[0] = corners[0];
		imgPts[1] = corners[m_board_w - 1];
		imgPts[2] = corners[(m_board_h - 1) * m_board_w];
		imgPts[3] = corners[(m_board_h - 1) * m_board_w + m_board_w - 1];
		
		//Draw circles at the selected corners
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3);

		CvMat *H = cvCreateMat(3, 3, CV_32F);
		cvGetPerspectiveTransform(objPts, imgPts, H);
		
		//Set the height.
		CV_MAT_ELEM(*H, float, 2, 2) = m_camera_Height;
		
		//Perform the warp
		cvWarpPerspective(inputImage_buff, birds_image, H, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		
		//Output the bird's-eye view to the OutPort.
		int len = birds_image->nChannels * birds_image->width * birds_image->height;
		m_birdImage.pixels.length(len);
		memcpy((void *)&(m_birdImage.pixels[0]), birds_image->imageData, len);

		m_birdImage.width = inputImage_buff->width;
		m_birdImage.height = inputImage_buff->height;

		m_birdImageOut.write();

		cvWaitKey(10);

		//cvShowImage("Bird_Eye", birds_image);
		cvReleaseMat(&H);

		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

		key = 0;

	}

	return RTC::RTC_OK;
}
Example #16
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}