Example #1
/* Converts a rotation matrix to a rotation vector or vice versa */
void  cvRodrigues( CvMat* rotation_matrix, CvMat* rotation_vector,
                   CvMat* jacobian, int conv_type )
{
    if( conv_type == CV_RODRIGUES_V2M )
        cvRodrigues2( rotation_vector, rotation_matrix, jacobian );
    else
        cvRodrigues2( rotation_matrix, rotation_vector, jacobian );
}
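This wrapper just forwards to cvRodrigues2, which infers the conversion direction from the shapes of its arguments. A minimal standalone round-trip sketch (not part of the example above; the include path may vary across OpenCV 1.x installs):

/* Round-trip: axis-angle vector -> 3x3 matrix -> vector. */
#include <opencv/cv.h>
#include <stdio.h>

int main(void)
{
    double rod[3] = { 0.0, 0.0, CV_PI / 2 };  /* 90 degrees about the z axis */
    double rot[9];
    CvMat vec = cvMat( 3, 1, CV_64F, rod );
    CvMat mat = cvMat( 3, 3, CV_64F, rot );

    cvRodrigues2( &vec, &mat, 0 );  /* vector -> matrix */
    cvRodrigues2( &mat, &vec, 0 );  /* matrix -> vector, recovers rod */

    printf( "%f %f %f\n", rod[0], rod[1], rod[2] );
    return 0;
}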
Example #2
/**
 *	OptimizeSingle:
 *		Input:
 *			cam1 - the first camera (already optimized)
 *			cam2 - the second camera with its intrinsic matrix initialized
 *			dR - an initial relative 3x3 camera rotation matrix
 *			set1 - SIFT features in the first image
 *			set2 - SIFT features in the second image
 *			aryInlier - the homography inliers
 *
 *		Output:
 *			cam2 - updates cam2's optimized focal length and pose
 */
void OptimizeSingle( const CCamera& cam1, CCamera& cam2,
					double* dR,
					const CFeatureArray& set1,
					const CFeatureArray& set2, 
					const MatchArray& aryInlier )
{
	// Step 1. Initialize the camera pose of cam2

	// cam2's relative rotation to cam1
	CvMat matR = cvMat( 3, 3, CV_64F, dR );

	// cam1's absolute rotation
	double dRod1[3];
	CvMat matRod1 = cvMat( 3, 1, CV_64F, dRod1 );
	cam1.GetPose( dRod1 );

	double dRot1[9];
	CvMat matRot1 = cvMat( 3, 3, CV_64F, dRot1 );
	cvRodrigues2( &matRod1, &matRot1 );

	// compose R and Rot1 to get cam2's initial absolute rotation
	// (use a temporary: cvMatMul may not support in-place operation)
	double dR2[9];
	CvMat matR2 = cvMat( 3, 3, CV_64F, dR2 );
	cvMatMul( &matR, &matRot1, &matR2 );

	double dRod2[3];
	CvMat matRod2 = cvMat( 3, 1, CV_64F, dRod2 );

	cvRodrigues2( &matR2, &matRod2 );
	cam2.SetPose( dRod2 );

	// Step 2. Now we can perform bundle adjustment for cam2
	CBundleAdjust ba( 1, BA_ITER );
	ba.SetCamera( &cam2, 0 );

	// set points
	for( int i=0; i<aryInlier.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryInlier[i].first ];
		const CFeature* ft2 = set2[ aryInlier[i].second ];

		double dir[3];
		cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );
		
		// the 3d position
		CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

		ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft2->x, ft2->y ) );
	}

	ba.DoMotion();

	ba.GetAdjustedCamera( &cam2, 0 );
}
Example #3
void saveExtrinsicParams(const char* fileName, 
                         CvMat* rotation, CvMat* translation ){
  FILE* file;
  CvMat* R = cvCreateMat(3, 3, CV_64F);
  double xOffset = (CHESS_ROW_NUM - 1) * CHESS_ROW_DX;

  if((file = fopen(fileName, "w")) == NULL){
    fprintf(stderr, "file open error\n");
    cvReleaseMat(&R);
    return;
  }

  cvRodrigues2(rotation, R, 0);
  
  // flip the first and third columns of R and shift the origin
  // (coordinate-system convention of the output format)
  fprintf(file, "%lf %lf %lf\n",
    -cvmGet(R, 0, 0), cvmGet(R, 0, 1), -cvmGet(R, 0, 2));
  fprintf(file, "%lf %lf %lf\n",
    -cvmGet(R, 1, 0), cvmGet(R, 1, 1), -cvmGet(R, 1, 2));
  fprintf(file, "%lf %lf %lf\n",
    -cvmGet(R, 2, 0), cvmGet(R, 2, 1), -cvmGet(R, 2, 2));

  fprintf(file, "%lf %lf %lf\n", 
	  cvmGet(translation, 0, 0) + cvmGet(R, 0, 0) * xOffset, 
	  cvmGet(translation, 1, 0) + cvmGet(R, 1, 0) * xOffset, 
	  cvmGet(translation, 2, 0) + cvmGet(R, 2, 0) * xOffset);

  fclose(file);
  cvReleaseMat(&R);
}
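The last fprintf above shifts the pose origin along the board's x axis: t' = t + xOffset * R(:,0), i.e. xOffset times the first column of R. A small standalone sketch of that shift (values illustrative, OpenCV 1.x C API):

/* Shift a pose origin by xOffset along the board's x axis: t' = t + xOffset * R(:,0). */
double Rd[9] = { 1,0,0,  0,1,0,  0,0,1 };
double td[3] = { 5, 6, 7 };
double xOff  = 2.0;
CvMat R = cvMat( 3, 3, CV_64F, Rd );
CvMat t = cvMat( 3, 1, CV_64F, td );
for( int i = 0; i < 3; i++ )
    cvmSet( &t, i, 0, cvmGet( &t, i, 0 ) + cvmGet( &R, i, 0 ) * xOff );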
Example #4
bool CamAugmentation::LoadOptimalStructureFromFile( char* cam_c_file, char *cam_rt_file ){
  FILE *stream;

  // Load camera calibration matrices into optimal structure:
  if( (stream = fopen( cam_c_file, "r+t" )) != NULL ){
    int num,width,height;
    while( fscanf( stream, "no: %i\nwidth: %i\nheight: %i\n", &num, &width, &height ) != EOF ){
      float h;
      CvMat* m = cvCreateMat( 3, 3, CV_64FC1 );			
      for(int i=0; i < 3; i++)
        for(int j=0; j < 3; j++) {
          fscanf( stream, "%f", &h );
          cvmSet(m,i,j,h);
        }
      fscanf( stream, "\n" );
      s_optimal.v_camera_width.push_back( width );
      s_optimal.v_camera_height.push_back( height );
      s_optimal.v_camera_c.push_back( m );
      printf( "Calibration matrix %i loaded!\n", num );
    }
    fclose( stream );
  } else {
    printf( "WARNING: Could not load matrices from file %s.", cam_c_file );
    return false;
  }

  // Load camera rotation-translation matrices into optimal structure:
  if( (stream = fopen( cam_rt_file, "r+t" )) != NULL ){
    int num,width,height;
    while( fscanf( stream, "no: %i\nwidth: %i\nheight: %i\n", &num, &width, &height ) != EOF ){
      float h;
      CvMat* m = cvCreateMat( 3, 4, CV_64FC1 );
      for(int i=0; i < 3; i++)
        for(int j=0; j < 4; j++) {
          fscanf( stream, "%f", &h );
          cvmSet(m,i,j,h);
        }
      fscanf( stream, "\n" );
      s_optimal.v_camera_r_t.push_back( m );

      // Create jacobian:
      CvMat* R = cvCreateMat( 3, 3, CV_64FC1 );
      CvMat* r = cvCreateMat( 3, 1, CV_64FC1 );
      CvMat* j = cvCreateMat( 3, 9, CV_64FC1 );
      CamCalibration::ExtractRotationTranslationFrom3x4Matrix( m, R, NULL );
      cvRodrigues2( R, r, j );
      s_optimal.v_camera_r_t_jacobian.push_back( j );
      printf( "Rotation-Translation matrix %i loaded and jacobian created!\n", num );
    }
    fclose( stream );
  } else {
    printf( "WARNING: Could not load matrices from file %s.", cam_rt_file );
    return false;
  }
  return true;
}
Example #5
void calibrate ( CvMat* cornerPoints, CvMat* objectPoints,
		 CvMat* intrinsic, CvMat* distortion,
		 CvMat* rotation, CvMat* translation )
{

    CvMat* rotVector = cvCreateMat( 3, 1, CV_64F );
    cvFindExtrinsicCameraParams2( objectPoints, cornerPoints,
				  intrinsic, distortion, 
				  rotVector, translation );
    
    cvRodrigues2( rotVector, rotation, 0 );
    
    cvReleaseMat( &rotVector );
}
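A hedged caller sketch for this helper, with four coplanar points and made-up intrinsics (all values here are illustrative; real inputs would come from a corner detector and cvCalibrateCamera2):

double obj[4*3] = { 0,0,0,  1,0,0,  1,1,0,  0,1,0 };        /* board corners, world units */
double img[4*2] = { 100,100,  200,102,  198,205,  98,203 }; /* matching pixel positions */
double K[9]     = { 800,0,320,  0,800,240,  0,0,1 };        /* illustrative camera matrix */
double d[4]     = { 0, 0, 0, 0 };                           /* distortion assumed zero */
CvMat objectPoints = cvMat( 4, 3, CV_64F, obj );
CvMat cornerPoints = cvMat( 4, 2, CV_64F, img );
CvMat intrinsic    = cvMat( 3, 3, CV_64F, K );
CvMat distortion   = cvMat( 4, 1, CV_64F, d );
CvMat* rotation    = cvCreateMat( 3, 3, CV_64F );
CvMat* translation = cvCreateMat( 3, 1, CV_64F );

calibrate( &cornerPoints, &objectPoints, &intrinsic, &distortion, rotation, translation );
/* rotation now holds the 3x3 matrix, translation the 3x1 vector. */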
Example #6
/**
 *	OptimizePair:
 *		Input:
 *			cam1 - the first camera with its intrinsic matrix and pose initialized ([R|T]=[I|0])
 *			cam2 - the second camera with its intrinsic matrix initialized
 *			dR - an initial relative 3x3 camera rotation matrix
 *			set1 - SIFT features in the first image
 *			set2 - SIFT features in the second image
 *			aryInlier - the homography inliers
 *
 *		Output:
 *			cam1 - updates cam1's optimized focal length
 *			cam2 - updates cam2's optimized focal length and pose
 */
void OptimizePair( CCamera& cam1, CCamera& cam2,
				  double* dR,
				  const CFeatureArray& set1,
				  const CFeatureArray& set2, 
				  const MatchArray& aryInlier )
{
	CBundleAdjust ba( 2, BA_ITER );

	// Step 1. To perform bundle adjustment, we initialize cam1 and cam2 
	//         as [K][I|0] and [K][R|0] respectively and optimize R using 
	//         bundle adjustment.
	double dRod[3];
	CvMat matRod = cvMat( 3, 1, CV_64F, dRod );
	CvMat matR = cvMat( 3, 3, CV_64F, dR );

	cvRodrigues2( &matR, &matRod );
	cam2.SetPose( dRod );

	// Set cameras
	ba.SetCamera( &cam1, 0 );
	ba.SetCamera( &cam2, 1 );

	// Step 2. We still need to create a set of 3D points. From each homography inlier, 
	//         a 3D point can be initialized by locating it on the ray that goes through 
	//         its projection.	
	for( int i=0; i<aryInlier.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryInlier[i].first ];
		const CFeature* ft2 = set2[ aryInlier[i].second ];

		double dir[3];
		cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );
		
		// the initialized 3d position
		CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

		// set the 3d point and its projections in both images
		ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft1->x, ft1->y ) );
		ba.SetPointProjection( pt3, 1, cvPoint2D64f( ft2->x, ft2->y ) );
	}

	// perform bundle adjustment
	ba.DoMotionAndStructure();

	// retrieve the optimized cameras
	ba.GetAdjustedCamera( &cam1, 0 );
	ba.GetAdjustedCamera( &cam2, 1 );
}
Example #7
void CamAugmentation::SetParameterSet(){

  double a_R[9];		CvMat m_R 	  = cvMat( 3, 3, CV_64FC1,     a_R );
  double a_r[3];		CvMat m_r 	  = cvMat( 3, 1, CV_64FC1,     a_r );
  double a_T[3];		CvMat m_T	  = cvMat( 3, 1, CV_64FC1,     a_T );

  // Unpack Homography RT matrix:
  cvmSet( &m_T, 0, 0, -v_opt_param[0] );
  cvmSet( &m_T, 1, 0, -v_opt_param[1] );
  cvmSet( &m_T, 2, 0, -v_opt_param[2] );
  cvmSet( &m_r, 0, 0,  v_opt_param[3] );
  cvmSet( &m_r, 1, 0,  v_opt_param[4] );
  cvmSet( &m_r, 2, 0,  v_opt_param[5] );
  cvRodrigues2( &m_r, &m_R, v_homography_r_t_jacobian );
  CamCalibration::ComposeRotationTranslationTo3x4Matrix( v_homography_r_t, &m_R, &m_T);
}
Example #8
void CamAugmentation::GetParameterSet(){

  // Clear old vector:
  v_opt_param.clear();

  double a_R[9];		CvMat m_R 	  = cvMat( 3, 3, CV_64FC1, a_R );
  double a_r[3];		CvMat m_r 	  = cvMat( 3, 1, CV_64FC1, a_r );
  double a_T[3];		CvMat m_T	  = cvMat( 3, 1, CV_64FC1, a_T );

  // Pack Homography RT matrix:
  CamCalibration::ExtractRotationTranslationFrom3x4Matrix( v_homography_r_t, &m_R, &m_T);
  v_opt_param.push_back( -cvmGet( &m_T, 0, 0 ) );
  v_opt_param.push_back( -cvmGet( &m_T, 1, 0 ) );
  v_opt_param.push_back( -cvmGet( &m_T, 2, 0 ) );
  cvRodrigues2( &m_R, &m_r, v_homography_r_t_jacobian );
  v_opt_param.push_back( cvmGet( &m_r, 0, 0 ) );
  v_opt_param.push_back( cvmGet( &m_r, 1, 0 ) );
  v_opt_param.push_back( cvmGet( &m_r, 2, 0 ) );
}
Example #9
void glbRotVector2RotMat(GlbPoint3d angle, GlbRotmat &r)
{
	CvMat * rotVector1 = cvCreateMat(3, 1, CV_32FC1);
	rotVector1->data.fl[0] = angle.m_x;
	rotVector1->data.fl[1] = angle.m_y;
	rotVector1->data.fl[2] = angle.m_z;
	CvMat *GlbRotmat1 = cvCreateMat(3, 3, CV_32FC1);
	cvRodrigues2(rotVector1, GlbRotmat1);

	r.r11 = GlbRotmat1->data.fl[0];
	r.r12 = GlbRotmat1->data.fl[1];
	r.r13 = GlbRotmat1->data.fl[2];
	r.r21 = GlbRotmat1->data.fl[3];
	r.r22 = GlbRotmat1->data.fl[4];
	r.r23 = GlbRotmat1->data.fl[5];
	r.r31 = GlbRotmat1->data.fl[6];
	r.r32 = GlbRotmat1->data.fl[7];
	r.r33 = GlbRotmat1->data.fl[8];

	cvReleaseMat(&rotVector1);//cvReleaseMatHeader?
	cvReleaseMat(&GlbRotmat1);

}
Example #10
void mexFunction(
    int nargout,
    mxArray *out[],
    int nargin,
    const mxArray *in[]
)
{
    /* declare variables */
    double *mx_r_in;
    double *mx_r_out;
    int output_dim = 3;
    
    /* check arguments */
    if (nargin != 1 || nargout > 1){
        mexErrMsgTxt("Wrong Number of arguments.");
        exit(1);
    }
    
    // Link input vars to pointers in C
    mx_r_in = mxGetPr(in[0]);
    int m = mxGetM(in[0]);
    int n = mxGetN(in[0]);
    
    // Input is a rotation matrix
    if (m == 3 && n == 3){
        output_dim = 1;
    }
    
    // Check input argument: avoid errors
    if (!((m == 3 && n == 3) || (m == 1 && n == 3) || (m == 3 && n == 1))){
        mexPrintf("HELP! ERROR! %d %d\n", m, n);
        exit(1);
    }
    
    // Create OpenCV array for input variable
    // If we want to use cvSetData, our matrices are actually the transposed
    // versions of those that come from Matlab.
    CvMat *r_in_T = cvCreateMatHeader(m, n, CV_64F);
    cvSetData (r_in_T, mx_r_in, sizeof(double)*n);
    
    // Transpose the matrix
    CvMat *r_in = cvCreateMat(n, m, CV_64F);
    cvT(r_in_T, r_in);
    
    // Result
    CvMat *r_out_T = cvCreateMat(output_dim, 3, CV_64F);
    
    // Call cvRodrigues
    cvRodrigues2(r_in, r_out_T);
    
    // Allocate memory for the output var
    out[0] = mxCreateNumericMatrix(3, output_dim, mxDOUBLE_CLASS, mxREAL);
    mx_r_out = mxGetPr(out[0]);    
    
    CvMat* r_out = cvCreateMatHeader(3, output_dim, CV_64F);
    cvSetData (r_out, mx_r_out, sizeof(double)*output_dim);
    cvT(r_out_T, r_out);

    // Free all matrices and array headers and return
    // (cvReleaseMat also releases headers created with cvCreateMatHeader;
    // cvReleaseMatHeader is not part of the OpenCV C API)
    cvReleaseMat(&r_in);
    cvReleaseMat(&r_in_T);
    cvReleaseMat(&r_out_T);
    cvReleaseMat(&r_out);

}
Example #11
void processUserInput(nub::ref<OutputFrameSeries> &ofs, bool &drawGrid, bool &saveCorners, bool &calibrate)
{
  static bool moveCamera = false;
  static bool itsChangeRot = false;

  switch(getKey(ofs))
  {
    case KEY_UP:
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 0, cvGetReal2D(itsCameraRotation, 0, 0) + M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 1, cvGetReal2D(itsCameraTranslation, 0, 1) - 1);
      } else {
        //itsCameraCtrl->movePanTilt(1, 0, true); //move relative
      }

      break;
    case KEY_DOWN:
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 0, cvGetReal2D(itsCameraRotation, 0, 0) - M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 1, cvGetReal2D(itsCameraTranslation, 0, 1) + 1);
      } else {
        //itsCameraCtrl->movePanTilt(-1, 0, true); //move relative
      }
      break;
    case KEY_LEFT:
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 1, cvGetReal2D(itsCameraRotation, 0, 1) + M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 0, cvGetReal2D(itsCameraTranslation, 0, 0) - 1);
      } else {
        //itsCameraCtrl->movePanTilt(0, 1, true); //move relative
      }
      break;
    case KEY_RIGHT:
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 1, cvGetReal2D(itsCameraRotation, 0, 1) - M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 0, cvGetReal2D(itsCameraTranslation, 0, 0) + 1);
      } else {
        //itsCameraCtrl->movePanTilt(0, -1, true); //move relative
      }
      break;
    case 38: //a
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 2, cvGetReal2D(itsCameraRotation, 0, 2) + M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 2, cvGetReal2D(itsCameraTranslation, 0, 2) + 1);
      } else {
        //itsCameraCtrl->zoom(1, true); //move relative
      }
      break;
    case 52: //z
      if (!moveCamera)
      {
        if (itsChangeRot)
          cvmSet(itsCameraRotation, 0, 2, cvGetReal2D(itsCameraRotation, 0, 2) - M_PI/180);
        else
          cvmSet(itsCameraTranslation, 0, 2, cvGetReal2D(itsCameraTranslation, 0, 2) - 1);
      } else {
        //itsCameraCtrl->zoom(-1, true); //move relative
      }
      break;
    case 39: //s
      saveCorners = true;
      break;
    case 54: //c
      calibrate = true;
      break;
    case 42: //g
      drawGrid = !drawGrid;
      {
        double d0 = cvGetReal2D( itsDistortionCoeffs, 0, 0);
        double d1 = cvGetReal2D( itsDistortionCoeffs, 1, 0);
        double d2 = cvGetReal2D( itsDistortionCoeffs, 2, 0);
        double d3 = cvGetReal2D( itsDistortionCoeffs, 3, 0);
        printf( "distortion_coeffs ( %7.7lf, %7.7lf, %7.7lf, %7.7lf)\n", d0, d1, d2, d3);

        double ir00 = cvGetReal2D( itsIntrinsicMatrix, 0, 0);
        double ir01 = cvGetReal2D( itsIntrinsicMatrix, 0, 1);
        double ir02 = cvGetReal2D( itsIntrinsicMatrix, 0, 2);
        double ir10 = cvGetReal2D( itsIntrinsicMatrix, 1, 0);
        double ir11 = cvGetReal2D( itsIntrinsicMatrix, 1, 1);
        double ir12 = cvGetReal2D( itsIntrinsicMatrix, 1, 2);
        double ir20 = cvGetReal2D( itsIntrinsicMatrix, 2, 0);
        double ir21 = cvGetReal2D( itsIntrinsicMatrix, 2, 1);
        double ir22 = cvGetReal2D( itsIntrinsicMatrix, 2, 2);
        printf( "intrinsics ( %7.5lf, %7.5lf, %7.5lf)\n", ir00, ir01, ir02);
        printf( "           ( %7.5lf, %7.5lf, %7.5lf)\n", ir10, ir11, ir12);
        printf( "           ( %7.5lf, %7.5lf, %7.5lf)\n", ir20, ir21, ir22);

        printf( "Rotation: %f(%0.2fDeg) %f(%0.2fDeg) %f(%0.2fDeg)\n",
            cvGetReal2D(itsCameraRotation, 0, 0),
            cvGetReal2D(itsCameraRotation, 0, 0)*180/M_PI,
            cvGetReal2D(itsCameraRotation, 0, 1),
            cvGetReal2D(itsCameraRotation, 0, 1)*180/M_PI,
            cvGetReal2D(itsCameraRotation, 0, 2),
            cvGetReal2D(itsCameraRotation, 0, 2)*180/M_PI);
        printf( "Translation: %f %f %f\n",
            cvGetReal2D(itsCameraTranslation, 0, 0),
            cvGetReal2D(itsCameraTranslation, 0, 1),
            cvGetReal2D(itsCameraTranslation, 0, 2));

        CvMat *rot_mat = cvCreateMat( 3, 3, CV_64FC1);
        cvRodrigues2( itsCameraRotation, rot_mat, 0);

        printf( "Rotation Rot: %0.2f %0.2f %0.2f\n",
            cvGetReal2D(rot_mat, 0, 0)*180/M_PI,
            cvGetReal2D(rot_mat, 0, 1)*180/M_PI,
            cvGetReal2D(rot_mat, 0, 2)*180/M_PI);
        printf( "              %0.2f %0.2f %0.2f\n",
            cvGetReal2D(rot_mat, 1, 0)*180/M_PI,
            cvGetReal2D(rot_mat, 1, 1)*180/M_PI,
            cvGetReal2D(rot_mat, 1, 2)*180/M_PI);
        printf( "              %0.2f %0.2f %0.2f\n",
            cvGetReal2D(rot_mat, 2, 0)*180/M_PI,
            cvGetReal2D(rot_mat, 2, 1)*180/M_PI,
            cvGetReal2D(rot_mat, 2, 2)*180/M_PI);


        cvReleaseMat( &rot_mat);


      }
      break;
    case 27: //r
      itsChangeRot = !itsChangeRot;
      break;
  }
}
Example #12
void projectGrid(Image<PixRGB<byte> > &img)
{

  //draw center point
  drawCircle(img, Point2D<int>(img.getWidth()/2, img.getHeight()/2), 3, PixRGB<byte>(255,0,0));

  CvMat *rot_mat = cvCreateMat( 3, 3, CV_64FC1);
  cvRodrigues2( itsCameraRotation, rot_mat, 0);

  int  NUM_GRID         = 9; //21
  CvMat *my_3d_point    = cvCreateMat( 3, NUM_GRID * NUM_GRID + 2, CV_64FC1);
  // 2xN single-channel, matching the cvGetReal2D reads below
  // (CV_64FC2 here would not match the 2xN layout cvProjectPoints2 expects)
  CvMat *my_image_point = cvCreateMat( 2, NUM_GRID * NUM_GRID + 2, CV_64FC1);

  for ( int i = 0; i < NUM_GRID; i++){
    for ( int j = 0; j < NUM_GRID; j++){
      cvSetReal2D( my_3d_point, 0, i * NUM_GRID + j, (i * GRID_SIZE));
      cvSetReal2D( my_3d_point, 1, i * NUM_GRID + j, (j * GRID_SIZE));
      cvSetReal2D( my_3d_point, 2, i * NUM_GRID + j, 0.0);
    }
  }

  // two extra points define the z-axis segment drawn below
  cvSetReal2D( my_3d_point, 0, NUM_GRID*NUM_GRID, 0);
  cvSetReal2D( my_3d_point, 1, NUM_GRID*NUM_GRID, 0);
  cvSetReal2D( my_3d_point, 2, NUM_GRID*NUM_GRID, 0);

  cvSetReal2D( my_3d_point, 0, NUM_GRID*NUM_GRID+1, 0);
  cvSetReal2D( my_3d_point, 1, NUM_GRID*NUM_GRID+1, 0);
  cvSetReal2D( my_3d_point, 2, NUM_GRID*NUM_GRID+1, 30);

  cvProjectPoints2( my_3d_point,
                    itsCameraRotation,
                    itsCameraTranslation,
                    itsIntrinsicMatrix,
                    itsDistortionCoeffs,
                    my_image_point);

  for ( int i = 0; i < NUM_GRID; i++){
    for ( int j = 0; j < NUM_GRID-1; j++){
      int im_x1 = (int)cvGetReal2D( my_image_point, 0, i * NUM_GRID + j);
      int im_y1 = (int)cvGetReal2D( my_image_point, 1, i * NUM_GRID + j);
      int im_x2 = (int)cvGetReal2D( my_image_point, 0, i * NUM_GRID + j+1);
      int im_y2 = (int)cvGetReal2D( my_image_point, 1, i * NUM_GRID + j+1);

      cvLine( img2ipl(img), cvPoint( im_x1, im_y1), cvPoint( im_x2, im_y2), CV_RGB( 0, 255, 0), 1);
    }
  }
  for ( int j = 0; j < NUM_GRID; j++){
    for ( int i = 0; i < NUM_GRID-1; i++){
      int im_x1 = (int)cvGetReal2D( my_image_point, 0, i * NUM_GRID + j);
      int im_y1 = (int)cvGetReal2D( my_image_point, 1, i * NUM_GRID + j);
      int im_x2 = (int)cvGetReal2D( my_image_point, 0, (i+1) * NUM_GRID + j);
      int im_y2 = (int)cvGetReal2D( my_image_point, 1, (i+1) * NUM_GRID + j);

      cvLine( img2ipl(img), cvPoint( im_x1, im_y1), cvPoint( im_x2, im_y2), CV_RGB( 0, 255, 0), 1);
    }
  }

  int im_x0 = (int)cvGetReal2D( my_image_point, 0, NUM_GRID*NUM_GRID);
  int im_y0 = (int)cvGetReal2D( my_image_point, 1, NUM_GRID*NUM_GRID);
  int im_x  = (int)cvGetReal2D( my_image_point, 0, NUM_GRID*NUM_GRID+1);
  int im_y  = (int)cvGetReal2D( my_image_point, 1, NUM_GRID*NUM_GRID+1);
  cvLine( img2ipl(img), cvPoint( im_x0, im_y0), cvPoint( im_x, im_y), CV_RGB( 255, 0, 0), 2); //Z axis

  cvReleaseMat( &my_3d_point);
  cvReleaseMat( &my_image_point);
  cvReleaseMat( &rot_mat);
}
int
main (int argc, char *argv[])
{
	// IMAGE_NUM, PAT_ROW, PAT_COL,PAT_SIZE, ALL_POINTS, CHESS_SIZE

	/*
	if (argc < 6)
	{
		std::cout<< "ERROR : augment is incorrect" << std::endl;
		return -1;
	}
	*/

	//int PAT_ROW   	= atoi(argv[3]);
	//int PAT_COL   	= atoi(argv[4]);
	//int CHESS_SIZE	= atoi(argv[5]);
	//int PAT_SIZE = PAT_ROW*PAT_COL;
	char* NAME_IMG_IN		= argv[1];
	//char* NAME_XML_OUT	= argv[2];

	int i,j;
	int corner_count, found;
	IplImage *src_img;
	CvSize pattern_size = cvSize(PAT_COL, PAT_ROW);
	CvMat image_points;
	CvMat object_points;
	CvMat *intrinsic, *distortion;
	CvMat *rotation = cvCreateMat(1, 3, CV_32FC1);
	CvMat *rotationConv = cvCreateMat(3, 3, CV_32FC1);
	CvMat *translation = cvCreateMat(1, 3, CV_32FC1);
	CvPoint3D32f objects[PAT_SIZE];
	CvFileStorage *fs;
	CvFileNode *param;
	CvPoint2D32f *corners = (CvPoint2D32f *) cvAlloc (sizeof (CvPoint2D32f) * PAT_SIZE);

	// (1) load the calibration target image
	if ( ( src_img = cvLoadImage(NAME_IMG_IN, CV_LOAD_IMAGE_COLOR) ) == 0)
	//if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
	{
		std::cout<< "ERROR : input image is not exist  or  augment is incorrect" << std::endl;
		return -1;
	}

	// set the 3D space coordinates of the chessboard corners
	for (i = 0; i < PAT_ROW; i++) {
		for (j = 0; j < PAT_COL; j++) {
			objects[i * PAT_COL + j].x = i * CHESS_SIZE;
			objects[i * PAT_COL + j].y = j * CHESS_SIZE;
			objects[i * PAT_COL + j].z = 0.0;
		}
	}
	cvInitMatHeader(&object_points, PAT_SIZE, 3, CV_32FC1, objects);

	// detect the corners of the chessboard (calibration pattern)
	int found_num = 0;
//	cvNamedWindow("Calibration", CV_WINDOW_AUTOSIZE);
	found = cvFindChessboardCorners(src_img, pattern_size, &corners[0], &corner_count);
	fprintf(stderr, "corner:%02d...\n", corner_count);
	if (found) {
		fprintf(stderr, "ok\n");
	} else {
		fprintf(stderr, "fail\n");
	}

	// (4) refine the corner positions to subpixel accuracy and draw them
	IplImage *src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);
	cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
	cvFindCornerSubPix (src_gray, &corners[0], corner_count,
			cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	cvDrawChessboardCorners (src_img, pattern_size, &corners[0], corner_count, found);

//	cvShowImage ("Calibration", src_img);
//	cvWaitKey (0);
//	cvDestroyWindow("Calibration");
	cvShowImage ("Calibration", src_img);

	cvInitMatHeader(&image_points, PAT_SIZE, 1, CV_32FC2, corners);


	// (2) load the camera parameter file
	fs = cvOpenFileStorage ("xml/rgb.xml", 0, CV_STORAGE_READ);
	param = cvGetFileNodeByName (fs, NULL, "intrinsic");
	intrinsic = (CvMat *) cvRead (fs, param);
	param = cvGetFileNodeByName (fs, NULL, "distortion");
	distortion = (CvMat *) cvRead (fs, param);
	cvReleaseFileStorage (&fs);

	// (3) estimate the extrinsic parameters
	CvMat sub_image_points, sub_object_points;
	int base = 0;
	cvGetRows(&image_points, &sub_image_points, base * PAT_SIZE, (base + 1) * PAT_SIZE);
	cvGetRows(&object_points, &sub_object_points, base * PAT_SIZE, (base + 1)* PAT_SIZE);
	cvFindExtrinsicCameraParams2(&sub_object_points, &sub_image_points, intrinsic, distortion, rotation, translation);
	int ret = cvRodrigues2(rotation, rotationConv);

//	int cols = sub_object_points.rows;
//	printf("cols = %d\n", cols);
//	printf("%f\n",sub_object_points.data.fl[0]);

	// mm -> m
	for (i = 0; i < translation->cols; i++) { translation->data.fl[i] = translation->data.fl[i] / 1000;}

	// (4) write the results to an XML file
	//fs = cvOpenFileStorage(argv[2], 0, CV_STORAGE_WRITE);
	fs = cvOpenFileStorage(NAME_XML_OUT, 0, CV_STORAGE_WRITE);
	cvWrite(fs, "rotation", rotationConv);
	cvWrite(fs, "translation", translation);
	cvReleaseFileStorage(&fs);

	/////////////////////////////////////////////////
	// write out py
	if(1)
	{
		cv::Mat ttt(translation);
		cv::Mat rrr(rotationConv);


		char data2Write[1024];
		char textFileName[256];
		sprintf( textFileName , "cbCoord/cbOneShot.py");
		std::ofstream outPy(textFileName);

		outPy << "import sys"						<<std::endl;
		outPy << "sys.path.append('../')"			<<std::endl;
		outPy << "from numpy import *"				<<std::endl;
		outPy << "from numpy.linalg import svd"	<<std::endl;
		outPy << "from numpy.linalg import inv"	<<std::endl;
		outPy << "from chessboard_points import *"<<std::endl;
		outPy << "sys.path.append('../geo')"		<<std::endl;
		outPy << "from geo import *"				<<std::endl;

		/*
		///////////////////////////////////////////////////////////////////////////////////
		// out translation and rotation as xyzabc list
		outPy << "xyzabc = []"	<<std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "xyzabc.append(%f)", rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation and rotation as xyzabc list
		///////////////////////////////////////////////////////////////////////////////////
		 */

		///////////////////////////////////////////////////////////////////////////////////
		// out translation
		outPy << "ttt = []"	<<std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(0) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(1) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "ttt.append(%f)", ttt.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out translation
		//////////////////////////////////////////////////////////////////////////////////////

		///////////////////////////////////////////////////////////////////////////////////
		// out rotation
		outPy << "rrr = []"	<<std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(0), rrr.at<float>(1), rrr.at<float>(2) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(3), rrr.at<float>(4), rrr.at<float>(5) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;

		sprintf( data2Write, "rrr.append([%f,%f,%f])", rrr.at<float>(6), rrr.at<float>(7), rrr.at<float>(8) );
		outPy << data2Write << std::endl;
		std::cout << data2Write << std::endl;
		// out rotation
		//////////////////////////////////////////////////////////////////////////////////////

	
		/////////////////////////////////////////////////////////////////
		outPy<< "_T = FRAME( vec=ttt, mat=rrr )" << std::endl;
		/////////////////////////////////////////////////////////////////

	}
	// write out py
	/////////////////////////////////////////////////
	
	std::cout<< "press any key..."<< std::endl;
	cvWaitKey (0);
	cvDestroyWindow("Calibration");


	cvReleaseImage(&src_img);
	cvReleaseMat(&intrinsic);
	cvReleaseMat(&distortion);

	return 0;
}
Example #14
static void calib(void)
{
    ARParam  param;
    CvMat    *objectPoints;
    CvMat    *imagePoints;
    CvMat    *pointCounts;
    CvMat    *intrinsics;
    CvMat    *distortionCoeff;
    CvMat    *rotationVectors;
    CvMat    *translationVectors;
    CvMat    *rotationVector;
    CvMat    *rotationMatrix;
    float    intr[3][4];
    float    dist[4];
    ARdouble trans[3][4];
    ARdouble cx, cy, cz, hx, hy, h, sx, sy, ox, oy, err;
    int      i, j, k, l;

    objectPoints       = cvCreateMat(capturedImageNum * chessboardCornerNumX * chessboardCornerNumY, 3, CV_32FC1);
    imagePoints        = cvCreateMat(capturedImageNum * chessboardCornerNumX * chessboardCornerNumY, 2, CV_32FC1);
    pointCounts        = cvCreateMat(capturedImageNum, 1, CV_32SC1);
    intrinsics         = cvCreateMat(3, 3, CV_32FC1);
    distortionCoeff    = cvCreateMat(1, 4, CV_32FC1);
    rotationVectors    = cvCreateMat(capturedImageNum, 3, CV_32FC1);
    translationVectors = cvCreateMat(capturedImageNum, 3, CV_32FC1);
    rotationVector     = cvCreateMat(1, 3, CV_32FC1);
    rotationMatrix     = cvCreateMat(3, 3, CV_32FC1);

    l = 0;

    for (k = 0; k < capturedImageNum; k++)
    {
        for (i = 0; i < chessboardCornerNumX; i++)
        {
            for (j = 0; j < chessboardCornerNumY; j++)
            {
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[0] = patternWidth * i;
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[1] = patternWidth * j;
                ((float*)(objectPoints->data.ptr + objectPoints->step * l))[2] = 0.0f;

                ((float*)(imagePoints->data.ptr + imagePoints->step * l))[0] = cornerSet[l].x;
                ((float*)(imagePoints->data.ptr + imagePoints->step * l))[1] = cornerSet[l].y;

                l++;
            }
        }

        ((int*)(pointCounts->data.ptr))[k] = chessboardCornerNumX * chessboardCornerNumY;
    }

    cvCalibrateCamera2(objectPoints, imagePoints, pointCounts, cvSize(xsize, ysize),
                       intrinsics, distortionCoeff, rotationVectors, translationVectors, 0);

    for (j = 0; j < 3; j++)
    {
        for (i = 0; i < 3; i++)
        {
            intr[j][i] = ((float*)(intrinsics->data.ptr + intrinsics->step * j))[i];
        }

        intr[j][3] = 0.0f;
    }

    for (i = 0; i < 4; i++)
    {
        dist[i] = ((float*)(distortionCoeff->data.ptr))[i];
    }

    convParam(intr, dist, xsize, ysize, &param); // COVHI10434 ignored.
    arParamDisp(&param);

    l = 0;

    for (k = 0; k < capturedImageNum; k++)
    {
        for (i = 0; i < 3; i++)
        {
            ((float*)(rotationVector->data.ptr))[i] = ((float*)(rotationVectors->data.ptr + rotationVectors->step * k))[i];
        }

        cvRodrigues2(rotationVector, rotationMatrix);

        for (j = 0; j < 3; j++)
        {
            for (i = 0; i < 3; i++)
            {
                trans[j][i] = ((float*)(rotationMatrix->data.ptr + rotationMatrix->step * j))[i];
            }

            trans[j][3] = ((float*)(translationVectors->data.ptr + translationVectors->step * k))[j];
        }

        // arParamDispExt(trans);

        err = 0.0;

        for (i = 0; i < chessboardCornerNumX; i++)
        {
            for (j = 0; j < chessboardCornerNumY; j++)
            {
                cx = trans[0][0] * patternWidth * i + trans[0][1] * patternWidth * j + trans[0][3];
                cy = trans[1][0] * patternWidth * i + trans[1][1] * patternWidth * j + trans[1][3];
                cz = trans[2][0] * patternWidth * i + trans[2][1] * patternWidth * j + trans[2][3];
                hx = param.mat[0][0] * cx + param.mat[0][1] * cy + param.mat[0][2] * cz + param.mat[0][3];
                hy = param.mat[1][0] * cx + param.mat[1][1] * cy + param.mat[1][2] * cz + param.mat[1][3];
                h  = param.mat[2][0] * cx + param.mat[2][1] * cy + param.mat[2][2] * cz + param.mat[2][3];
                if (h == 0.0)
                    continue;

                sx = hx / h;
                sy = hy / h;
                arParamIdeal2Observ(param.dist_factor, sx, sy, &ox, &oy, param.dist_function_version);
                sx   = ((float*)(imagePoints->data.ptr + imagePoints->step * l))[0];
                sy   = ((float*)(imagePoints->data.ptr + imagePoints->step * l))[1];
                err += (ox - sx) * (ox - sx) + (oy - sy) * (oy - sy);
                l++;
            }
        }

        err = sqrt(err / (chessboardCornerNumX * chessboardCornerNumY));
        ARLOG("Err[%2d]: %f[pixel]\n", k + 1, err);
    }

    saveParam(&param);

    cvReleaseMat(&objectPoints);
    cvReleaseMat(&imagePoints);
    cvReleaseMat(&pointCounts);
    cvReleaseMat(&intrinsics);
    cvReleaseMat(&distortionCoeff);
    cvReleaseMat(&rotationVectors);
    cvReleaseMat(&translationVectors);
    cvReleaseMat(&rotationVector);
    cvReleaseMat(&rotationMatrix);
}
bool
cvFindExtrinsicCameraParams3( const CvMat* obj_points,
                  const CvMat* img_points, const CvMat* A,
                  const CvMat* dist_coeffs,
                  CvMat* r_vec, CvMat* t_vec )
{
    bool fGood = true;

    const int max_iter = 20;
    CvMat *_M = 0, *_Mxy = 0, *_m = 0, *_mn = 0, *_L = 0, *_J = 0;
    
    CV_FUNCNAME( "cvFindExtrinsicCameraParams3" );

    __BEGIN__;

    int i, j, count;
    double a[9], k[4] = { 0, 0, 0, 0 }, R[9], ifx, ify, cx, cy;
    double Mc[3] = {0, 0, 0}, MM[9], U[9], V[9], W[3];
    double JtJ[6*6], JtErr[6], JtJW[6], JtJV[6*6], delta[6], param[6];
    CvPoint3D64f* M = 0;
    CvPoint2D64f *m = 0, *mn = 0;
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat _R = cvMat( 3, 3, CV_64F, R );
    CvMat _r = cvMat( 3, 1, CV_64F, param );
    CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
    CvMat _Mc = cvMat( 1, 3, CV_64F, Mc );
    CvMat _MM = cvMat( 3, 3, CV_64F, MM );
    CvMat _U = cvMat( 3, 3, CV_64F, U );
    CvMat _V = cvMat( 3, 3, CV_64F, V );
    CvMat _W = cvMat( 3, 1, CV_64F, W );
    CvMat _JtJ = cvMat( 6, 6, CV_64F, JtJ );
    CvMat _JtErr = cvMat( 6, 1, CV_64F, JtErr );
    CvMat _JtJW = cvMat( 6, 1, CV_64F, JtJW );
    CvMat _JtJV = cvMat( 6, 6, CV_64F, JtJV );
    CvMat _delta = cvMat( 6, 1, CV_64F, delta );
    CvMat _param = cvMat( 6, 1, CV_64F, param );
    CvMat _dpdr, _dpdt;

    if( !CV_IS_MAT(obj_points) || !CV_IS_MAT(img_points) ||
        !CV_IS_MAT(A) || !CV_IS_MAT(r_vec) || !CV_IS_MAT(t_vec) )
        CV_ERROR( CV_StsBadArg, "One of required arguments is not a valid matrix" );

    count = MAX(obj_points->cols, obj_points->rows);
    CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));
    CV_CALL( _Mxy = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _mn = cvCreateMat( 1, count, CV_64FC2 ));
    M = (CvPoint3D64f*)_M->data.db;
    m = (CvPoint2D64f*)_m->data.db;
    mn = (CvPoint2D64f*)_mn->data.db;

    CV_CALL( cvConvertPointsHomogenious( obj_points, _M ));
    CV_CALL( cvConvertPointsHomogenious( img_points, _m ));
    CV_CALL( cvConvert( A, &_a ));

    if( dist_coeffs )
    {
        CvMat _k;
        if( !CV_IS_MAT(dist_coeffs) ||
            CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
            CV_MAT_DEPTH(dist_coeffs->type) != CV_32F ||
            dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
            dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 )
            CV_ERROR( CV_StsBadArg,
                "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

        _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                    CV_MAKETYPE(CV_64F,CV_MAT_CN(dist_coeffs->type)), k );
        CV_CALL( cvConvert( dist_coeffs, &_k ));
    }

    if( CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F ||
        r_vec->rows != 1 && r_vec->cols != 1 ||
        r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg, "Rotation vector must be 1x3 or 3x1 floating-point vector" );

    if( CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F ||
        t_vec->rows != 1 && t_vec->cols != 1 ||
        t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg,
            "Translation vector must be 1x3 or 3x1 floating-point vector" );

    ifx = 1./a[0]; ify = 1./a[4];
    cx = a[2]; cy = a[5];

    // normalize image points
    // (unapply the intrinsic matrix transformation and distortion)
    for( i = 0; i < count; i++ )
    {
        double x = (m[i].x - cx)*ifx, y = (m[i].y - cy)*ify, x0 = x, y0 = y;

        // compensate distortion iteratively
        if( dist_coeffs )
            for( j = 0; j < 5; j++ )
            {
                double r2 = x*x + y*y;
                double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2);
                double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
                double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
                x = (x0 - delta_x)*icdist;
                y = (y0 - delta_y)*icdist;
            }
        mn[i].x = x; mn[i].y = y;

        // calc mean(M)
        Mc[0] += M[i].x;
        Mc[1] += M[i].y;
        Mc[2] += M[i].z;
    }

    Mc[0] /= count;
    Mc[1] /= count;
    Mc[2] /= count;

    cvReshape( _M, _M, 1, count );
    cvMulTransposed( _M, &_MM, 1, &_Mc );
    cvSVD( &_MM, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );

    // initialize extrinsic parameters
    if( W[2]/W[1] < 1e-3 || count < 4 )
    {
        // a planar structure case (all M's lie in the same plane)
        double tt[3], h[9], h1_norm, h2_norm;
        CvMat* R_transform = &_V;
        CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
        CvMat _H = cvMat( 3, 3, CV_64F, h );
        CvMat _h1, _h2, _h3;

        if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
            cvSetIdentity( R_transform );

        if( cvDet(R_transform) < 0 )
            cvScale( R_transform, R_transform, -1 );

        cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );

        for( i = 0; i < count; i++ )
        {
            const double* Rp = R_transform->data.db;
            const double* Tp = T_transform.data.db;
            const double* src = _M->data.db + i*3;
            double* dst = _Mxy->data.db + i*2;

            dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
            dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
        }

        cvFindHomography( _Mxy, _mn, &_H );

        cvGetCol( &_H, &_h1, 0 );
        _h2 = _h1; _h2.data.db++;
        _h3 = _h2; _h3.data.db++;
        h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
        h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);

        cvScale( &_h1, &_h1, 1./h1_norm );
        cvScale( &_h2, &_h2, 1./h2_norm );
        cvScale( &_h3, &_t, 2./(h1_norm + h2_norm));
        cvCrossProduct( &_h1, &_h2, &_h3 );

        cvRodrigues2( &_H, &_r );
        cvRodrigues2( &_r, &_H );
        cvMatMulAdd( &_H, &T_transform, &_t, &_t );
        cvMatMul( &_H, R_transform, &_R );
        cvRodrigues2( &_R, &_r );
    }
    else
    {
        // non-planar structure. Use DLT method
        double* L;
        double LL[12*12], LW[12], LV[12*12], sc;
        CvMat _LL = cvMat( 12, 12, CV_64F, LL );
        CvMat _LW = cvMat( 12, 1, CV_64F, LW );
        CvMat _LV = cvMat( 12, 12, CV_64F, LV );
        CvMat _RRt, _RR, _tt;

        CV_CALL( _L = cvCreateMat( 2*count, 12, CV_64F ));
        L = _L->data.db;

        for( i = 0; i < count; i++, L += 24 )
        {
            double x = -mn[i].x, y = -mn[i].y;
            L[0] = L[16] = M[i].x;
            L[1] = L[17] = M[i].y;
            L[2] = L[18] = M[i].z;
            L[3] = L[19] = 1.;
            L[4] = L[5] = L[6] = L[7] = 0.;
            L[12] = L[13] = L[14] = L[15] = 0.;
            L[8] = x*M[i].x;
            L[9] = x*M[i].y;
            L[10] = x*M[i].z;
            L[11] = x;
            L[20] = y*M[i].x;
            L[21] = y*M[i].y;
            L[22] = y*M[i].z;
            L[23] = y;
        }

        cvMulTransposed( _L, &_LL, 1 );
        cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
        _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
        cvGetCols( &_RRt, &_RR, 0, 3 );
        cvGetCol( &_RRt, &_tt, 3 );
        if( cvDet(&_RR) < 0 )
            cvScale( &_RRt, &_RRt, -1 );
        sc = cvNorm(&_RR);
        cvSVD( &_RR, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
        cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );
        cvScale( &_tt, &_t, cvNorm(&_R)/sc );
        cvRodrigues2( &_R, &_r );
        cvReleaseMat( &_L );
    }

    //
    // Check if new r and t are good
    //
    if ( cvGetReal1D( r_vec, 0 ) ||
         cvGetReal1D( r_vec, 1 ) ||
         cvGetReal1D( r_vec, 2 ) ||
         cvGetReal1D( t_vec, 0 ) ||
         cvGetReal1D( t_vec, 1 ) ||
         cvGetReal1D( t_vec, 2 ) )
    {
        //
        // perform this check only when the old r and t exist.
        //
        CvMat * R_inv = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * r_old = cvCreateMat( 3, 1, CV_64FC1 );
        CvMat * R_old = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * t_old = cvCreateMat( 3, 1, CV_64FC1 );
        // get new center
        cvInvert( &_R, R_inv );
        double new_center[3];
        CvMat newCenter = cvMat( 3, 1, CV_64FC1, new_center );
        cvMatMul( R_inv, &_t, &newCenter );
        cvScale( &newCenter, &newCenter, -1 );
        // get old center
        cvConvert( r_vec, r_old );
        cvConvert( t_vec, t_old );
        cvRodrigues2( r_old, R_old );
        cvInvert( R_old, R_inv );
        double old_center[3];
        CvMat oldCenter = cvMat( 3, 1, CV_64FC1, old_center );
        cvMatMul( R_inv, t_old, &oldCenter );
        cvScale( &oldCenter, &oldCenter, -1 );
        // get difference
        double diff_center = 0;
        for ( int i = 0; i < 3 ; i ++ )
            diff_center += pow( new_center[i] - old_center[i], 2 );
        diff_center = sqrt( diff_center );
        if ( diff_center > 300 )
        {
            printf("diff_center = %.2f --> set initial r and t as same as  the previous\n", diff_center);
            cvConvert(r_vec, &_r);
            cvConvert(t_vec, &_t);
            fGood = false;
        }
//        else printf("diff_center = %.2f\n", diff_center );
        
        cvReleaseMat( &R_inv );
        cvReleaseMat( &r_old );
        cvReleaseMat( &R_old );
        cvReleaseMat( &t_old );
    }
    
    if ( fGood )
    {
        CV_CALL( _J = cvCreateMat( 2*count, 6, CV_64FC1 ));
        cvGetCols( _J, &_dpdr, 0, 3 );
        cvGetCols( _J, &_dpdt, 3, 6 );

        // refine extrinsic parameters using iterative algorithm
        for( i = 0; i < max_iter; i++ )
        {
            double n1, n2;
            cvReshape( _mn, _mn, 2, 1 );
            cvProjectPoints2( _M, &_r, &_t, &_a, dist_coeffs,
                              _mn, &_dpdr, &_dpdt, 0, 0, 0 );
            cvSub( _m, _mn, _mn );
            cvReshape( _mn, _mn, 1, 2*count );

            cvMulTransposed( _J, &_JtJ, 1 );
            cvGEMM( _J, _mn, 1, 0, 0, &_JtErr, CV_GEMM_A_T );
            cvSVD( &_JtJ, &_JtJW, 0, &_JtJV, CV_SVD_MODIFY_A + CV_SVD_V_T );
            if( JtJW[5]/JtJW[0] < 1e-12 )
                break;
            cvSVBkSb( &_JtJW, &_JtJV, &_JtJV, &_JtErr,
                      &_delta, CV_SVD_U_T + CV_SVD_V_T );
            cvAdd( &_delta, &_param, &_param );
            n1 = cvNorm( &_delta );
            n2 = cvNorm( &_param );
            if( n1/n2 < 1e-10 )
                break;
        }

        _r = cvMat( r_vec->rows, r_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), param );
        _t = cvMat( t_vec->rows, t_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), param + 3 );

        cvConvert( &_r, r_vec );
        cvConvert( &_t, t_vec );
    }

    __END__;

    cvReleaseMat( &_M );
    cvReleaseMat( &_Mxy );
    cvReleaseMat( &_m );
    cvReleaseMat( &_mn );
    cvReleaseMat( &_L );
    cvReleaseMat( &_J );

    return fGood;
}
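The pose-jump guard near the end compares camera centers: for a pose (R, t) the center is C = -R^-1 t, and since R is orthonormal, R^-1 = R^T. A compact sketch of the same computation using cvGEMM's transpose flag instead of cvInvert (values illustrative):

/* Camera center C = -R^T t for an orthonormal R. */
double Rv[9] = { 1,0,0,  0,1,0,  0,0,1 };
double tv[3] = { 10, 20, 30 };
double Cv[3];
CvMat R = cvMat( 3, 3, CV_64F, Rv );
CvMat t = cvMat( 3, 1, CV_64F, tv );
CvMat C = cvMat( 3, 1, CV_64F, Cv );
cvGEMM( &R, &t, -1.0, 0, 0.0, &C, CV_GEMM_A_T );  /* C = -(R^T) t */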
Example #16
void ProjectionModel::calculateProjection()
{
    if(intrinsic_matrix != 0 && distortion_coeffs != 0)
    {
        int corner_count = Chessboard::board_total;

        cvCvtColor(sourceImg, gray_image, CV_RGB2GRAY);

        int found = cvFindChessboardCorners(gray_image,
                                            board_sz,
                                            corners,
                                            &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(!found)
        {
            return;
        }

        cvFindCornerSubPix(gray_image,
                           corners,
                           corner_count,
                           cvSize(11,11),
                           cvSize(-1,-1),
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        objPts[0].x = 0;
        objPts[0].y = 0;
        objPts[1].x = Chessboard::board_w - 1;
        objPts[1].y = 0;
        objPts[2].x = 0;
        objPts[2].y = Chessboard::board_h - 1;
        objPts[3].x = Chessboard::board_w - 1;
        objPts[3].y = Chessboard::board_h - 1;
        imgPts[3] = corners[0];
        imgPts[2] = corners[Chessboard::board_w - 1];
        imgPts[1] = corners[(Chessboard::board_h - 1) * Chessboard::board_w];
        imgPts[0] = corners[(Chessboard::board_h - 1) * Chessboard::board_w + Chessboard::board_w - 1];

        cvGetPerspectiveTransform(objPts, imgPts, H);

        for(int i = 0; i < 4; ++i)
        {
            CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
            CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
        }

        cvFindExtrinsicCameraParams2(object_points,
                                     image_points,
                                     intrinsic_matrix,
                                     distortion_coeffs,
                                     rotation_vector,
                                     translation_vector);

        cvRodrigues2(rotation_vector, rotation_matrix);

        for(int f = 0; f < 3; f++)
        {
            for(int c = 0; c < 3; c++)
            {
                fullMatrix[c * 4 + f] = rotation_matrix->data.fl[f * 3 + c];   //transposed
            }
        }

        fullMatrix[3] = 0.0;
        fullMatrix[7] = 0.0;
        fullMatrix[11] = 0.0;
        fullMatrix[12] = translation_vector->data.fl[0];
        fullMatrix[13] = translation_vector->data.fl[1];
        fullMatrix[14] = translation_vector->data.fl[2];
        fullMatrix[15] = 1.0;
    }
}
void ImageOverlayer::initializeCamParams()
{
  int nframes=1;
  int n = 9; //Number of points used to process
  int N=nframes*n;
  
  vector<CvPoint2D32f> temp(n);
  vector<int> npoints;
  vector<CvPoint3D32f> objectPoints;
  vector<CvPoint2D32f> points[2];
  points[0].resize(n);
  points[1].resize(n);
  
  double R[3][3], T[3], E[3][3], F[3][3];
  double Q[4][4];
    

/*************************************************************/    
    
   _M1 = cvMat(3, 3, CV_64F, M1 );
   _M2 = cvMat(3, 3, CV_64F, M2 );
   _D1 = cvMat(1, 5, CV_64F, D1 );
   _D2 = cvMat(1, 5, CV_64F, D2 );
  CvMat _R = cvMat(3, 3, CV_64F, R );
  CvMat _T = cvMat(3, 1, CV_64F, T );
  CvMat _E = cvMat(3, 3, CV_64F, E );
  CvMat _F = cvMat(3, 3, CV_64F, F );
  CvMat _Q = cvMat(4,4, CV_64F, Q);
  
  vector<CvPoint2D32f>& pts = points[0];

  
  // XY pixel points, left image
  points[0][0].x=34.0;
  points[0][0].y=336.0;
  points[0][1].x=502.0;
  points[0][1].y=156.0;
  points[0][2].x=1280.0;
  points[0][2].y=279.0;
  points[0][3].x=664.0;
  points[0][3].y=174.0;
  points[0][4].x=914.0;
  points[0][4].y=209.0;
  points[0][5].x=248.0;
  points[0][5].y=300.0;
  points[0][6].x=663.0;
  points[0][6].y=482.0;
  points[0][7].x=185.0;
  points[0][7].y=364.0;
  points[0][8].x=400.0;
  points[0][8].y=507.0;  

  
  // XY pixel points, right image
  points[1][0].x=866.0;
  points[1][0].y=942.0;
  points[1][1].x=98.0;
  points[1][1].y=376.0;
  points[1][2].x=856.0;
  points[1][2].y=72.0;
  points[1][3].x=445.0;
  points[1][3].y=222.0;
  points[1][4].x=690.0;
  points[1][4].y=128.0;
  points[1][5].x=779.0;
  points[1][5].y=442.0;
  points[1][6].x=1162.0;
  points[1][6].y=161.0;
  points[1][7].x=1061.0;
  points[1][7].y=413.0;
  points[1][8].x=1244.0;
  points[1][8].y=215.0;
  
  
  npoints.resize(nframes,n);
  objectPoints.resize(nframes*n);
  
  /* 3D points (x,y,z) matching the 2D points from the left and right images - minimum: 8 points */
  
  objectPoints[0] = cvPoint3D32f(6.0,-4.5,0.0);
  objectPoints[1] = cvPoint3D32f(0.0,-4.5,0.0);
  objectPoints[2] = cvPoint3D32f(0.0,+4.5,0.0);
  objectPoints[3] = cvPoint3D32f(0.0,-1.5,0.0);
  objectPoints[4] = cvPoint3D32f(0.0,+1.5,0.0);
  objectPoints[5] = cvPoint3D32f(4.3,-2.7,0.0);
  objectPoints[6] = cvPoint3D32f(4.3,+2.7,0.0);
  objectPoints[7] = cvPoint3D32f(5.25,-1.75,0.0);
  objectPoints[8] = cvPoint3D32f(5.25,+1.75,0.0);  
  
  for( int i = 1; i < nframes; i++ )
      copy( objectPoints.begin(), objectPoints.begin() + n,
      objectPoints.begin() + i*n );
  
  CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
  CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
  CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
  CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
  
   
   /**************************************************************************/
  double R1[3][3], R2[3][3], P1[3][4], P2[3][4];

  CvMat _R1 = cvMat(3, 3, CV_64F, R1);
  CvMat _R2 = cvMat(3, 3, CV_64F, R2);
  CvMat _P1 = cvMat(3, 4, CV_64F, P1);
  CvMat _P2 = cvMat(3, 4, CV_64F, P2);

/************************************** StereoCalibration - returns R T R1 R2 T1 T2 **************************************/   
  CvSize imageSize = cvSize(1294,964);

  cvStereoCalibrate( &_objectPoints, &_imagePoints1,
      &_imagePoints2, &_npoints,
      &_M1, &_D1, &_M2, &_D2,
      imageSize, &_R, &_T, &_E, &_F,
      cvTermCriteria(CV_TERMCRIT_ITER+
      CV_TERMCRIT_EPS, 30, 1e-5),
      CV_CALIB_USE_INTRINSIC_GUESS+ CV_CALIB_FIX_ASPECT_RATIO);

     
/*************************************************************************************************************************/ 

/***************************************** Extrinsic Parameters **********************************************************/
  CvMat* Rvec_left = cvCreateMat( 3, 1, CV_64F );
  Tvec_left = cvCreateMat( 3, 1, CV_64F );
  CvMat* Rvec_right = cvCreateMat( 3, 1, CV_64F );
  Tvec_right = cvCreateMat( 3, 1, CV_64F ); 
  
  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints1,&_M1,&_D1,Rvec_left,Tvec_left);
  
  Rvec_left_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_left,Rvec_left_n,0);
    

  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints2,&_M2,&_D2,Rvec_right,Tvec_right);
  
  Rvec_right_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_right,Rvec_right_n,0);
    
/*************************************************************************************************************************/
}
Example #18
/////////////////////////////////
// cv3dTrackerCalibrateCameras //
/////////////////////////////////
CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                   const Cv3dTrackerCameraIntrinsics camera_intrinsics[], // size is num_cameras
                   CvSize etalon_size,
                   float square_size,
                   IplImage *samples[],                                   // size is num_cameras
                   Cv3dTrackerCameraInfo camera_info[])                   // size is num_cameras
{
    CV_FUNCNAME("cv3dTrackerCalibrateCameras");
    const int num_points = etalon_size.width * etalon_size.height;
    int cameras_done = 0;        // the number of cameras whose positions have been determined
    CvPoint3D32f *object_points = NULL; // real-world coordinates of checkerboard points
    CvPoint2D32f *points = NULL; // 2d coordinates of checkerboard points as seen by a camera
    IplImage *gray_img = NULL;   // temporary image for color conversion
    IplImage *tmp_img = NULL;    // temporary image used by FindChessboardCornerGuesses
    int c, i, j;

    if (etalon_size.width < 3 || etalon_size.height < 3)
        CV_ERROR(CV_StsBadArg, "Chess board size is invalid");

    for (c = 0; c < num_cameras; c++)
    {
        // CV_CHECK_IMAGE is not available in the cvaux library
        // so perform the checks inline.

        //CV_CALL(CV_CHECK_IMAGE(samples[c]));

        if( samples[c] == NULL )
            CV_ERROR( CV_HeaderIsNull, "Null image" );

        if( samples[c]->dataOrder != IPL_DATA_ORDER_PIXEL && samples[c]->nChannels > 1 )
            CV_ERROR( CV_BadOrder, "Unsupported image format" );

        if( samples[c]->maskROI != 0 || samples[c]->tileInfo != 0 )
            CV_ERROR( CV_StsBadArg, "Unsupported image format" );

        if( samples[c]->imageData == 0 )
            CV_ERROR( CV_BadDataPtr, "Null image data" );

        if( samples[c]->roi &&
            ((samples[c]->roi->xOffset | samples[c]->roi->yOffset
              | samples[c]->roi->width | samples[c]->roi->height) < 0 ||
             samples[c]->roi->xOffset + samples[c]->roi->width > samples[c]->width ||
             samples[c]->roi->yOffset + samples[c]->roi->height > samples[c]->height ||
             (unsigned) (samples[c]->roi->coi) > (unsigned) (samples[c]->nChannels)))
            CV_ERROR( CV_BadROISize, "Invalid ROI" );

        // End of CV_CHECK_IMAGE inline expansion

        if (samples[c]->depth != IPL_DEPTH_8U)
            CV_ERROR(CV_BadDepth, "Channel depth of source image must be 8");

        if (samples[c]->nChannels != 3 && samples[c]->nChannels != 1)
            CV_ERROR(CV_BadNumChannels, "Source image must have 1 or 3 channels");
    }

    CV_CALL(object_points = (CvPoint3D32f *)cvAlloc(num_points * sizeof(CvPoint3D32f)));
    CV_CALL(points = (CvPoint2D32f *)cvAlloc(num_points * sizeof(CvPoint2D32f)));

    // fill in the real-world coordinates of the checkerboard points
    FillObjectPoints(object_points, etalon_size, square_size);

    for (c = 0; c < num_cameras; c++)
    {
        CvSize image_size = cvSize(samples[c]->width, samples[c]->height);
        IplImage *img;

        // The input samples are not required to all have the same size or color
        // format. If they have different sizes, the temporary images are
        // reallocated as necessary.
        if (samples[c]->nChannels == 3)
        {
            // convert to gray
            if (gray_img == NULL || gray_img->width != samples[c]->width ||
                gray_img->height != samples[c]->height )
            {
                if (gray_img != NULL)
                    cvReleaseImage(&gray_img);
                CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
            }
            
            CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));

            img = gray_img;
        }
        else
        {
            // no color conversion required
            img = samples[c];
        }

        if (tmp_img == NULL || tmp_img->width != samples[c]->width ||
            tmp_img->height != samples[c]->height )
        {
            if (tmp_img != NULL)
                cvReleaseImage(&tmp_img);
            CV_CALL(tmp_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
        }

        int count = num_points;
        bool found = cvFindChessBoardCornerGuesses(img, tmp_img, 0,
                                                   etalon_size, points, &count) != 0;
        if (count == 0)
            continue;
        
        // If found is true, it means all the points were found (count = num_points).
        // If found is false but count is non-zero, it means that not all points were found.

        cvFindCornerSubPix(img, points, count, cvSize(5,5), cvSize(-1,-1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f));

        // If the image origin is BL (bottom-left), fix the y coordinates
        // so they are relative to the true top of the image.
        if (samples[c]->origin == IPL_ORIGIN_BL)
        {
            for (i = 0; i < count; i++)
                points[i].y = samples[c]->height - 1 - points[i].y;
        }

        if (found)
        {
            // Make sure x coordinates are increasing and y coordinates are decreasing.
            // (The y coordinate of point (0,0) should be the greatest, because the point
            // on the checkerboard that is the origin is nearest the bottom of the image.)
            // This is done after adjusting the y coordinates according to the image origin.
            if (points[0].x > points[1].x)
            {
                // reverse points in each row
                for (j = 0; j < etalon_size.height; j++)
                {
                    CvPoint2D32f *row = &points[j*etalon_size.width];
                    for (i = 0; i < etalon_size.width/2; i++)
                        std::swap(row[i], row[etalon_size.width-i-1]);
                }
            }

            if (points[0].y < points[etalon_size.width].y)
            {
                // reverse points in each column
                for (i = 0; i < etalon_size.width; i++)
                {
                    for (j = 0; j < etalon_size.height/2; j++)
                        std::swap(points[i+j*etalon_size.width],
                                  points[i+(etalon_size.height-j-1)*etalon_size.width]);
                }
            }
        }

        DrawEtalon(samples[c], points, count, etalon_size, found);

        if (!found)
            continue;

        float rotVect[3];
        float rotMatr[9];
        float transVect[3];

        cvFindExtrinsicCameraParams(count,
                                    image_size,
                                    points,
                                    object_points,
                                    const_cast<float *>(camera_intrinsics[c].focal_length),
                                    camera_intrinsics[c].principal_point,
                                    const_cast<float *>(camera_intrinsics[c].distortion),
                                    rotVect,
                                    transVect);

        // Check the result against an arbitrary limit to eliminate impossible values.
        // (If the checkerboard were truly that far away, the camera wouldn't be able
        // to see the squares.)
        if (transVect[0] > 1000*square_size
            || transVect[1] > 1000*square_size
            || transVect[2] > 1000*square_size)
        {
            // ignore impossible results
            continue;
        }

        CvMat rotMatrDescr = cvMat(3, 3, CV_32FC1, rotMatr);
        CvMat rotVectDescr = cvMat(3, 1, CV_32FC1, rotVect);

        /* Calc rotation matrix by Rodrigues Transform */
        cvRodrigues2( &rotVectDescr, &rotMatrDescr );

        //combine the two transformations into one matrix
        //order is important! rotations are not commutative
        float tmat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { transVect[0], transVect[1], transVect[2], 1.f } };
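        // Note: the translation occupies the bottom row, so these 4x4 matrices
        // are laid out for row-vector multiplication (v' = v * M).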
        
        float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
                             { rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
                             { rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };


        MultMatrix(camera_info[c].mat, tmat, rmat);

        // change the transformation of the cameras to put them in the world coordinate 
        // system we want to work with.

        // Start with an identity matrix; then fill in the values to accomplish
        // the desired transformation.
        float smat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };

        // First, reflect through the origin by inverting all three axes.
        smat[0][0] = -1.f;
        smat[1][1] = -1.f;
        smat[2][2] = -1.f;
        MultMatrix(tmat, camera_info[c].mat, smat);

        // Scale x and y coordinates by the focal length (allowing for non-square pixels
        // and/or non-symmetrical lenses).
        smat[0][0] = 1.0f / camera_intrinsics[c].focal_length[0];
        smat[1][1] = 1.0f / camera_intrinsics[c].focal_length[1];
        smat[2][2] = 1.0f;
        MultMatrix(camera_info[c].mat, smat, tmat);

        camera_info[c].principal_point = camera_intrinsics[c].principal_point;
        camera_info[c].valid = true;

        cameras_done++;
    }

exit:
    cvReleaseImage(&gray_img);
    cvReleaseImage(&tmp_img);
    cvFree(&object_points);
    cvFree(&points);

    return cameras_done == num_cameras;
}
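The conversion at the heart of the example above is cvRodrigues2, which maps a 3x1 rotation vector to a 3x3 rotation matrix and back. A minimal, self-contained sketch of that round-trip (illustrative values; the header path varies between OpenCV 1.x installs):

/* Sketch: cvRodrigues2 round-trip with an illustrative 45-degree rotation
   about the z axis. The optional jacobian argument is omitted. */
#include <cv.h>
#include <stdio.h>

int main( void )
{
    double vecData[3] = { 0.0, 0.0, CV_PI / 4 };
    double matData[9];
    CvMat vec = cvMat( 3, 1, CV_64FC1, vecData );
    CvMat mat = cvMat( 3, 3, CV_64FC1, matData );

    cvRodrigues2( &vec, &mat );   /* rotation vector -> rotation matrix */
    cvRodrigues2( &mat, &vec );   /* rotation matrix -> rotation vector */

    printf( "recovered rotation about z: %f rad\n", vecData[2] );
    return 0;
}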
Example #19
0
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: Wrong number of input parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't acquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
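The closing comment notes that the bottom row of H_invt encodes the horizon line. A hedged sketch of reading it out, placed before the final return in main above and assuming H and H_invt were computed exactly as shown (the line is a*x + b*y + c = 0 in image coordinates):

	/* Sketch: extract the horizon line from the bottom row of the
	   inverted homography. */
	float a = CV_MAT_ELEM(*H_invt, float, 2, 0);
	float b = CV_MAT_ELEM(*H_invt, float, 2, 1);
	float c = CV_MAT_ELEM(*H_invt, float, 2, 2);
	printf("horizon line: %f*x + %f*y + %f = 0\n", a, b, c);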
Example #20
0
static
void icvRandomQuad( int width, int height, double quad[4][2],
                    double maxxangle,
                    double maxyangle,
                    double maxzangle )
{
    double distfactor = 3.0;
    double distfactor2 = 1.0;

    double halfw, halfh;
    int i;

    double rotVectData[3];
    double vectData[3];
    double rotMatData[9];

    CvMat rotVect;
    CvMat rotMat;
    CvMat vect;

    double d;

    rotVect = cvMat( 3, 1, CV_64FC1, &rotVectData[0] );
    rotMat = cvMat( 3, 3, CV_64FC1, &rotMatData[0] );
    vect = cvMat( 3, 1, CV_64FC1, &vectData[0] );

    rotVectData[0] = maxxangle * (2.0 * rand() / RAND_MAX - 1.0);
    rotVectData[1] = ( maxyangle - fabs( rotVectData[0] ) )
        * (2.0 * rand() / RAND_MAX - 1.0);
    rotVectData[2] = maxzangle * (2.0 * rand() / RAND_MAX - 1.0);
    d = (distfactor + distfactor2 * (2.0 * rand() / RAND_MAX - 1.0)) * width;

/*
    rotVectData[0] = maxxangle;
    rotVectData[1] = maxyangle;
    rotVectData[2] = maxzangle;

    d = distfactor * width;
*/

    cvRodrigues2( &rotVect, &rotMat );

    halfw = 0.5 * width;
    halfh = 0.5 * height;

    quad[0][0] = -halfw;
    quad[0][1] = -halfh;
    quad[1][0] =  halfw;
    quad[1][1] = -halfh;
    quad[2][0] =  halfw;
    quad[2][1] =  halfh;
    quad[3][0] = -halfw;
    quad[3][1] =  halfh;

    for( i = 0; i < 4; i++ )
    {
        rotVectData[0] = quad[i][0];
        rotVectData[1] = quad[i][1];
        rotVectData[2] = 0.0;
        cvMatMulAdd( &rotMat, &rotVect, 0, &vect );
        quad[i][0] = vectData[0] * d / (d + vectData[2]) + halfw;
        quad[i][1] = vectData[1] * d / (d + vectData[2]) + halfh;

        /*
        quad[i][0] += halfw;
        quad[i][1] += halfh;
        */
    }
}
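The final loop applies a simple pinhole projection: after rotation, each corner's x and y are scaled by d / (d + z), so corners pushed away from the viewer (positive z) shrink toward the centre. A self-contained sketch of that step with illustrative numbers:

/* Sketch: the perspective foreshortening used in icvRandomQuad.
   A point at depth z relative to viewing distance d projects to
   (x, y) * d / (d + z); z = 0 leaves the point unchanged. */
#include <stdio.h>

static void project_point( double x, double y, double z, double d,
                            double* px, double* py )
{
    *px = x * d / (d + z);
    *py = y * d / (d + z);
}

int main( void )
{
    double px, py;
    project_point( 100.0, 50.0, 30.0, 300.0, &px, &py ); /* illustrative */
    printf( "projected: (%f, %f)\n", px, py );           /* ~(90.9, 45.5) */
    return 0;
}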
Example #21
0
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{	
	
	board_sz = cvSize(m_board_w, m_board_h);
	
	// Compute the calibration pattern.
	if (m_inputImageIn.isNew()) {

		m_inputImageIn.read();

		if(m_keyIn.isNew()){
			m_keyIn.read();
			key = (int)m_key.data;
		}
		
		if(g_temp_w != m_inputImage.width || g_temp_h != m_inputImage.height){
		
			inputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			outputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			tempImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			undistortionImage = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			birds_image = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			
			intrinsicMatrix = cvCreateMat(3,3,CV_64FC1);
			distortionCoefficient = cvCreateMat(4,1,CV_64FC1);
			
			captureCount = 0;
			findFlag = 0;

			mapx = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);
			mapy = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);

			corners = new CvPoint2D32f[m_board_w * m_board_h];
			
			g_temp_w = m_inputImage.width;
			g_temp_h = m_inputImage.height;
		
		}

		// Start capturing.
		memcpy(inputImage_buff->imageData,(void *)&(m_inputImage.pixels[0]), m_inputImage.pixels.length());

//		tempImage_buff = cvCloneImage(inputImage_buff);
		// Write the image to the OutPort.
		int len = inputImage_buff->nChannels * inputImage_buff->width * inputImage_buff->height;
		m_origImage.pixels.length(len);
		
		memcpy((void *)&(m_origImage.pixels[0]), inputImage_buff->imageData, len);
		m_origImage.width = inputImage_buff->width;
		m_origImage.height = inputImage_buff->height;

		m_origImageOut.write();
		
		// Create a window for checking the capture.
		//cvShowImage("Capture", inputImage_buff);
		cvWaitKey(1);
		
		// Pressing the space bar captures the NUM_OF_BACKGROUND_FRAMES sample frames.
		if (key == ' ') {
			
			tempImage_buff = cvCloneImage(inputImage_buff);
			// Create the grayscale working image.
			IplImage *grayImage = cvCreateImage(cvGetSize(tempImage_buff), 8, 1);

			// Create the matrices.
			CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); // world coordinates
			CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 2, CV_64FC1); // image coordinates
			CvMat *pointCounts = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 1, CV_32SC1); // corner counts
			CvMat *rotationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); // rotation vectors
			CvMat *translationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); // translation vectors

			// Set the world coordinates.
			for (int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				for ( int j = 0; j < (m_board_w * m_board_h); j++) {
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 0, (j % m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 1, (j / m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 2, 0.0);
				}
			}

			// Set the corner counts.
			for(int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				cvSetReal2D(pointCounts, i, 0, (m_board_w * m_board_h));
			}
			
			// Detect the corners.
			findFlag = findCorners(tempImage_buff, grayImage, corners);

			if (findFlag != 0) {
			
				// All corners were detected:
				// set the image coordinates.
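				// Note: the loop below reuses this one set of detected corners
				// for every frame slot until NUM_OF_BACKGROUND_FRAMES is
				// reached, then calibrates once.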
				for (;;){
					for (int i = 0; i < (m_board_w * m_board_h); i++){
 						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 0, corners[i].x);
						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 1, corners[i].y);
					}
				
					captureCount++;    

					printf("%d枚目キャプチャしました\n", captureCount);

					if (captureCount == NUM_OF_BACKGROUND_FRAMES) {
						// The pattern has been captured the configured number of times:
						// estimate the camera parameters.
						cvCalibrateCamera2(
							worldCoordinates,
							imageCoordinates,
							pointCounts,
							cvGetSize(inputImage_buff),
							intrinsicMatrix,
							distortionCoefficient,
							rotationVectors,
							translationVectors,
							CALIBRATE_CAMERA_FLAG
						);
						
						// Print the results as text.
						printf("\nLens distortion coefficients\n");
						saveRenseMatrix(distortionCoefficient);
						printMatrix("%lf", distortionCoefficient);
						
						//m_renseParameter.data = renseParameters;
												
						printf("\n内部パラメータ\n");
						saveInternalParameterMatrix(intrinsicMatrix);
						printMatrix("%lf ", intrinsicMatrix);

						//m_internalParameter.data = internalParameter;
						
						captureCount = 0;
						break;
						
					}
				}
			}

			if (findFlag != 0){
				InParameter = 1;
			}else if (findFlag == 0) {
				InParameter = 0;
			}
			
			// Release memory.
			cvReleaseMat(&worldCoordinates);
			cvReleaseMat(&imageCoordinates);
			cvReleaseMat(&pointCounts);
			cvReleaseMat(&rotationVectors);
			cvReleaseMat(&translationVectors);
			cvReleaseImage(&grayImage);

		}
		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

	}
	// Obtain the extrinsic parameters.
	if (key == ' ' && m_inputImageIn.isNew() && InParameter == 1) {

		// Create the matrices.
		CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h), 3, CV_64FC1); // world coordinates
		CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h), 2, CV_64FC1); // image coordinates
		CvMat *rotationVectors = cvCreateMat(1, 3, CV_64FC1); // rotation vector
		CvMat *rotationMatrix = cvCreateMat(3, 3, CV_64FC1); // rotation matrix
		CvMat *translationVectors = cvCreateMat(1, 3, CV_64FC1); // translation vector

		// Set the world coordinates.
		for (int i = 0; i < (m_board_w * m_board_h); i++){
			cvSetReal2D(worldCoordinates, i, 0, (i % m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 1, (i / m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 2, 0.0);
		}
	
		cvWaitKey( 1 );
	
		//	When the space key has been pressed
		if ( findFlag != 0 ) {
			//	All corners were detected:
			//	set the image coordinates
			for ( int i = 0; i < (m_board_w * m_board_h); i++ ){
				cvSetReal2D( imageCoordinates, i, 0, corners[i].x);
				cvSetReal2D( imageCoordinates, i, 1, corners[i].y);
			}

			//	Estimate the extrinsic parameters.
			cvFindExtrinsicCameraParams2(
				worldCoordinates,
				imageCoordinates,
				intrinsicMatrix,
				distortionCoefficient,
				rotationVectors,
				translationVectors
			);

			//	Convert the rotation vector into a rotation matrix.
			cvRodrigues2( rotationVectors, rotationMatrix, NULL );

			printf( "\n外部パラメータ\n" );
			printExtrinsicMatrix( rotationMatrix, translationVectors );
			saveExternalParameterMatrix(rotationMatrix, translationVectors);

			m_externalParameter.data = CORBA::string_dup(externalParameter);
			m_renseParameter.data = CORBA::string_dup(renseParameters);
			m_internalParameter.data = CORBA::string_dup(internalParameter);
						
		}
		// Release memory.
		cvReleaseMat( &worldCoordinates );
		cvReleaseMat( &imageCoordinates );
		cvReleaseMat( &rotationVectors );
		cvReleaseMat( &rotationMatrix );
		cvReleaseMat( &translationVectors );
		
		// Initialize the X and Y undistortion maps.
		cvInitUndistortMap(
			intrinsicMatrix,
			distortionCoefficient,
			mapx,
			mapy
		);
		// Flag: extrinsic parameters obtained.
		outParameter = 1;
		key = 0;
				
	 }
	
	// Once both the intrinsic and extrinsic parameters have been obtained
	if (InParameter == 1 && outParameter == 1) {

		//	Generate a lens-distortion-corrected image.
		cvUndistort2(
			inputImage_buff,
			undistortionImage,
			intrinsicMatrix,
			distortionCoefficient
		);

		//cvShowImage("Undistorted", undistortionImage);

		// Write the corrected image to the OutPort.
		//int len = undistortionImage->nChannels * undistortionImage->width * undistortionImage->height;
		//m_calbImage.pixels.length(len);
		
		// memcpy the undistorted image into the OutPort buffer.
		//memcpy((void *)&(m_calbImage.pixels[0]), undistortionImage->imageData, len);
		//m_calbImageOut.write();
		
		// Set up the bird's-eye view coordinates.
		objPts[0].x = 0;					objPts[0].y = 0;
		objPts[1].x = m_board_w-1;			objPts[1].y = 0;
		objPts[2].x = 0;					objPts[2].y = m_board_h-1;
		objPts[3].x = m_board_w-1;			objPts[3].y = m_board_h-1;
		
		// Select the corners to use.
		imgPts[0] = corners[0];
		imgPts[1] = corners[m_board_w - 1];
		imgPts[2] = corners[(m_board_h - 1) * m_board_w];
		imgPts[3] = corners[(m_board_h - 1) * m_board_w + m_board_w - 1];
		
		// Draw circles on the selected corners.
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3);

		CvMat *H = cvCreateMat(3, 3, CV_32F);
		cvGetPerspectiveTransform(objPts, imgPts, H);
		
		// Set the view height.
		CV_MAT_ELEM(*H, float, 2, 2) = m_camera_Height;
		
		// Perform the warp.
		cvWarpPerspective(inputImage_buff, birds_image, H, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		
		// Write the bird's-eye view to the OutPort.
		int len = birds_image->nChannels * birds_image->width * birds_image->height;
		m_birdImage.pixels.length(len);
		memcpy((void *)&(m_birdImage.pixels[0]), birds_image->imageData, len);

		m_birdImage.width = inputImage_buff->width;
		m_birdImage.height = inputImage_buff->height;

		m_birdImageOut.write();

		cvWaitKey(10);

		//cvShowImage("Bird_Eye", birds_image);
		cvReleaseMat(&H);

		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

		key = 0;

	}

	return RTC::RTC_OK;
}
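One way to sanity-check the extrinsics estimated above is to reproject a known board point and compare it with its detected corner. A hedged sketch using cvProjectPoints2, reusing the component's variable names (it assumes rotationVectors, translationVectors and the corners array are still in scope; in the component itself they are released at the end of the block):

/* Sketch: reproject the board origin with the estimated extrinsics.
   A small residual against corners[0] indicates a consistent result. */
CvMat *objPt = cvCreateMat(1, 1, CV_64FC3);
CvMat *imgPt = cvCreateMat(1, 1, CV_64FC2);
cvSet2D(objPt, 0, 0, cvScalar(0.0, 0.0, 0.0)); /* board origin, z = 0 */

cvProjectPoints2(objPt, rotationVectors, translationVectors,
                 intrinsicMatrix, distortionCoefficient, imgPt);

CvScalar p = cvGet2D(imgPt, 0, 0);
printf("reprojected (%f, %f) vs detected (%f, %f)\n",
       p.val[0], p.val[1], corners[0].x, corners[0].y);
cvReleaseMat(&objPt);
cvReleaseMat(&imgPt);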
Example #22
0
bool PnPSolverOpenCV::pnpSolve(CorrespondenceSet correspondences, bool useLast)
{
	if (correspondences.size() < 4)
	{
		return false;
	}
    //create cvmat structures
    CvMat *modelPoints = cvCreateMat(correspondences.size(), 3, CV_32FC1);
    CvMat *projectedPoints = cvCreateMat(correspondences.size(), 2, CV_32FC1);
    CvMat *pointsCount = cvCreateMat(1, 1, CV_32SC1);

    CvMat *cameraMatrixCV = cvCreateMat(3, 3, CV_32FC1);
    cvSet(cameraMatrixCV, cvScalarAll(0));
    cameraMatrixCV->data.fl[2] = static_cast<float> (this->cameraMatrix(0,2));
    cameraMatrixCV->data.fl[5] = static_cast<float> (this->cameraMatrix(1,2));
    cameraMatrixCV->data.fl[0] = static_cast<float> (this->cameraMatrix(0,0));
    cameraMatrixCV->data.fl[4] = static_cast<float> (this->cameraMatrix(1,1));
    cameraMatrixCV->data.fl[8] = 1;

	CvMat *distMatrixCV = cvCreateMat(8, 1, CV_32FC1);
	cvSet(distMatrixCV, cvScalarAll(0));
	for(int i = 0; i < this->distCoeffs.cols()*this->distCoeffs.rows(); i++)
	{
		distMatrixCV->data.fl[i] = this->distCoeffs[i];
	}

    int counter = 0;
    for(CorrespondenceSet::iterator cit = correspondences.begin(); cit != correspondences.end(); cit++, counter++)
    {
        cvSet2D(modelPoints, counter, 0, cvScalar(cit->mx));
        cvSet2D(modelPoints, counter, 1, cvScalar(cit->my));
        cvSet2D(modelPoints, counter, 2, cvScalar(cit->mz));
        cvSet2D(projectedPoints, counter, 0, cvScalar(cit->px));
        cvSet2D(projectedPoints, counter, 1, cvScalar(cit->py));
    }
    cvSet2D(pointsCount, 0, 0, cvScalar(counter));
	
    CvMat *rotationMatrix = cvCreateMat(3, 3, CV_32FC1);
    CvMat *rotationVec = cvCreateMat(1, 3, CV_32FC1);
    cvSet(rotationVec, cvScalarAll(0));
    CvMat *translationVec = cvCreateMat(1, 3, CV_32FC1);
    cvSet(translationVec, cvScalarAll(0));

	if(useLast)
	{
		for(int ri = 0; ri < 3; ri++)
		{
			for(int ci = 0; ci < 3; ci++)
			{
				rotationMatrix->data.fl[ri*3 + ci] = static_cast<float>(this->worldTransformMatrix(ri, ci));
			}
			translationVec->data.fl[ri] = static_cast<float>(this->worldTransformMatrix(ri, 3));
		}
		
		cvRodrigues2(rotationMatrix, rotationVec);
	}

    //distortion coefficients are passed in via distMatrixCV filled above;
    //any remaining undistortion is handled elsewhere
	cvFindExtrinsicCameraParams2(modelPoints, projectedPoints, cameraMatrixCV, distMatrixCV, rotationVec, translationVec, (useLast) ? 1 : 0);


	//cv::Mat mmodelPoints(modelPoints, true);
	//cv::Mat mprojectedPoints(projectedPoints, true);
	//cv::Mat mcameraMatrixCV(cameraMatrixCV, true);
	//cv::Mat mdistortion(8, 1, CV_32FC1); mdistortion.setTo(0);
	//cv::Mat mrotationVec(rotationVec);
	//cv::Mat mtranslationVec(translationVec);
	//cv::solvePnPRansac(mmodelPoints, mprojectedPoints, mcameraMatrixCV, mdistortion,
	//	mrotationVec, mtranslationVec, (useLast)?1:0,
	//	100, 3.0f, 13, cv::noArray(), 0);

    double rotMat[3][3];
    double transVec[3];

    cvRodrigues2(rotationVec, rotationMatrix);

	

    for(int ri = 0; ri < 3; ri++)
    {
        for(int ci = 0; ci < 3; ci++)
        {
            rotMat[ri][ci] = rotationMatrix->data.fl[ri*3 + ci];
        }
        transVec[ri] = translationVec->data.fl[ri];
    }

	
	if (transVec[2] < 0) //we are behind the marker - this shouldn't happen - invert it!
	{	
		//inverse rotation and translation
		for (int ri = 0; ri < 3; ri++)
		{
			for (int ci = 0; ci < 3; ci++)
			{
				rotationMatrix->data.fl[ri * 3 + ci] = rotMat[ci][ri];
			}
			translationVec->data.fl[ri] = -transVec[ri];
		}

		cvRodrigues2(rotationMatrix, rotationVec);

		cvFindExtrinsicCameraParams2(modelPoints, projectedPoints, cameraMatrixCV, distMatrixCV, rotationVec, translationVec, 1);

		cvRodrigues2(rotationVec, rotationMatrix);

		for (int ri = 0; ri < 3; ri++)
		{
			for (int ci = 0; ci < 3; ci++)
			{
				rotMat[ri][ci] = rotationMatrix->data.fl[ri * 3 + ci];
			}
			transVec[ri] = translationVec->data.fl[ri];
		}
	}

    cvReleaseMat(&rotationMatrix);
    cvReleaseMat(&modelPoints);
    cvReleaseMat(&projectedPoints);
    cvReleaseMat(&pointsCount);
    cvReleaseMat(&rotationVec);
    cvReleaseMat(&translationVec);
    cvReleaseMat(&cameraMatrixCV);

    this->worldTransformMatrix << rotMat[0][0], rotMat[0][1], rotMat[0][2], transVec[0],
            rotMat[1][0], rotMat[1][1], rotMat[1][2], transVec[1],
            rotMat[2][0], rotMat[2][1], rotMat[2][2], transVec[2],
            0.0, 0.0, 0.0, 1.0;

    this->worldPos = Eigen::Vector3d(transVec[0], transVec[1], transVec[2]);
    this->worldQuat = Eigen::Quaterniond(worldTransformMatrix.block<3,3>(0, 0));

    return true;
}
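A hedged sketch of consuming pnpSolve's output: worldTransformMatrix maps model points into the camera frame, so the camera centre expressed in model coordinates is -R^T * t. Eigen, matching the member types used above; the function name is hypothetical:

// Sketch: recover the camera centre in model coordinates from the
// model-to-camera transform produced by pnpSolve (C = -R^T * t).
#include <Eigen/Dense>

Eigen::Vector3d cameraCentreInModelFrame(const Eigen::Matrix4d &worldTransformMatrix)
{
    Eigen::Matrix3d R = worldTransformMatrix.block<3,3>(0, 0);
    Eigen::Vector3d t = worldTransformMatrix.block<3,1>(0, 3);
    return -R.transpose() * t; // valid because R is orthonormal
}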