bool CameraProjectorCalibration::setDynamicProjectorImagePoints(cv::Mat img){
        
        vector<cv::Point2f> chessImgPts;
        bool bPrintedPatternFound = calibrationCamera.findBoard(img, chessImgPts, true);

        if(bPrintedPatternFound) {
            
            cv::Mat boardRot;
            cv::Mat boardTrans;
            calibrationCamera.computeCandidateBoardPose(chessImgPts, boardRot, boardTrans);
            
            // Recover the printed board's plane: X/Y axes from the first row/column
            // of detected corners, plus an origin offset for the projected pattern.
            const auto & camCandObjPts = calibrationCamera.getCandidateObjectPoints();
            Point3f axisX = camCandObjPts[1] - camCandObjPts[0];
            Point3f axisY = camCandObjPts[calibrationCamera.getPatternSize().width] - camCandObjPts[0];
            Point3f pos   = camCandObjPts[0] - axisY * (calibrationCamera.getPatternSize().width-2);
            
            // Lay the projector pattern's object points onto that plane
            // (alternate rows offset by half a step, as in an asymmetric grid)
            vector<Point3f> auxObjectPoints;
            for(int i = 0; i < calibrationProjector.getPatternSize().height; i++) {
                for(int j = 0; j < calibrationProjector.getPatternSize().width; j++) {
                    auxObjectPoints.push_back(pos + axisX * float((2 * j) + (i % 2)) + axisY * i);
                }
            }
            
            // Chain rigid transforms to express the projector pose relative to the
            // current board view: (Rc2,Tc2) maps the new board frame into the camera,
            // (Rc1inv,Tc1inv) maps the camera back into the previous board frame,
            // and (Rp1,Tp1) maps the previous board frame into the projector.
            Mat Rc1, Tc1, Rc1inv, Tc1inv, Rc2, Tc2, Rp1, Tp1, Rp2, Tp2;
            Rp1 = calibrationProjector.getBoardRotations().back();
            Tp1 = calibrationProjector.getBoardTranslations().back();
            Rc1 = calibrationCamera.getBoardRotations().back();
            Tc1 = calibrationCamera.getBoardTranslations().back();
            Rc2 = boardRot;
            Tc2 = boardTrans;
            
            // Invert the previous camera pose: R^-1 and -R^-1 * t
            Mat auxRinv = Mat::eye(3,3,CV_32F);
            Rodrigues(Rc1,auxRinv);
            auxRinv = auxRinv.inv();
            Rodrigues(auxRinv, Rc1inv);
            Tc1inv = -auxRinv*Tc1;
            Mat Raux, Taux;
            composeRT(Rc2, Tc2, Rc1inv, Tc1inv, Raux, Taux);
            composeRT(Raux, Taux, Rp1, Tp1, Rp2, Tp2);
            
            vector<Point2f> followingPatternImagePoints;
            projectPoints(Mat(auxObjectPoints),
                          Rp2, Tp2,
                          calibrationProjector.getDistortedIntrinsics().getCameraMatrix(),
                          calibrationProjector.getDistCoeffs(),
                          followingPatternImagePoints);
            
            calibrationProjector.setCandidateImagePoints(followingPatternImagePoints);
        }
        return bPrintedPatternFound;
    }
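The two composeRT calls above chain rigid transforms: cv::composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3) returns the transform that applies (rvec1, tvec1) first and (rvec2, tvec2) second, i.e. R3 = R2*R1 and t3 = R2*t1 + t2. A minimal self-contained check of that convention (values are arbitrary):

#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>

int main() {
    // two arbitrary poses as Rodrigues vectors + translations
    cv::Mat r1 = (cv::Mat_<double>(3,1) << 0.1, 0.0, 0.0);
    cv::Mat t1 = (cv::Mat_<double>(3,1) << 1.0, 0.0, 0.0);
    cv::Mat r2 = (cv::Mat_<double>(3,1) << 0.0, 0.2, 0.0);
    cv::Mat t2 = (cv::Mat_<double>(3,1) << 0.0, 1.0, 0.0);

    cv::Mat r3, t3;
    cv::composeRT(r1, t1, r2, t2, r3, t3);

    // verify against the closed form R3 = R2*R1, t3 = R2*t1 + t2
    cv::Mat R1, R2, R3;
    cv::Rodrigues(r1, R1);
    cv::Rodrigues(r2, R2);
    cv::Rodrigues(r3, R3);
    std::cout << cv::norm(R3 - R2*R1) << " "
              << cv::norm(t3 - (R2*t1 + t2)) << std::endl;  // both ~0
    return 0;
}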
void BoardDetector::rotateXAxis(Mat &rotation) {
    cv::Mat R(3, 3, CV_32FC1);
    Rodrigues(rotation, R);
    // create a rotation matrix for x axis
    cv::Mat RX = cv::Mat::eye(3, 3, CV_32FC1);
    float angleRad = -M_PI / 2;
    RX.at< float >(1, 1) = cos(angleRad);
    RX.at< float >(1, 2) = -sin(angleRad);
    RX.at< float >(2, 1) = sin(angleRad);
    RX.at< float >(2, 2) = cos(angleRad);
    // now multiply
    R = R * RX;
    // finally, run Rodrigues back to get the rotation vector
    Rodrigues(R, rotation);
}
Example #3
void transform3DPoints(const Mat &points,
                       const Mat &rvec,
                       const Mat &tvec,
                       Mat *modif_points)
{
  Mat transformation;

  if ((rvec.rows == 3 && rvec.cols == 1) || (rvec.rows == 1 && rvec.cols == 3))
  {
    Mat R;
    Rodrigues(rvec, R);

    cv::hconcat(R, tvec, transformation);
  }
  else
    if (rvec.rows == 3 && rvec.cols == 3)
    {
      cv::hconcat(rvec, tvec, transformation);
    }
    else
    {
      cerr << "wrong size!" << endl;
      return;
    }

  transform(points, *modif_points, transformation);
}
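cv::transform right-multiplies every 3-channel element by the given matrix; with the 3x4 [R|t] built above it appends a homogeneous 1 to each point, so the helper applies a full rigid transform. A minimal usage sketch (assumes transform3DPoints is in scope):

// four 3D points stored as a 1xN, 3-channel row
cv::Mat points(1, 4, CV_32FC3);
points.at<cv::Point3f>(0, 0) = cv::Point3f(0, 0, 0);
points.at<cv::Point3f>(0, 1) = cv::Point3f(1, 0, 0);
points.at<cv::Point3f>(0, 2) = cv::Point3f(0, 1, 0);
points.at<cv::Point3f>(0, 3) = cv::Point3f(0, 0, 1);

cv::Mat rvec = (cv::Mat_<float>(3, 1) << 0.f, 0.f, float(CV_PI / 2));  // 90 deg about Z
cv::Mat tvec = (cv::Mat_<float>(3, 1) << 0.f, 0.f, 1.f);

cv::Mat moved;
transform3DPoints(points, rvec, tvec, &moved);  // (0,1,0) -> approx (-1,0,1)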
Example #4
bool Camera::calculateExtrinsicParams(vector<Point3f> objectPoints, vector<Point2f> imagePoints)
{
	OPENCV_ASSERT(isCalibrated,"Camera.calculateExtrinsicParams","Cannot calculate extrinsic parameters before camera calibration!");
	Mat rvec, tvec;
	Mat rotMtx;
	bool solverResult = solvePnP(
		Mat(objectPoints), Mat(imagePoints),	// Input correspondences
		cameraMatrix, distortionCoeffs,	// Intrinsic camera parameters (have to be already available)
		rvec, tvec);

	if (solverResult)
	{
		// Build the 4x4 [R|t] transform from rvec and tvec, then invert it to get this->T (the camera pose)
		Rodrigues(rvec, rotMtx);
		Matx44f T_inv = Matx44f(
			(float)rotMtx.at<double>(0,0), (float)rotMtx.at<double>(0,1), (float)rotMtx.at<double>(0,2), (float)tvec.at<double>(0,0),
			(float)rotMtx.at<double>(1,0), (float)rotMtx.at<double>(1,1), (float)rotMtx.at<double>(1,2), (float)tvec.at<double>(1,0),
			(float)rotMtx.at<double>(2,0), (float)rotMtx.at<double>(2,1), (float)rotMtx.at<double>(2,2), (float)tvec.at<double>(2,0),
			0.0F, 0.0F, 0.0F, 1.0F
			);
		T = T_inv.inv();
		isTSet=true;
	}

	return solverResult;
}
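The Matx44f::inv() above is a general 4x4 inverse; for a rigid [R|t] the inverse has the closed form R^T and -R^T*t, which is cheaper and numerically better behaved. A sketch of the same step (hypothetical helper, same rotMtx/tvec types as above):

// invert a rigid transform without a general matrix inverse
cv::Mat invertRigid(const cv::Mat& rotMtx, const cv::Mat& tvec)
{
    cv::Mat Rt = rotMtx.t();                  // R^-1 == R^T for rotations
    cv::Mat tInv = -Rt * tvec;                // translation of the inverse
    cv::Mat T = cv::Mat::eye(4, 4, CV_64F);
    Rt.copyTo(T(cv::Rect(0, 0, 3, 3)));       // top-left 3x3 block
    tInv.copyTo(T(cv::Rect(3, 0, 1, 3)));     // last column, top three rows
    return T;
}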
void CameraCalibration::CvtCameraExtrins(const vector<Mat> *RVecs, const vector<Mat> *TVecs)
{

	Mat rot3x3;
	OpenGlMatrixSpec P = IdentityMatrix(GlModelViewStack);

	if(stereo_mode)
	{
		CamExtrins = new vector<OpenGlMatrixSpec>[2];
		for(int i=0; i<NumFrames; i++)
		{
			for(int c_idx=0; c_idx<2; c_idx++)
			{
				Rodrigues(RVecs[c_idx].at(i), rot3x3);
				for(int col=0; col<3; col++)
					for(int row=0; row<3; row++)
						P.m[col*4+row] = rot3x3.at<double>(row, col);
				copy(TVecs[c_idx].at(i).ptr<double>(), TVecs[c_idx].at(i).ptr<double>()+3, &P.m[12]);
				CamExtrins[c_idx].push_back(P);
			}
		}

		L2RExtrins = IdentityMatrix(GlModelViewStack);
		for(int col=0; col<3; col++)
			for(int row=0; row<3; row++)
				L2RExtrins.m[col*4+row] = stereo_params->R.at<double>(row, col);
		
		copy(stereo_params->T.ptr<double>(), stereo_params->T.ptr<double>()+3, &L2RExtrins.m[12]);

	}
	else
	{
		CamExtrins = new vector<OpenGlMatrixSpec>;
		for(int i=0; i<NumFrames; i++)
		{
			Rodrigues(RVecs->at(i), rot3x3);
			for(int col=0; col<3; col++)
				for(int row=0; row<3; row++)
					P.m[col*4+row] = rot3x3.at<double>(row, col);
			copy(TVecs->at(i).ptr<double>(), TVecs->at(i).ptr<double>()+3, &P.m[12]);
			CamExtrins->push_back(P);
		}
	}
}
double CalibCameraWrapperEx(
    cv::Point3f const * pts3d, cv::Point2f const * pts2d, unsigned int ptCount,
    unsigned int imgWidth, unsigned int imgHeight, float * intrinsicMatInOut,
    float * rotationOut, float * translationOut )
{
    cv::Mat distCoeffs_est;
    std::vector<cv::Mat> rvecs(1), tvecs(1);

    cv::Mat1d camMat_est(3,3, 0.);
    if( intrinsicMatInOut )
    {
        cv::Mat1f(3,3,intrinsicMatInOut).convertTo( camMat_est, CV_64F );
    }

    std::vector<std::vector<cv::Point3f>> test3d_list( 1 );
    test3d_list.front().assign( pts3d, pts3d + ptCount );
    
    std::vector<std::vector<cv::Point2f>> test2d_list( 1 );
    test2d_list.front().assign( pts2d, pts2d + ptCount );

    cv::Size const imgSize(imgWidth, imgHeight);

    int const flags = 
        (intrinsicMatInOut ? CV_CALIB_USE_INTRINSIC_GUESS : 0) |
        CV_CALIB_ZERO_TANGENT_DIST |
        CV_CALIB_FIX_K2 | CV_CALIB_FIX_K3 | CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;

    double rep_err = cv::calibrateCamera(test3d_list, test2d_list, imgSize,
        camMat_est, distCoeffs_est, rvecs, tvecs, flags );

    if( translationOut )
    {
        // tvec is 3x1, so the wrapping header must be 3x1 too; a 1x3 header
        // would make convertTo reallocate and translationOut would stay unwritten
        cv::Mat1f tempOut(3,1,translationOut);
        tvecs.front().convertTo( tempOut, CV_32F );
    }

    if( rotationOut )
    {
        cv::Mat1d temp(3,3);
        Rodrigues( rvecs.front(), temp );

        cv::Mat1f tempOut(3,3,rotationOut);
        temp.convertTo( tempOut, CV_32F );
    }

    if( intrinsicMatInOut )
    {
        cv::Mat1f tempOut(3,3,intrinsicMatInOut);
        camMat_est.convertTo( tempOut, CV_32F );
    }

    return rep_err;
}
 void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
 {
     modif_points.create(1, points.cols, CV_32FC3);
     Mat R(3, 3, CV_64FC1);
     Rodrigues(rvec, R);
     Mat transformation(3, 4, CV_64F);
     Mat r = transformation.colRange(0, 3);
     R.copyTo(r);
     Mat t = transformation.colRange(3, 4);
     tvec.copyTo(t);
     transform(points, modif_points, transformation);
 }
Example #8
void
DynNewtonian::streamParticle(Particle &particle, const double &dt) const
{
    particle.getPosition() += particle.getVelocity() * dt;

    //The Vector copy is required to make sure that the cached
    //orientation doesn't change during the calculation. (Rodrigues here is
    //DynamO's helper returning the rotation matrix for the vector w*dt,
    //not cv::Rodrigues.)
    if (hasOrientationData())
        orientationData[particle.getID()].orientation
            = Rodrigues(orientationData[particle.getID()].angularVelocity * dt)
              * Vector(orientationData[particle.getID()].orientation);
}
Example #9
Eigen::Isometry3d transformEstimation(const Mat& rgb1,const Mat& rgb2,const Mat& depth1,const Mat& depth2,const CAMERA_INTRINSIC_PARAMETERS& C)
{
    vector<KeyPoint> keyPts1,keyPts2;
    Mat descriptors1,descriptors2;

    extractKeypointsAndDescripotrs(rgb1,rgb2,keyPts1,keyPts2,descriptors1,descriptors2);


    vector<DMatch> matches;
    descriptorsMatch(rgb1,rgb2,keyPts1,keyPts2,descriptors1,descriptors2,matches);


    vector<Point2f> points;
    vector<Point3f> objectPoints;
    vector<Eigen::Vector2d> imagePoints1,imagePoints2;

    getObjectPointsAndImagePoints(depth1,depth2,keyPts1,keyPts2,matches,points,objectPoints,imagePoints1,imagePoints2,C);

    Mat translation,rotation;
    double camera_matrix_data[3][3] = {
        {C.fx, 0, C.cx},
        {0, C.fy, C.cy},
        {0, 0, 1}    };

    Mat cameraMatrix(3,3,CV_64F,camera_matrix_data);


    solvePnPRansac(objectPoints,points,cameraMatrix,Mat(),rotation,translation,false, 100, 1.0, 20);


    Mat rot;
    Rodrigues(rotation,rot);

    Eigen::Matrix3d r;
    Eigen::Vector3d t;

    cout<<rot<<endl;
    cout<<translation<<endl;

    r<< ((double*)rot.data)[0],((double*)rot.data)[1],((double*)rot.data)[2],
            ((double*)rot.data)[3],((double*)rot.data)[4],((double*)rot.data)[5],
            ((double*)rot.data)[6],((double*)rot.data)[7],((double*)rot.data)[8];
    t<<((double*)translation.data)[0],((double*)translation.data)[1],((double*)translation.data)[2];

    Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
    T.rotate(r);
    T.pretranslate(t);
    cout<<T.matrix()<<endl;

    BundleAdjustmentOptimization(objectPoints,imagePoints1,imagePoints2);

    return T;
}
Example #10
	/*

	void point3d2Mat(const Point3d& src, Mat& dest)
	{
	dest.create(3,1,CV_64F);
	dest.at<double>(0,0)=src.x;
	dest.at<double>(1,0)=src.y;
	dest.at<double>(2,0)=src.z;
	}

	void setXYZ(Mat& in, double&x, double&y, double&z)
	{
	x=in.at<double>(0,0);
	y=in.at<double>(1,0);
	z=in.at<double>(2,0);

	//	cout<<format("set XYZ: %.04f %.04f %.04f\n",x,y,z);
	}

	void lookatBF(const Point3d& from, const Point3d& to, Mat& destR)
	{
	double x,y,z;

	Mat fromMat;
	Mat toMat;
	point3d2Mat(from,fromMat);
	point3d2Mat(to,toMat);

	Mat fromtoMat;
	add(toMat,fromMat,fromtoMat,Mat(),CV_64F);
	double ndiv = 1.0/norm(fromtoMat);
	fromtoMat*=ndiv;

	setXYZ(fromtoMat,x,y,z);
	destR = Mat::eye(3,3,CV_64F);
	double yaw   =-z/abs(z)*asin(y/sqrt(y*y+z*z))/CV_PI*180.0;

	rotYaw(destR,destR,yaw);

	Mat RfromtoMat = destR*fromtoMat;

	setXYZ(RfromtoMat,x,y,z);
	double pitch =z/abs(z)*asin(x/sqrt(x*x+z*z))/CV_PI*180.0;

	rotPitch(destR,destR,pitch);
	}
	*/
	void lookat(const Point3d& from, const Point3d& to, Mat& destR)
	{
		Mat destMat = Mat(Point3d(0.0, 0.0, 1.0));
		Mat srcMat = Mat(from + to);
		srcMat = srcMat / norm(srcMat);

		Mat rotaxis = srcMat.cross(destMat);
		double angle = acos(srcMat.dot(destMat));
		//normalize cross product and multiply rotation angle
		rotaxis = rotaxis / norm(rotaxis)*angle;
		Rodrigues(rotaxis, destR);
	}
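	Note that lookat() divides by norm(rotaxis), which vanishes whenever the normalized source direction is already (anti)parallel to (0,0,1), and acos can receive a value slightly outside [-1,1] from rounding. A guarded variant, as a sketch (hypothetical lookatSafe, same conventions as above):

	void lookatSafe(const cv::Point3d& from, const cv::Point3d& to, cv::Mat& destR)
	{
		cv::Mat destMat = cv::Mat(cv::Point3d(0.0, 0.0, 1.0));
		cv::Mat srcMat = cv::Mat(from + to);
		srcMat = srcMat / cv::norm(srcMat);

		cv::Mat rotaxis = srcMat.cross(destMat);
		double axisNorm = cv::norm(rotaxis);
		if (axisNorm < 1e-12) {
			destR = cv::Mat::eye(3, 3, CV_64F);
			if (srcMat.dot(destMat) < 0)   // anti-parallel: 180-degree turn about X
				destR = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, -1, 0, 0, 0, -1);
			return;
		}
		double c = srcMat.dot(destMat);
		if (c > 1.0) c = 1.0; else if (c < -1.0) c = -1.0;  // clamp for acos
		rotaxis = rotaxis / axisNorm * std::acos(c);
		cv::Rodrigues(rotaxis, destR);
	}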
Example #11
int solveP3P( InputArray _opoints, InputArray _ipoints,
              InputArray _cameraMatrix, InputArray _distCoeffs,
              OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags) {
    CV_INSTRUMENT_REGION();

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints == 3 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
    CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);

    Mat undistortedPoints;
    undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
    std::vector<Mat> Rs, ts;

    int solutions = 0;
    if (flags == SOLVEPNP_P3P)
    {
        p3p P3Psolver(cameraMatrix);
        solutions = P3Psolver.solve(Rs, ts, opoints, undistortedPoints);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        ap3p P3Psolver(cameraMatrix);
        solutions = P3Psolver.solve(Rs, ts, opoints, undistortedPoints);
    }

    if (solutions == 0) {
        return 0;
    }

    if (_rvecs.needed()) {
        _rvecs.create(solutions, 1, CV_64F);
    }

    if (_tvecs.needed()) {
        _tvecs.create(solutions, 1, CV_64F);
    }

    for (int i = 0; i < solutions; i++) {
        Mat rvec;
        Rodrigues(Rs[i], rvec);
        _tvecs.getMatRef(i) = ts[i];
        _rvecs.getMatRef(i) = rvec;
    }

    return solutions;
}
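solveP3P returns every geometrically valid pose, up to four, so callers normally disambiguate with a fourth correspondence. A hedged sketch (obj, img, K and dist are assumed inputs holding at least four calibrated correspondences):

// run P3P on the first three points, then keep the pose that best
// reprojects the fourth one
std::vector<cv::Mat> rvecs, tvecs;
int n = cv::solveP3P(std::vector<cv::Point3f>(obj.begin(), obj.begin() + 3),
                     std::vector<cv::Point2f>(img.begin(), img.begin() + 3),
                     K, dist, rvecs, tvecs, cv::SOLVEPNP_P3P);

int best = -1;
double bestErr = 1e30;
for (int i = 0; i < n; ++i) {
    std::vector<cv::Point2f> proj;
    cv::projectPoints(std::vector<cv::Point3f>(1, obj[3]), rvecs[i], tvecs[i], K, dist, proj);
    cv::Point2f d = proj[0] - img[3];
    double err = std::sqrt((double)(d.x * d.x + d.y * d.y));
    if (err < bestErr) { bestErr = err; best = i; }  // lowest reprojection error wins
}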
Example #12
	void Epipolar::getEssMatrix(Mat& rot1, Mat& rot2, Mat& trans1, Mat& trans2, Mat& approxEss){
		Mat rot1Mat, rot2Mat;
		if (rot1.size() == Size(3, 3))
		{
			rot1Mat = rot1;
		}
		else
		{
			Rodrigues(rot1, rot1Mat);
		}
		if (rot2.size() == Size(3, 3))
		{
			rot2Mat = rot2;
		}
		else
		{
			Rodrigues(rot2, rot2Mat);
		}
		Mat relRot = rot1Mat.inv() * rot2Mat;
		Mat relTrans = trans2 - trans1;
		getEssMatrix(relRot, relTrans, approxEss);
	}
void Marker::glGetModelViewMatrix(   double modelview_matrix[16])throw(cv::Exception)
{
    //check if parameters are valid (-999999 marks an unset extrinsic)
    bool invalid=false;
    for (int i=0;i<3 && !invalid ;i++)
    {
        if (Tvec.at<float>(i,0)==-999999) invalid=true;
        if (Rvec.at<float>(i,0)==-999999) invalid=true;
    }
    if (invalid) throw cv::Exception(9003,"extrinsic parameters are not set","Marker::getModelViewMatrix",__FILE__,__LINE__);
    Mat Rot(3,3,CV_32FC1),Jacob;
    Rodrigues(Rvec, Rot, Jacob);

    double para[3][4];
    for (int i=0;i<3;i++)
        for (int j=0;j<3;j++) para[i][j]=Rot.at<float>(i,j);
    //now, add the translation
    para[0][3]=Tvec.at<float>(0,0);
    para[1][3]=Tvec.at<float>(1,0);
    para[2][3]=Tvec.at<float>(2,0);
    double scale=1;

    modelview_matrix[0 + 0*4] = para[0][0];
    // R1C2
    modelview_matrix[0 + 1*4] = para[0][1];
    modelview_matrix[0 + 2*4] = para[0][2];
    modelview_matrix[0 + 3*4] = para[0][3];
    // R2
    modelview_matrix[1 + 0*4] = para[1][0];
    modelview_matrix[1 + 1*4] = para[1][1];
    modelview_matrix[1 + 2*4] = para[1][2];
    modelview_matrix[1 + 3*4] = para[1][3];
    // R3
    modelview_matrix[2 + 0*4] = -para[2][0];
    modelview_matrix[2 + 1*4] = -para[2][1];
    modelview_matrix[2 + 2*4] = -para[2][2];
    modelview_matrix[2 + 3*4] = -para[2][3];

    modelview_matrix[3 + 0*4] = 0.0;
    modelview_matrix[3 + 1*4] = 0.0;
    modelview_matrix[3 + 2*4] = 0.0;
    modelview_matrix[3 + 3*4] = 1.0;
    if (scale != 0.0)
    {
        modelview_matrix[12] *= scale;
        modelview_matrix[13] *= scale;
        modelview_matrix[14] *= scale;
    }


}
Example #14
	ofMatrix4x4 makeMatrix(Mat rotation, Mat translation) {
		Mat rot3x3;
		if(rotation.rows == 3 && rotation.cols == 3) {
			rot3x3 = rotation;
		} else {
			Rodrigues(rotation, rot3x3);
		}
		double* rm = rot3x3.ptr<double>(0);
		double* tm = translation.ptr<double>(0);
		return ofMatrix4x4(rm[0], rm[3], rm[6], 0.0f,
                           rm[1], rm[4], rm[7], 0.0f,
                           rm[2], rm[5], rm[8], 0.0f,
                           tm[0], tm[1], tm[2], 1.0f);
	}
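	ofMatrix4x4 uses the OpenGL row-vector convention, which is why makeMatrix writes the rotation transposed (rm[0], rm[3], rm[6] become the first row) and puts the translation in the fourth row. A hedged usage sketch, assuming rvec/tvec come from cv::solvePnP and an openFrameworks draw context is active:

	ofMatrix4x4 pose = makeMatrix(rvec, tvec);
	ofPushMatrix();
	ofMultMatrix(pose);   // draw in the board/marker coordinate frame
	// ... draw axes, overlays, etc. ...
	ofPopMatrix();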
Example #15
	std::array<double, nderivs> eval(const double dt = 0) const
	{
	  math::Vector u1new = Rodrigues(w1 * dt) * math::Vector(u1);
	  math::Vector u2new = Rodrigues(w2 * dt) * math::Vector(u2);

	  const double colldiam = 0.5 * (_diameter1 + _diameter2);
	  const double growthfactor = 1 + _invgamma * (_t + dt);
	  const math::Vector rij = r12 + dt * v12 + growthfactor * (u1new - u2new);
	  const math::Vector vij = v12 + growthfactor * ((w1 ^ u1new) - (w2 ^ u2new)) + _invgamma * (u1new - u2new);
	  const math::Vector aij = growthfactor * (-w1.nrm2() * u1new + w2.nrm2() * u2new) + 2 * _invgamma * ((w1 ^ u1new) - (w2 ^ u2new));
	  const math::Vector dotaij = growthfactor * (-w1.nrm2() * (w1 ^ u1new) + w2.nrm2() * (w2 ^ u2new)) + 3 * _invgamma * (-w1.nrm2() * u1new + w2.nrm2() * u2new);

	  std::array<double, nderivs> retval;
	  for (size_t i(0); i < nderivs; ++i)
	    switch (first_deriv + i) {
	    case 0: retval[i] = (rij | rij) - growthfactor * growthfactor * colldiam * colldiam; break;
	    case 1: retval[i] = 2 * (rij | vij) - 2 * _invgamma * growthfactor * colldiam * colldiam; break;
	    case 2: retval[i] = 2 * vij.nrm2() + 2 * (rij | aij) - 2 * _invgamma * _invgamma * colldiam * colldiam; break;
	    case 3: retval[i] = 6 * (vij | aij) + 2 * (rij | dotaij); break;
	    default:
	      M_throw() << "Invalid access";
	    }
	  return retval;
	}
Example #16
 void Marker::rotateXAxis( cv::Mat& rotation)
 {
     cv::Mat R(3, 3, CV_32F);
      cv::Rodrigues(rotation, R);
     // create a rotation matrix for x axis
     cv::Mat RX = cv::Mat::eye(3, 3, CV_32F);
     const float angleRad = 3.14159265359f / 2.f;
     RX.at<float>(1, 1) = cos(angleRad);
     RX.at<float>(1, 2) = -sin(angleRad);
     RX.at<float>(2, 1) = sin(angleRad);
     RX.at<float>(2, 2) = cos(angleRad);
     // now multiply
     R = R * RX;
     // finally, run Rodrigues back to get the rotation vector
     Rodrigues(R, rotation);
 }
        void PositionCalculatorImpl::addMeasurementImpl( InputArray _tvec, InputArray _rvec, const Point2f _pt
                                                         , double /*time*/,
                                                         InputArray _cameraMatrix, InputArray _distortionMatrix )
        {
            Mat tvec = _tvec.getMat();
            Mat rvec = _rvec.getMat();
            Mat camera_matrix = _cameraMatrix.getMat();
            const Mat distortion_matrix = _distortionMatrix.getMat();

            std::vector< Point2f > pts_in, pts_out;
            pts_in.push_back( _pt );
            undistortPoints( pts_in, pts_out, camera_matrix, distortion_matrix, noArray(), camera_matrix );

            Mat los( 3, 1,CV_64F );

            los.at< double >( 0 ) = pts_out[0].x;
            los.at< double >( 1 ) = pts_out[0].y;
            los.at< double >( 2 ) = 1;

            if ( camera_matrix.type() != CV_64F )
                camera_matrix.convertTo( camera_matrix, CV_64F );
            if ( rvec.type() != CV_64F )
                rvec.convertTo( rvec, CV_64F );
            if ( tvec.type() != CV_64F )
                tvec.convertTo( tvec, CV_64F );

            los = camera_matrix.inv() * los;

            Mat camera_rotation;
            if ( rvec.rows == 3 && rvec.cols == 3 )
                camera_rotation = rvec;
            else
                Rodrigues( rvec, camera_rotation );

            if(tvec.rows == 1)
                tvec = tvec.t();

            camera_rotation = camera_rotation.t();
            const Mat camera_translation = ( -camera_rotation * tvec );
            los = camera_rotation * los;

            positions.push_back( camera_translation );
            Mat zero_pos( 3, 1, CV_64F );
            zero_pos.setTo( 0 );
            const Point2d azel = getAzEl( zero_pos, los );
            angles.push_back( azel );
        }
 // some crazy stuff, no idea what's happening there. So thanks Alvaro & Niklas!
 bool CameraCalibration::backProject(const Mat& boardRot64,
                                     const Mat& boardTrans64,
                                     const vector<Point2f>& imgPt,
                                     vector<Point3f>& worldPt) {
     if( imgPt.size() == 0 ) {
         return false;
     }
     else
     {
         Mat imgPt_h = Mat::zeros(3, imgPt.size(), CV_32F);
         for( int h=0; h<imgPt.size(); ++h ) {
             imgPt_h.at<float>(0,h) = imgPt[h].x;
             imgPt_h.at<float>(1,h) = imgPt[h].y;
             imgPt_h.at<float>(2,h) = 1.0f;
         }
         Mat Kinv64 = getUndistortedIntrinsics().getCameraMatrix().inv();
         Mat Kinv,boardRot,boardTrans;
         Kinv64.convertTo(Kinv, CV_32F);
         boardRot64.convertTo(boardRot, CV_32F);
         boardTrans64.convertTo(boardTrans, CV_32F);
         
         // Transform all image points to world points in camera reference frame
         // and then into the plane reference frame
         Mat worldImgPt = Mat::zeros( 3, imgPt.size(), CV_32F );
         Mat rot3x3;
         Rodrigues(boardRot, rot3x3);
         
         Mat transPlaneToCam = rot3x3.inv()*boardTrans;
         
         for( int i=0; i<imgPt.size(); ++i ) {
             Mat col = imgPt_h.col(i);
             Mat worldPtcam = Kinv*col;
             Mat worldPtPlane = rot3x3.inv()*(worldPtcam);
             
             float scale = transPlaneToCam.at<float>(2)/worldPtPlane.at<float>(2);
             Mat worldPtPlaneReproject = scale*worldPtPlane-transPlaneToCam;
             
             Point3f pt;
             pt.x = worldPtPlaneReproject.at<float>(0);
             pt.y = worldPtPlaneReproject.at<float>(1);
             pt.z = 0;
             worldPt.push_back(pt);
         }
     }
     return true;
 }
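For reference, the back-projection above is a ray-plane intersection done in the board frame. With camera matrix $K$, board pose $(R,t)$ and homogeneous pixel $x$, each pixel is lifted to a ray and intersected with the board plane $z=0$:

\[
  p = R^{-1}K^{-1}x, \qquad t' = R^{-1}t, \qquad s = \frac{t'_z}{p_z}, \qquad X = s\,p - t',
\]

which is exactly the per-column scale and worldPtPlaneReproject computed in the loop.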
Example #19
void Pose::UpdateMat() {
    Matx33d rmat;
    Rodrigues(rvec_, rmat);

    mat_(0, 0) = rmat(0, 0);
    mat_(0, 1) = rmat(0, 1);
    mat_(0, 2) = rmat(0, 2);
    mat_(1, 0) = rmat(1, 0);
    mat_(1, 1) = rmat(1, 1);
    mat_(1, 2) = rmat(1, 2);
    mat_(2, 0) = rmat(2, 0);
    mat_(2, 1) = rmat(2, 1);
    mat_(2, 2) = rmat(2, 2);

    mat_(0, 3) = tvec_(0);
    mat_(1, 3) = tvec_(1);
    mat_(2, 3) = tvec_(2);
}
Example #20
PinholeCamera::PinholeCamera(Vec3f rvector, Vec3f translation)
{
	Rodrigues(rvector,this->R);
	this->T = Mat(translation);

	this->r = -1;
	this->l = 1;
	this->b = -1;
	this->t = 1;

	this->n = 1;
	this->f = 2;

	this->setFrustum(l,r,b,t,n,f);

	cout << "olha1\n " << this->R << endl;
	cout << this->T << endl;
}
Example #21
Isometry3d cvmat2eigen(Mat &r, Mat &t)
{
	//rotation vector to matrix
  Mat R;
  Rodrigues(r, R);
  //opencv matrix to eigen matrix format
  Matrix3d re;
  cv2eigen(R, re);
  
  AngleAxisd angle(re);
  
  //start from a 4x4 identity isometry, then set its rotation
  Isometry3d T = Isometry3d::Identity();
  T = angle;
  
  T(0,3) = t.at<double>(0,0);
  T(1,3) = t.at<double>(1,0);
  T(2,3) = t.at<double>(2,0);
  
  return T;
}
Example #22
void Warp::set(Matx13f rotate)
{
	r = rotate;
	Matx<float, 3, 9> J;
	Rodrigues(r, R, J);
	Dx = Matx33f(
		J(0, 0), J(1, 0), J(2, 0),
		J(0, 3), J(1, 3), J(2, 3),
		J(0, 6), J(1, 6), J(2, 6)
		);
	Dy = Matx33f(
		J(0, 1), J(1, 1), J(2, 1),
		J(0, 4), J(1, 4), J(2, 4),
		J(0, 7), J(1, 7), J(2, 7)
		);
	Dz = Matx33f(
		J(0, 2), J(1, 2), J(2, 2),
		J(0, 5), J(1, 5), J(2, 5),
		J(0, 8), J(1, 8), J(2, 8)
		);
}
Example #23
void PnPProblem::estimatePoseRANSAC( const std::vector<cv::Point3f> &list_points3d, // list with model 3D coordinates
                                     const std::vector<cv::Point2f> &list_points2d,     // list with scene 2D coordinates
                                     int flags, cv::Mat &inliers, int iterationsCount,  // PnP method; inliers container
                                     float reprojectionError, double confidence )    // Ransac parameters
{
  cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);  // vector of distortion coefficients
  cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);          // output rotation vector
  cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);    // output translation vector

  bool useExtrinsicGuess = false;   // if true the function uses the provided rvec and tvec values as
            // initial approximations of the rotation and translation vectors

  cv::solvePnPRansac( list_points3d, list_points2d, _A_matrix, distCoeffs, rvec, tvec,
                useExtrinsicGuess, iterationsCount, reprojectionError, confidence,
                inliers, flags );

  Rodrigues(rvec,_R_matrix);      // converts Rotation Vector to Matrix
  _t_matrix = tvec;       // set translation matrix

  this->set_P_matrix(_R_matrix, _t_matrix); // set rotation-translation matrix

}
Example #24
// Estimate the pose given a list of 2D/3D correspondences and the method to use
bool PnPProblem::estimatePose( const std::vector<cv::Point3f> &list_points3d,
                               const std::vector<cv::Point2f> &list_points2d,
                               int flags)
{
  cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);
  cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);
  cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);

  bool useExtrinsicGuess = false;

  // Pose estimation
  bool correspondence = cv::solvePnP( list_points3d, list_points2d, _A_matrix, distCoeffs, rvec, tvec,
                                      useExtrinsicGuess, flags);

  // Transforms Rotation Vector to Matrix
  Rodrigues(rvec,_R_matrix);
  _t_matrix = tvec;

  // Set projection matrix
  this->set_P_matrix(_R_matrix, _t_matrix);

  return correspondence;
}
Example #25
bool solvePnP( InputArray _opoints, InputArray _ipoints,
               InputArray _cameraMatrix, InputArray _distCoeffs,
               OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    CV_INSTRUMENT_REGION()

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    Mat rvec, tvec;
    if( flags != SOLVEPNP_ITERATIVE )
        useExtrinsicGuess = false;

    if( useExtrinsicGuess )
    {
        int rtype = _rvec.type(), ttype = _tvec.type();
        Size rsize = _rvec.size(), tsize = _tvec.size();
        CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
                   (ttype == CV_32F || ttype == CV_64F) );
        CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
                   (tsize == Size(1, 3) || tsize == Size(3, 1)) );
    }
    else
    {
        int mtype = CV_64F;
        // use CV_32F if all PnP inputs are CV_32F and outputs are empty
        if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
            _rvec.empty() && _tvec.empty())
            mtype = _opoints.depth();

        _rvec.create(3, 1, mtype);
        _tvec.create(3, 1, mtype);
    }
    rvec = _rvec.getMat();
    tvec = _tvec.getMat();

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);
    bool result = false;

    if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        Mat R;
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        result = true;
    }
    else if (flags == SOLVEPNP_P3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        CV_Assert( npoints == 4);
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        ap3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = rvec, c_tvec = tvec;
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        result = true;
    }
    /*else if (flags == SOLVEPNP_DLS)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

        dls PnP(opoints, undistortedPoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = PnP.compute_pose(R, tvec);
        if (result)
            Rodrigues(R, rvec);
        return result;
    }
    else if (flags == SOLVEPNP_UPNP)
    {
        upnp PnP(cameraMatrix, opoints, ipoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        return true;
    }*/
    else
        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
    return result;
}
void CalcObjectLocation::calculate() {
	CLOG(LDEBUG)<<"in calculate()";

	Types::HomogMatrix homogMatrix;
	vector<cv::Mat_<double> > rvec;
	vector<cv::Mat_<double> > tvec;
	vector<cv::Mat_<double> > axis;
	vector<double> fi;
	cv::Mat_<double> tvectemp;
	cv::Mat_<double> rotMatrix;
	rotMatrix = cv::Mat_<double>::zeros(3,3);
	tvectemp = cv::Mat_<double>::zeros(3,1);

	while (!in_homogMatrix.empty()) {
		cv::Mat_<double> rvectemp;
		homogMatrix=in_homogMatrix.read();

		for (int i = 0; i < 3; ++i) {
			for (int j = 0; j < 3; ++j) {
				rotMatrix(i,j)=homogMatrix.elements[i][j];
			}
			tvectemp(i, 0) = homogMatrix.elements[i][3];
		}

		Rodrigues(rotMatrix, rvectemp);
		CLOG(LINFO) << rvectemp << "\n";
		rvec.push_back(rvectemp);
		tvec.push_back(tvectemp);
	}

	if (rvec.size() == 1) {
		out_homogMatrix.write(homogMatrix);
		return;
	}

	float fi_sum=0, fi_avg;
	cv::Mat_<double> axis_sum, axis_avg;
	cv::Mat_<double> rvec_avg;
	cv::Mat_<double> tvec_avg, tvec_sum;

	axis_sum = cv::Mat_<double>::zeros(3,1);
	tvec_sum = cv::Mat_<double>::zeros(3,1);

	for(int i = 0; i < rvec.size(); i++) {
		float fitmp = sqrt((pow(rvec.at(i)(0,0), 2) + pow(rvec.at(i)(1,0), 2)+pow(rvec.at(i)(2,0),2)));
		fi.push_back(fitmp);

		fi_sum+=fitmp;
		cv::Mat_<double> axistemp;
		axistemp.create(3,1);
		for(int k=0;k<3;k++) {
				axistemp(k,0)=rvec.at(i)(k,0)/fitmp;
		}
		axis.push_back(axistemp);
		axis_sum+=axistemp;
		tvec_sum+=tvec.at(i);
	}

	fi_avg = fi_sum/fi.size();
	axis_avg = axis_sum/axis.size();
	rvec_avg = axis_avg * fi_avg;
	tvec_avg = tvec_sum/tvec.size();

	Types::HomogMatrix hm;
	cv::Mat_<double> rottMatrix;
	Rodrigues(rvec_avg, rottMatrix);

	for (int i = 0; i < 3; ++i) {
		for (int j = 0; j < 3; ++j) {
			hm.elements[i][j] = rottMatrix(i, j);
			CLOG(LINFO) << hm.elements[i][j] << "  ";
		}
		hm.elements[i][3] = tvec_avg(i, 0);
		CLOG(LINFO) << hm.elements[i][3] << "\n";
	}
	out_homogMatrix.write(hm);
}
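Componentwise averaging of axis-angle vectors, as above, assumes all measurements lie in one hemisphere: r and -r can encode nearly identical rotations (angles fi and 2*pi-fi) yet cancel in the sum. A hedged pre-processing sketch (hypothetical helper, same axis/fi containers as in calculate()):

// flip axes into the hemisphere of the first measurement so that
// near-identical rotations do not cancel when averaged
void alignAxes(std::vector<cv::Mat_<double> >& axis, std::vector<double>& fi)
{
	for (size_t i = 1; i < axis.size(); ++i) {
		if (axis[0].dot(axis[i]) < 0) {
			axis[i] = -axis[i];               // mirror the axis...
			fi[i] = 2.0 * CV_PI - fi[i];      // ...and take the complementary angle
		}
	}
}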
bool ReconstructionHandler::initializeStereoModel(std::vector<cv::Point3d> &points, std::vector<cv::Vec3b> &pointsRGB, int imageInitializedCnt){
		
	// Camera parameters - necessary for reconstruction
	cv::Matx34d P0,P1;
	cv::Mat_<double> t;
	cv::Mat_<double> R;
	cv::Mat_<double> rvec(1,3); 

	// Set arbitrary parameters - will be refined
	P0 = cv::Matx34d(1,0,0,0,
					0,1,0,0,
					0,0,1,0);
	P1 = cv::Matx34d(1,0,0,0,
					0,1,0,0,
					0,0,1,0);

	// Attempt to find a baseline triangulation
	LOG(Debug, "Trying to find baseline triangulation...");

	// Find best two images to start with
	std::vector<CloudPoint> tmpPcloud;

	// Sort pairwise matches to find the lowest Homography inliers [Snavely07 4.2]
	// Take fewer matches than Snavely suggests...
	std::list<std::pair<int,std::pair<int,int> > > matchesSizes;
	// For debugging
	std::map<std::pair<int,int> ,std::vector<cv::DMatch> > tempMatches = sceneModel->getMatches();

	for(std::map<std::pair<int,int> ,std::vector<cv::DMatch> >::iterator i = tempMatches.begin(); i != tempMatches.end(); ++i) {
		if((*i).second.size() < 60)
			matchesSizes.push_back(make_pair(100,(*i).first));
		else {
			int Hinliers = findHomographyInliers2Views((*i).first.first,(*i).first.second);
			int percent = (int)(((double)Hinliers) / ((double)(*i).second.size()) * 100.0);
			LOG(Info, "The percentage of inliers for images ", (*i).first.first, "," , (*i).first.second, " = ", percent);
			matchesSizes.push_back(make_pair((int)percent,(*i).first));
		}
	}
	matchesSizes.sort(sortByFirst);

	// Reconstruct from two views
	bool goodF = false;
	m_first_view = m_second_view = 0;
	// Iterate pairs in order of match quality until a good fundamental matrix is found
	for(std::list<std::pair<int,std::pair<int,int> > >::iterator highest_pair = matchesSizes.begin(); 
		highest_pair != matchesSizes.end() && !goodF; 
		++highest_pair) 
	{
		m_second_view = (*highest_pair).second.second;
		m_first_view  = (*highest_pair).second.first;

		LOG(Debug, "Attempting reconstruction from view ", m_first_view, " and ", m_second_view);
		// If reconstruction of the first two views is bad, fall back to another pair
		// See if the Fundamental Matrix between these two views is good
		std::vector<cv::KeyPoint> keypoints1Refined;
		std::vector<cv::KeyPoint> keypoints2Refined;
		goodF = findCameraMatrices(sceneModel->K, sceneModel->Kinv, sceneModel->distortionCoefficients, 
				sceneModel->getKeypoints(m_first_view), sceneModel->getKeypoints(m_second_view), 
				keypoints1Refined, keypoints2Refined, P0, P1, sceneModel->getMatches(m_first_view,m_second_view), 
				tmpPcloud);

		if (goodF) {
			std::vector<CloudPoint> new_triangulated;
			std::vector<int> add_to_cloud;

			sceneModel->poseMats[m_first_view] = P0;
			sceneModel->poseMats[m_second_view] = P1;

			// TODO imageInitializedCnt used to initialize correspondence matching vector
			// Will fail if more images are added later...
			bool good_triangulation = triangulatePointsBetweenViews(m_second_view,m_first_view,new_triangulated,add_to_cloud, imageInitializedCnt);
			if(!good_triangulation || cv::countNonZero(add_to_cloud) < 10) {
				LOG(Debug, " Triangulation failed");
				goodF = false;
				sceneModel->poseMats[m_first_view] = 0;
				sceneModel->poseMats[m_second_view] = 0;
				m_second_view++;
			} else {
				assert(new_triangulated[0].imgpt_for_img.size() > 0);
				LOG(Debug, " Points before triangulation: ", (int)sceneModel->reconstructedPts.size());
				for (unsigned int j=0; j<add_to_cloud.size(); j++) {
					if(add_to_cloud[j] == 1) {
						sceneModel->reconstructedPts.push_back(new_triangulated[j]);
					}
				}
				LOG(Debug, " Points after triangulation: ", (int)sceneModel->reconstructedPts.size());
			}				
		}
	}
		
	if (!goodF) {
		LOG(Error, "Cannot find a good pair of images to obtain a baseline triangulation");
		return false;
	}

	LOG(Debug, "===================================================================");
	LOG(Debug, "Taking baseline from " , m_first_view , " and " , m_second_view);
	LOG(Debug, "===================================================================");

	// Adjust bundle for everything so far before display
	cv::Mat temporaryCameraMatrix = sceneModel->K;
	BundleAdjustment BA;
	BA.adjustBundle(sceneModel->reconstructedPts,temporaryCameraMatrix,sceneModel->getKeypoints(),sceneModel->poseMats);
	updateReprojectionErrors();

	#ifdef __DEBUG__DISPLAY__
	// DEBUG - Drawing matches that survived the fundamental matrix
	cv::drawMatches(sceneModel->frames[0], sceneModel->getKeypoints(0), 
					sceneModel->frames[1], sceneModel->getKeypoints(1),
					sceneModel->getMatches(0,1), img_matches, cv::Scalar(0,0,255));
	imshow( "Good Matches After F Refinement", img_matches );
	cv::waitKey(0);
	#endif		

	// Pass reconstructed points to 3D display		
	// TODO unnecessary copying
	for (int i = 0; i < sceneModel->reconstructedPts.size(); ++i) {
		points.push_back(sceneModel->reconstructedPts[i].pt);
	}

	getRGBForPointCloud(sceneModel->reconstructedPts, pointsRGB);
				
	// End of stereo initialization
	LOG(Debug, "Initialized stereo model");

	P1 = sceneModel->poseMats[m_second_view];
	t = (cv::Mat_<double>(1,3) << P1(0,3), P1(1,3), P1(2,3));
	R = (cv::Mat_<double>(3,3) << P1(0,0), P1(0,1), P1(0,2), 
									P1(1,0), P1(1,1), P1(1,2), 
									P1(2,0), P1(2,1), P1(2,2));
	Rodrigues(R, rvec);

	// Update sets which track what was processed 
	sceneModel->doneViews.clear();
	sceneModel->goodViews.clear();
	sceneModel->doneViews.insert(m_first_view);
	sceneModel->doneViews.insert(m_second_view);
	sceneModel->goodViews.insert(m_first_view);
	sceneModel->goodViews.insert(m_second_view);

	return true;
}
Example #28
	void estimatePose(vector<Point3d> modelPoints, vector<Point2f> imagePoints, Point2f centerPoint, double (cameraMatrix)[16], double (&resultMatrix)[16] ){

		Mat rvec, tvec, rMat;
		double rot[9] = { 0 };
		vector<double> rv(3), tv(3);
		rvec = Mat(rv);

		// Seed rvec with a 180-degree rotation about X (flips Y and Z axes)
		double _d[9] = { 1, 0, 0, 0, -1, 0, 0, 0, -1 };
		Rodrigues(Mat(3, 3, CV_64FC1, _d), rvec);

		tv[0] = 0;
		tv[1] = 0;
		tv[2] = 1;
		tvec = Mat(tv);

		// Hard-coded camera matrix
		double _cm[9] = { 3.1810194786433851e+02, 0., 2.0127743042733985e+02, 0.,
			3.1880182087777166e+02, 1.2606640793496385e+02, 0., 0., 1 };

		// Distortion coefficients
		double _dc[] = { 0, 0, 0, 0 };

		Mat camMatrix = Mat(3, 3, CV_64FC1, _cm);

		solvePnP(Mat(modelPoints), Mat(imagePoints), camMatrix,
				 Mat(1, 4, CV_64FC1, _dc), rvec, tvec, true);

		rMat = Mat(3, 3, CV_64FC1, rot);
		Rodrigues(rvec, rMat);

		// Empirical offsets built from the hard-coded intrinsics above
		tvec.at<double> (0, 0) += 2.0127743042733985e+02 + 4.69099e-02;
		tvec.at<double> (1, 0) += 1.2606640793496385e+02 - 2.5968e-02;
		tvec.at<double> (2, 0) -= 3.1810194786433851e+02 + 2.31789e-01;

		// Column-major OpenGL model-view matrix: R with its Y and Z rows negated
		resultMatrix[0] = rMat.at<double> (0, 0);//*scale;
		resultMatrix[1] = -rMat.at<double> (1, 0);
		resultMatrix[2] = -rMat.at<double> (2, 0);
		resultMatrix[3] = 0;
		resultMatrix[4] = rMat.at<double> (0, 1);
		resultMatrix[5] = -rMat.at<double> (1, 1);//*scale;
		resultMatrix[6] = -rMat.at<double> (2, 1);
		resultMatrix[7] = 0;
		resultMatrix[8] = rMat.at<double> (0, 2);
		resultMatrix[9] = -rMat.at<double> (1, 2);
		resultMatrix[10] = -rMat.at<double> (2, 2);//*scale;
		resultMatrix[11] = 0;
		resultMatrix[12] = 4.0f*(4.0f/3.0f)*(5.0f/3.0f)*(centerPoint.x - 200)/400;
		resultMatrix[13] = -4.0f*(4.0f/3.0f)*1.0f*(centerPoint.y - 120)/240;
		resultMatrix[14] = -4.0f;
		resultMatrix[15] = 1.0f;
	}
Example #29
void solvePlanarPnP(const Mat& objectPoints, const Mat& imagePoints, const Mat& cameraMatrix, const Mat& distCoeffs,
                    Mat& _rvec, Mat& _tvec, bool useExtrinsicGuess)
{
    CV_Assert(objectPoints.depth() == CV_32F && imagePoints.depth() == CV_32F);

    if(useExtrinsicGuess == false)
    {
        solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, _rvec, _tvec, false);
        return;
    }

    Mat rvec, tvec;
    _rvec.convertTo(rvec, CV_32FC1);
    _tvec.convertTo(tvec, CV_32FC1);

    // calculate rotation matrix
    Mat R(3, 3, CV_32FC1);
    Rodrigues(rvec, R);
    CV_Assert(R.type() == CV_32FC1);

    // calculate object normal
    Point3f normal0 = getPlanarObjectNormal(objectPoints);
//    printf("Normal0: %f %f %f\n", normal0.x, normal0.y, normal0.z);

    Mat Normal0(3, 1, CV_32F);
    Normal0.at<Point3f>(0, 0) = normal0;
    Mat Normal = R*Normal0;
    Point3f normal = Normal.at<Point3f>(0, 0);
    normal = normal*(1.0/norm(normal));
    if(normal.z < 0) normal = -normal; // z points from the camera
//    printf("Normal: %f %f %f\n", normal.x, normal.y, normal.z);

    vector<Point3f> rotated_object_points;
    rotated_object_points.resize(objectPoints.rows);
    for(size_t i = 0; i < rotated_object_points.size(); i++)
    {
        Mat p = objectPoints.rowRange(i, i + 1);
        p = p.reshape(1, 3);        
        Mat res = R*p;
        rotated_object_points[i] = res.at<Point3f>(0, 0);
    }

    double alpha, C;
    vector<Point3f> object_points_crf;
    findPlanarObjectPose(rotated_object_points, imagePoints, normal, cameraMatrix, distCoeffs, alpha, C, object_points_crf);

    Mat rp(3, 1, CV_32FC1);
    rp.at<Point3f>(0, 0) = normal*alpha;

    Mat Rp;
    Rodrigues(rp, Rp);

    R = Rp*R;
    Rodrigues(R, rvec);

    Point3f center1 = massCenter(rotated_object_points);
    Mat mcenter1(3, 1, CV_32FC1, &center1);
    Mat mcenter1_alpha = Rp*mcenter1;
    Point3f center1_alpha = mcenter1_alpha.at<Point3f>(0, 0);

    Point3f center2 = massCenter(object_points_crf);
    tvec.at<Point3f>(0, 0) = center2 - center1_alpha;

    Mat mobj;
    objectPoints.copyTo(mobj);
    mobj = mobj.reshape(1);

    CV_Assert(R.type() == CV_32FC1 && mobj.type() == CV_32FC1);
    Mat mrobj = R*mobj.t();
    mrobj = mrobj.t();
    Point3f p1 = mrobj.at<Point3f>(0, 0) + center2 - center1;
    Point3f p2 = object_points_crf[0];
//    printf("point1: %f %f %f\n", p1.x, p1.y, p1.z);
//    printf("point2: %f %f %f\n", p2.x, p2.y, p2.z);

    rvec.convertTo(_rvec, _rvec.depth());
    tvec.convertTo(_tvec, _tvec.depth());
}
Example #30
 // Free-stream the pair: Rodrigues here is DynamO's helper returning the
 // rotation matrix for the vector w*dt (not cv::Rodrigues)
 void stream(const double& dt)
 {
   u1 = Rodrigues(w1 * dt) * Vector(u1);
   u2 = Rodrigues(w2 * dt) * Vector(u2);
   r12 += v12 * dt;
 }