Example #1
    void Marker::calculateExtrinsics(float markerSizeMeters, cv::Mat camMatrix, cv::Mat distCoeff,
                                     bool setYPerpendicular)
    {
        if (!isValid())
            throw cv::Exception(9004, "!isValid(): invalid marker. It is not possible to calculate extrinsics",
                                "calculateExtrinsics", __FILE__, __LINE__);
        if (markerSizeMeters <= 0)
            throw cv::Exception(9004, "markerSize<=0: invalid markerSize", "calculateExtrinsics", __FILE__, __LINE__);
        if (camMatrix.rows == 0 || camMatrix.cols == 0)
            throw cv::Exception(9004, "CameraMatrix is empty", "calculateExtrinsics", __FILE__, __LINE__);

        vector<cv::Point3f> objpoints = get3DPoints(markerSizeMeters);

        cv::Mat raux, taux;
        cv::solvePnP(objpoints, *this, camMatrix, distCoeff, raux, taux);
        raux.convertTo(Rvec, CV_32F);
        taux.convertTo(Tvec, CV_32F);
        // rotate the X axis so that Y is perpendicular to the marker plane
        if (setYPerpendicular)
            rotateXAxis(Rvec);
        ssize = markerSizeMeters;
    }
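For reference, the object points used above are the marker's four corners in its own plane. A minimal sketch of what a get3DPoints-style helper typically returns, assuming a square marker centered at the origin with z = 0 (the corner order is an assumption and must match the order of the detected 2D corners):

    // Hypothetical helper: 3D corners of a square marker of side s (meters),
    // centered at the origin in the z = 0 plane.
    std::vector<cv::Point3f> markerObjectPoints(float s)
    {
        float h = s / 2.f;
        return { { -h,  h, 0 }, {  h,  h, 0 },
                 {  h, -h, 0 }, { -h, -h, 0 } };
    }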
Example #2
bool Camera::calculateExtrinsicParams(vector<Point3f> objectPoints, vector<Point2f> imagePoints)
{
	OPENCV_ASSERT(isCalibrated,"Camera.calculateExtrinsicParams","Cannot calculate extrinsic parameters before camera calibration!");
	Mat rvec, tvec;
	Mat rotMtx;
	bool solverResult = solvePnP(
		Mat(objectPoints), Mat(imagePoints),	// Input correspondences
		cameraMatrix, distortionCoeffs,	// Intrinsic camera parameters (must already be available)
		rvec, tvec);

	if (solverResult)
	{
		// Create this->T from rvec and tvec
		Rodrigues(rvec, rotMtx);
		Matx44f T_inv = Matx44f(
			(float)rotMtx.at<double>(0,0), (float)rotMtx.at<double>(0,1), (float)rotMtx.at<double>(0,2), (float)tvec.at<double>(0,0),
			(float)rotMtx.at<double>(1,0), (float)rotMtx.at<double>(1,1), (float)rotMtx.at<double>(1,2), (float)tvec.at<double>(1,0),
			(float)rotMtx.at<double>(2,0), (float)rotMtx.at<double>(2,1), (float)rotMtx.at<double>(2,2), (float)tvec.at<double>(2,0),
			0.0F, 0.0F, 0.0F, 1.0F
			);
		T = T_inv.inv();
		isTSet=true;
	}

	return solverResult;
}
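Since the rotation block of T_inv is orthonormal, the general 4x4 inversion above can be replaced by the closed form inv([R|t]) = [R^T | -R^T t]. A minimal sketch (invertRigid is a hypothetical helper, not part of the Camera class):

    #include <opencv2/core.hpp>

    // Closed-form inverse of a rigid transform: R_inv = R^T, t_inv = -R^T * t.
    static void invertRigid(const cv::Mat& R, const cv::Mat& t,
                            cv::Mat& R_inv, cv::Mat& t_inv)
    {
        R_inv = R.t();
        t_inv = -R_inv * t;
    }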
Example #3
void DrawAxis::openWebcam()
{
	if (!webcam.open(0))
		return;

	webcam.set(CV_CAP_PROP_FRAME_WIDTH, frameWidth);
	webcam.set(CV_CAP_PROP_FRAME_HEIGHT, frameHeight);
	rvec = Mat(Size(3, 1), CV_64F);
	tvec = Mat(Size(3, 1), CV_64F);
	cout << "intrinsic = " << intrinsic_params << endl;
	cout << "dist = " << distortion_params << endl;
	cout << "intrinsic.size = " << intrinsic_params.size() << endl;
	namedWindow("OpenCV Webcam", WINDOW_NORMAL);
	while (true){
		webcam.read(webcamImage);
		bool findCorners = findChessboardCorners(webcamImage, boardSize, corners, CALIB_CB_FAST_CHECK);
		if (findCorners){
			solvePnP(Mat(boardPoints), Mat(corners), intrinsic_params, distortion_params, rvec, tvec, false);
			projectPoints(cubePoints, rvec, tvec, intrinsic_params, distortion_params, cubeFramePoints);
			projectPoints(framePoints, rvec, tvec, intrinsic_params, distortion_params, imageFramePoints);

			drawAxis(webcamImage, color, 3);
			drawCube(webcamImage, cubeFramePoints, Scalar(255, 0, 255), 2);

		}
		imshow("OpenCV Webcam", webcamImage);

		waitKey(10);
	}
}
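Recent OpenCV releases (3.4.1 and later) ship a ready-made axis overlay in calib3d that can stand in for a hand-rolled drawAxis. A minimal sketch, where axisLength is an assumed length in board units:

    // Draw the X/Y/Z axes of the estimated pose onto the current frame.
    float axisLength = 3.0f; // e.g. three chessboard squares
    drawFrameAxes(webcamImage, intrinsic_params, distortion_params,
                  rvec, tvec, axisLength);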
Example #4
void GenerateExtrinsecMatrix(String intrinsecFileName, std::vector<Point2f> imagePoints, std::vector<Point3f> objectPoints, Mat& tvec, Mat& rvec, Mat& rotationMatrix, Mat& cameraMatrix)
{
	// TODO: check the number of input points
	// TODO: compute the Euclidean reprojection distance

	std::cout << "There are " << imagePoints.size() << " imagePoints and " << objectPoints.size() << " objectPoints." << std::endl;

	// Reuse the intrinsic matrix computed previously
	FileStorage fs(intrinsecFileName, FileStorage::READ);
	if (!fs.isOpened())
	{
		std::cerr << "Cannot open " << intrinsecFileName << std::endl;
		return;
	}
	Mat distCoeffs;
	fs["camera_matrix"] >> cameraMatrix;
	fs["distortion_coefficients"] >> distCoeffs;

	solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
	cv::Rodrigues(rvec, rotationMatrix);

	// Check the projection
	std::vector<Point2f> projectedImagePoints;
	cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedImagePoints);
	// TODO: calculate the reprojection error (see the sketch below)

	for (unsigned int i = 0; i < projectedImagePoints.size(); ++i)
	{
		std::cout << "Image point: " << imagePoints[i] << " projected to " << projectedImagePoints[i] << std::endl;
	}
}
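The TODO above asks for the reprojection ("retroprojection") error. A minimal sketch of an RMS error over the measured/reprojected point pairs already available in the function (rmsReprojectionError is a hypothetical helper):

    #include <cmath>
    #include <opencv2/core.hpp>

    // RMS reprojection error between measured and reprojected image points.
    static double rmsReprojectionError(const std::vector<cv::Point2f>& measured,
                                       const std::vector<cv::Point2f>& projected)
    {
        // L2 norm over all coordinate differences, normalized by the point count.
        double l2 = cv::norm(measured, projected, cv::NORM_L2);
        return std::sqrt(l2 * l2 / measured.size());
    }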
Example #5
void Pose::estimate(Mat gray, Fiducial& f){
	if (f.found){
		// Hard-coded intrinsics; fy is negative to flip the y axis
		Mat K = (Mat_<float>(3,3) << 525., 0., 320., 0., -525., 240., 0., 0., 1.);
		solvePnP(Mat(f.points), Mat(f.corners), K, Mat(), rvec, tvec, false);
		found = true;
	}
	else
		found = false;
}
Example #6
bool ASM_Gaze_Tracker::estimateFacePose() {
    if (isTrackingSuccess == false) {
        return false;
    }
    vector<Point2f> imagePoints = tracker.points;
    fliplr(imagePoints, im.size());
    solvePnP(facialPointsIn3D, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    return true;
}
Example #7
bool CustomPattern::findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
                OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int flags)
{
    vector<Point2f> imagePoints;
    vector<Point3f> objectPoints;

    if (!findPattern(image, imagePoints, objectPoints))
        return false;
    return solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, flags);
}
Example #8
void PoseEstimator::UpdateProjectionMatrix(const vector<Point>& FingerTips)
{
	if (FingerTips.size() != 5)
		return;

	// Convert integer pixel coordinates to float for solvePnP
	vector<Point2f> FingerTipsFloat;
	for (auto& Tip : FingerTips)
		FingerTipsFloat.push_back(Point2f(Tip));

	solvePnP(handModel, FingerTipsFloat, cameraMat, distCoeffs, rotationMat, translationMat, false);
}
Example #9
    /* Post: compute _model with given points and return number of found models */
    int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const
    {
        Mat opoints = _m1.getMat(), ipoints = _m2.getMat();

        bool correspondence = solvePnP( opoints, ipoints, cameraMatrix, distCoeffs,
                                        rvec, tvec, useExtrinsicGuess, flags );

        Mat _local_model;
        hconcat(rvec, tvec, _local_model);
        _local_model.copyTo(_model);

        return correspondence;
    }
Example #10
cv::Mat GoodFrame::getRT(std::vector<cv::Point3f> &modelPoints_min)
{
    cv::Mat img = this->getCapturesImage();
    std::vector<cv::Point2f> imagePoints;
    for (size_t i = 0; i < 68; ++i)
    {
        imagePoints.push_back(cv::Point2f((float)(this->getDetected_landmarks().at<double>(i)),
                                          (float)(this->getDetected_landmarks().at<double>(i+68))));
    }

    // Approximate intrinsics: focal length ~ max image dimension, principal point at the image center
    int max_d = MAX(img.rows, img.cols);
    cv::Mat camMatrix = (Mat_<double>(3,3) << max_d, 0,     img.cols/2.0,
                                              0,     max_d, img.rows/2.0,
                                              0,     0,     1.0);

    cv::Mat ip(imagePoints);
    cv::Mat op(modelPoints_min);
    std::vector<double> rv(3), tv(3);
    cv::Mat rvec(rv),tvec(tv);
    double _dc[] = {0,0,0,0};
    solvePnP(op, ip, camMatrix, cv::Mat(1,4,CV_64FC1,_dc), rvec, tvec, false, CV_EPNP);

    double rot[9] = {0};
    cv::Mat rotM(3, 3, CV_64FC1, rot);
    cv::Rodrigues(rvec, rotM);
    double* _r = rotM.ptr<double>();

    cv::Mat _pm(3, 4, CV_64FC1);
    _pm.at<double>(0,0) = _r[0]; _pm.at<double>(0,1) = _r[1]; _pm.at<double>(0,2) = _r[2]; _pm.at<double>(0,3) = tv[0];
    _pm.at<double>(1,0) = _r[3]; _pm.at<double>(1,1) = _r[4]; _pm.at<double>(1,2) = _r[5]; _pm.at<double>(1,3) = tv[1];
    _pm.at<double>(2,0) = _r[6]; _pm.at<double>(2,1) = _r[7]; _pm.at<double>(2,2) = _r[8]; _pm.at<double>(2,3) = tv[2];

    return _pm;
}
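The element-by-element assembly of the 3x4 [R|t] matrix at the end of getRT can be written more compactly with hconcat, since rotM (3x3) and tvec (3x1) are both CV_64F:

    // Equivalent construction: concatenate rotation and translation into [R|t].
    cv::Mat pm;
    cv::hconcat(rotM, tvec, pm); // pm is 3x4, CV_64FC1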
Example #11
bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{
    CV_INSTRUMENT_REGION()

    Mat opoints0 = _opoints.getMat(), ipoints0 = _ipoints.getMat();
    Mat opoints, ipoints;
    if( opoints0.depth() == CV_64F || !opoints0.isContinuous() )
        opoints0.convertTo(opoints, CV_32F);
    else
        opoints = opoints0;
    if( ipoints0.depth() == CV_64F || !ipoints0.isContinuous() )
        ipoints0.convertTo(ipoints, CV_32F);
    else
        ipoints = ipoints0;

    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    int model_points = 5;
    int ransac_kernel_method = SOLVEPNP_EPNP;

    if( npoints == 4 )
    {
        model_points = 4;
        ransac_kernel_method = SOLVEPNP_P3P;
    }

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, ransac_kernel_method, useExtrinsicGuess, rvec, tvec);

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // maximum number of iterations

    Mat _local_model(3, 2, CV_64FC1);
    Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points,
        param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result > 0 )
    {
        vector<Point3d> opoints_inliers;
        vector<Point2d> ipoints_inliers;
        opoints = opoints.reshape(3);
        ipoints = ipoints.reshape(2);
        opoints.convertTo(opoints_inliers, CV_64F);
        ipoints.convertTo(ipoints_inliers, CV_64F);

        const uchar* mask = _mask_local_inliers.ptr<uchar>();
        int npoints1 = compressElems(&opoints_inliers[0], mask, 1, npoints);
        compressElems(&ipoints_inliers[0], mask, 1, npoints);

        opoints_inliers.resize(npoints1);
        ipoints_inliers.resize(npoints1);
        result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
                          distCoeffs, rvec, tvec, false,
                          (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P) ? SOLVEPNP_EPNP : flags) ? 1 : -1;
    }

    if( result <= 0 || _local_model.rows <= 0)
    {
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        Mat _local_inliers;
        for (int i = 0; i < npoints; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) != 0) // inliers mask
                _local_inliers.push_back(i);    // output inliers vector
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
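A minimal usage sketch for the function above; objectPoints, imagePoints, K, and dist are placeholder names for the caller's correspondences and intrinsics:

    // Robust pose estimation over noisy 2D-3D correspondences.
    std::vector<int> inliers;
    cv::Mat rvec, tvec;
    bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, dist,
                                 rvec, tvec,
                                 false,  // useExtrinsicGuess
                                 100,    // iterationsCount
                                 8.0f,   // reprojectionError, in pixels
                                 0.99,   // confidence
                                 inliers, cv::SOLVEPNP_ITERATIVE);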
Example #12
void solvePlanarPnP(const Mat& objectPoints, const Mat& imagePoints, const Mat& cameraMatrix, const Mat& distCoeffs,
                    Mat& _rvec, Mat& _tvec, bool useExtrinsicGuess)
{
    CV_Assert(objectPoints.depth() == CV_32F && imagePoints.depth() == CV_32F);

    if(useExtrinsicGuess == false)
    {
        solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, _rvec, _tvec, false);
        return;
    }

    Mat rvec, tvec;
    _rvec.convertTo(rvec, CV_32FC1);
    _tvec.convertTo(tvec, CV_32FC1);

    // calculate rotation matrix
    Mat R(3, 3, CV_32FC1);
    Rodrigues(rvec, R);
    CV_Assert(R.type() == CV_32FC1);

    // calculate object normal
    Point3f normal0 = getPlanarObjectNormal(objectPoints);

    Mat Normal0(3, 1, CV_32F);
    Normal0.at<Point3f>(0, 0) = normal0;
    Mat Normal = R*Normal0;
    Point3f normal = Normal.at<Point3f>(0, 0);
    normal = normal*(1.0/norm(normal));
    if(normal.z < 0) normal = -normal; // ensure z points away from the camera

    vector<Point3f> rotated_object_points;
    rotated_object_points.resize(objectPoints.rows);
    for(size_t i = 0; i < rotated_object_points.size(); i++)
    {
        Mat p = objectPoints.rowRange(i, i + 1);
        p = p.reshape(1, 3);        
        Mat res = R*p;
        rotated_object_points[i] = res.at<Point3f>(0, 0);
    }

    double alpha, C;
    vector<Point3f> object_points_crf;
    findPlanarObjectPose(rotated_object_points, imagePoints, normal, cameraMatrix, distCoeffs, alpha, C, object_points_crf);

    Mat rp(3, 1, CV_32FC1);
    rp.at<Point3f>(0, 0) = normal*alpha;

    Mat Rp;
    Rodrigues(rp, Rp);

    R = Rp*R;
    Rodrigues(R, rvec);

    Point3f center1 = massCenter(rotated_object_points);
    Mat mcenter1(3, 1, CV_32FC1, &center1);
    Mat mcenter1_alpha = Rp*mcenter1;
    Point3f center1_alpha = mcenter1_alpha.at<Point3f>(0, 0);

    Point3f center2 = massCenter(object_points_crf);
    tvec.at<Point3f>(0, 0) = center2 - center1_alpha;

    Mat mobj;
    objectPoints.copyTo(mobj);
    mobj = mobj.reshape(1);

    CV_Assert(R.type() == CV_32FC1 && mobj.type() == CV_32FC1);
    Mat mrobj = R*mobj.t();
    mrobj = mrobj.t();
    // Consistency check (debug only): p1 and p2 should coincide for a correct pose
    Point3f p1 = mrobj.at<Point3f>(0, 0) + center2 - center1;
    Point3f p2 = object_points_crf[0];
    (void)p1; (void)p2;

    rvec.convertTo(_rvec, _rvec.depth());
    tvec.convertTo(_tvec, _tvec.depth());
}
Example #13
        void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
                     const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
                     const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
        {
            Mat modelObjectPoints(1, MIN_POINTS_COUNT, CV_32FC3), modelImagePoints(1, MIN_POINTS_COUNT, CV_32FC2);
            for (int i = 0, colIndex = 0; i < (int)pointsMask.size(); i++)
            {
                if (pointsMask[i])
                {
                    Mat colModelImagePoints = modelImagePoints(Rect(colIndex, 0, 1, 1));
                    imagePoints.col(i).copyTo(colModelImagePoints);
                    Mat colModelObjectPoints = modelObjectPoints(Rect(colIndex, 0, 1, 1));
                    objectPoints.col(i).copyTo(colModelObjectPoints);
                    colIndex = colIndex+1;
                }
            }
            
            //filter out duplicate 3D points; identical points make solvePnP hang
            double eps = 1e-10;
            int num_same_points = 0;
            for (int i = 0; i < MIN_POINTS_COUNT; i++)
                for (int j = i + 1; j < MIN_POINTS_COUNT; j++)
                {
                    if (norm(modelObjectPoints.at<Vec3f>(0, i) - modelObjectPoints.at<Vec3f>(0, j)) < eps)
                        num_same_points++;
                }
            if (num_same_points > 0)
                return;
            
            Mat localRvec, localTvec;
            rvecInit.copyTo(localRvec);
            tvecInit.copyTo(localTvec);
        
            solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion,
                     localRvec, localTvec, params.useExtrinsicGuess, params.flags);
            
            vector<Point2f> projected_points;
            projected_points.resize(objectPoints.cols);
            projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
            
            Mat rotatedPoints;
            project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
            
            vector<int> localInliers;
            for (int i = 0; i < objectPoints.cols; i++)
            {
                Point2f p(imagePoints.at<Vec2f>(0, i)[0], imagePoints.at<Vec2f>(0, i)[1]);
                if ((norm(p - projected_points[i]) < params.reprojectionError)
                    && (rotatedPoints.at<Vec3f>(0, i)[2] > 0)) // hack: keep only points in front of the camera
                {
                    localInliers.push_back(i);
                }
            }
            
            if (localInliers.size() > inliers.size())
            {
                resultsMutex.lock();
                
                inliers.clear();
                inliers.resize(localInliers.size());
                memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
                localRvec.copyTo(rvec);
                localTvec.copyTo(tvec);
                
                resultsMutex.unlock();
            }
        }
Example #14
void SolvePNP_method(vector<Point3f> points3d,
                     vector<Point2f> imagePoints,
                     MatrixXf &R, Vector3f &t, MatrixXf &xcam_,
                     MatrixXf K,
                     const PointCloud<PointXYZRGB>::Ptr& cloudRGB,      // projected (colored) output cloud
                     const PointCloud<PointXYZ>::Ptr& cloud_laser_cord, // laser coordinates
                     MatrixXf& points_projected, Mat image,
                     PointCloud<PointXYZ>::ConstPtr cloud)
{
  // Copy the Eigen intrinsics into an OpenCV matrix
  Mat_<float> camera_matrix (3, 3);
  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      camera_matrix(i, j) = K(i, j);

  vector<double> rv(3), tv(3);
  Mat rvec(rv), tvec(tv);

  Mat_<float> dist_coef (5, 1);
  dist_coef = 0;

  // Debug output
  cout << camera_matrix << endl;
  cout << imagePoints << endl;
  cout << points3d << endl;

  solvePnP(points3d, imagePoints, camera_matrix, dist_coef, rvec, tvec);

  // Convert the results back to Eigen
  double rot[9] = {0};
  Mat R_2(3, 3, CV_64FC1, rot);
  Rodrigues(rvec, R_2);

  R = Matrix3f::Zero(3, 3);
  t = Vector3f(0, 0, 0);

  double* _r = R_2.ptr<double>();
  double* _t = tvec.ptr<double>();

  t(0) = _t[0];
  t(1) = _t[1];
  t(2) = _t[2];

  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      R(i, j) = _r[3*i + j];

  // Project each laser point into the image and color it from the pixel it hits
  points_projected = MatrixXf::Zero(2, cloud->points.size());

  for (int j = 0; j < (int)cloud->points.size(); j++){

    PointCloud<PointXYZRGB> PointAuxRGB;
    PointAuxRGB.points.resize(1);

    Vector3f xw = Vector3f(cloud->points[j].x,
                           cloud->points[j].y,
                           cloud->points[j].z);

    xw = K*(R*xw + t);
    xw = xw/xw(2);

    int col = (int)xw(0);
    int row = (int)xw(1);

    points_projected(0, j) = col;
    points_projected(1, j) = row;

    int b, g, r;
    if (col < 0 || row < 0 || col >= image.cols || row >= image.rows){
      r = 0;
      g = 0;
      b = 0;
    }else{
      // OpenCV stores pixels as BGR
      b = image.at<cv::Vec3b>(row, col)[0];
      g = image.at<cv::Vec3b>(row, col)[1];
      r = image.at<cv::Vec3b>(row, col)[2];
    }

    uint32_t rgb = ((uint32_t)r << 16 | (uint32_t)g << 8 | (uint32_t)b);

    PointAuxRGB.points[0].x = cloud->points[j].x;
    PointAuxRGB.points[0].y = cloud->points[j].y;
    PointAuxRGB.points[0].z = cloud->points[j].z;
    PointAuxRGB.points[0].rgb = *reinterpret_cast<float*>(&rgb);

    cloudRGB->points.push_back(PointAuxRGB.points[0]);
  }
}
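The manual element copies between Eigen and OpenCV in SolvePNP_method can be replaced by OpenCV's Eigen bridge (opencv2/core/eigen.hpp). A minimal sketch:

    #include <opencv2/core/eigen.hpp>

    // Convert the Eigen intrinsics to cv::Mat in one call, and the
    // estimated rotation back to Eigen.
    cv::Mat camera_matrix;
    cv::eigen2cv(K, camera_matrix); // Eigen::MatrixXf -> CV_32F cv::Mat

    Eigen::Matrix3f R_eigen;
    cv::cv2eigen(R_2, R_eigen);     // cv::Mat -> Eigen matrix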
Example #15
	void estimatePose(vector<Point3d> modelPoints, vector<Point2f> imagePoints, Point2f centerPoint, double (cameraMatrix)[16], double (&resultMatrix)[16] ){
		
		Mat rvec, tvec, rMat;
		
		double rot[9] = { 0 };
		vector<double> rv(3), tv(3);
		
		rvec = Mat(rv);
		
		// Initial rotation guess: 180 degrees about the x axis (flip y and z)
		double _d[9] = { 1, 0, 0, 0, -1, 0, 0, 0, -1 };
		Rodrigues(Mat(3, 3, CV_64FC1, _d), rvec);
		
		tv[0] = 0;
		tv[1] = 0;
		tv[2] = 1;
		
		tvec = Mat(tv);
		
		
		// Camera matrix
		double _cm[9] = { 3.1810194786433851e+02, 0., 2.0127743042733985e+02, 0.,
			3.1880182087777166e+02, 1.2606640793496385e+02, 0., 0., 1 };
		
		// Distortion coefficients
		double _dc[] = { 0, 0, 0, 0 };
		
		
		Mat camMatrix = Mat(3, 3, CV_64FC1, _cm);
		
		solvePnP(Mat(modelPoints), Mat(imagePoints), camMatrix, Mat(1, 4,
																	CV_64FC1, _dc), rvec, tvec, true);
		
		
		rMat = Mat(3, 3, CV_64FC1, rot);
		
		Rodrigues(rvec, rMat);
		
		// Ad-hoc corrections derived from the intrinsics above
		tvec.at<double> (0, 0) += 2.0127743042733985e+02 + 4.69099e-02;
		tvec.at<double> (1, 0) += 1.2606640793496385e+02 - 2.5968e-02;
		tvec.at<double> (2, 0) -= 3.1810194786433851e+02 + 2.31789e-01;
		
		
		
		resultMatrix[0] = rMat.at<double> (0, 0);//*scale;
		resultMatrix[1] = -rMat.at<double> (1, 0);
		resultMatrix[2] = -rMat.at<double> (2, 0);
		resultMatrix[3] = 0;
		resultMatrix[4] = rMat.at<double> (0, 1);
		resultMatrix[5] = -rMat.at<double> (1, 1);//*scale;
		resultMatrix[6] = -rMat.at<double> (2, 1);
		resultMatrix[7] = 0;
		resultMatrix[8] = rMat.at<double> (0, 2);
		resultMatrix[9] = -rMat.at<double> (1, 2);
		resultMatrix[10] = -rMat.at<double> (2, 2);//*scale;
		resultMatrix[11] = 0;
		resultMatrix[12] = 4.0f*(4.0f/3.0f)*(5.0f/3.0f)*(centerPoint.x - 200)/400;
		resultMatrix[13] = -4.0f*(4.0f/3.0f)*1.0f*(centerPoint.y - 120)/240;
		resultMatrix[14] = -4.0f;
		resultMatrix[15] = 1.0f;
		
	}	
Example #16
// Estimates the head's 3D position and orientation
void ofxFaceTracker2Instance::calculatePoseMatrix(){
    enum FACIAL_FEATURE {
        NOSE=30,
        RIGHT_EYE=36,
        LEFT_EYE=45,
        RIGHT_SIDE=0,
        LEFT_SIDE=16,
        EYEBROW_RIGHT=21,
        EYEBROW_LEFT=22,
        MOUTH_UP=51,
        MOUTH_DOWN=57,
        MOUTH_RIGHT=48,
        MOUTH_LEFT=54,
        SELLION=27,
        MOUTH_CENTER_TOP=62,
        MOUTH_CENTER_BOTTOM=66,
        MENTON=8
    };
    
    
    // Anthropometric measurements for a male adult
    // Relative positions of various facial features with respect to the sellion
    // Values taken from https://en.wikipedia.org/wiki/Human_head
    // Z points forward
    const cv::Point3f P3D_SELLION(0., 0.,0.);
    const cv::Point3f P3D_RIGHT_EYE(-65.5, -5.,-20.);
    const cv::Point3f P3D_LEFT_EYE(65.5, -5.,-20.);
    const cv::Point3f P3D_RIGHT_EAR( -77.5,-6.,-100.);
    const cv::Point3f P3D_LEFT_EAR(77.5,-6.,-100.);
    const cv::Point3f P3D_NOSE( 0.,-48.0,21.0);
    const cv::Point3f P3D_STOMMION( 0.,-75.0,10.0);
    const cv::Point3f P3D_MENTON( 0.,-133.0, 0.);
    
    float aov = 280;
    float focalLength = info.inputWidth * ofDegToRad(aov);
    float opticalCenterX = info.inputWidth/2;
    float opticalCenterY = info.inputHeight/2;
    
    cv::Mat1d projectionMat = cv::Mat::zeros(3,3,CV_64F);
    poseProjection = projectionMat;
    poseProjection(0,0) = focalLength;
    poseProjection(1,1) = focalLength;
    poseProjection(0,2) = opticalCenterX;
    poseProjection(1,2) = opticalCenterY;
    poseProjection(2,2) = 1;
    
    std::vector<cv::Point3f> head_points;
    
    head_points.push_back(P3D_SELLION);
    head_points.push_back(P3D_RIGHT_EYE);
    head_points.push_back(P3D_LEFT_EYE);
    head_points.push_back(P3D_RIGHT_EAR);
    head_points.push_back(P3D_LEFT_EAR);
    head_points.push_back(P3D_MENTON);
    head_points.push_back(P3D_NOSE);
    head_points.push_back(P3D_STOMMION);
    
    std::vector<cv::Point2f> detected_points;
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(SELLION)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(RIGHT_EYE)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(LEFT_EYE)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(RIGHT_SIDE)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(LEFT_SIDE)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(MENTON)));
    detected_points.push_back(ofxCv::toCv(getLandmarks().getImagePoint(NOSE)));
    
    auto stomion = (ofxCv::toCv(getLandmarks().getImagePoint(MOUTH_CENTER_TOP))
                    + ofxCv::toCv(getLandmarks().getImagePoint(MOUTH_CENTER_BOTTOM))) * 0.5;
    detected_points.push_back(stomion);
    
    // Find the 3D pose of our head
    solvePnP(head_points, detected_points,
             poseProjection, cv::noArray(),
             poservec, posetvec, false,
#ifdef OPENCV3
             cv::SOLVEPNP_ITERATIVE);
#else
			 CV_ITERATIVE);
#endif
    
    // Black magic: the x and y components of the rotation vector need to be flipped.
    double* r = poservec.ptr<double>(0);
    r[0] *= -1;
    r[1] *= -1;
    
    poseCalculated = true;
}
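To consume the estimated head pose, the rotation vector can be expanded with Rodrigues. A minimal sketch, assuming poservec and posetvec hold doubles, as solvePnP produces by default:

    // Rotation vector -> 3x3 rotation matrix, plus the head position in
    // camera coordinates (same units as the 3D model, millimeters here).
    cv::Mat Rm;
    cv::Rodrigues(poservec, Rm);
    cv::Point3d headPosition(posetvec.at<double>(0),
                             posetvec.at<double>(1),
                             posetvec.at<double>(2));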
Example #17
bool CustomPattern::findRt(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix,
                InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int flags)
{
    return solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, flags);
}