Example #1
void PatternTrackingInfo::computePose(const Pattern& pattern, const CameraCalibration& calibration)
{
  cv::Mat Rvec;
  cv::Mat_<float> Tvec;
  cv::Mat raux,taux;

  cv::solvePnP(pattern.points3d, points2d, calibration.getIntrinsic(), calibration.getDistorsion(), raux, taux);
  raux.convertTo(Rvec, CV_32F);
  taux.convertTo(Tvec, CV_32F);

  cv::Mat_<float> rotMat(3,3); 
  cv::Rodrigues(Rvec, rotMat);

  // Copy to transformation matrix
  for (int col=0; col<3; col++)
  {
    for (int row=0; row<3; row++)
    {        
     pose3d.r().mat[row][col] = rotMat(row,col); // Copy rotation component
    }
    pose3d.t().data[col] = Tvec(col); // Copy translation component
  }

  // Since solvePnP finds the camera location w.r.t. the marker pose, we invert it to get the marker pose w.r.t. the camera.
  pose3d = pose3d.getInverted();
}
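
// Transformation's internals aren't shown in these examples. Assuming r() exposes
// the rotation as a 3x3 array mat[row][col] and t() the translation as data[0..2],
// inverting a rigid transform [R|t] amounts to [R^T | -R^T t]. A minimal sketch of
// what getInverted() presumably does, written as a free function under those
// assumptions (the argument is taken by value since the const-ness of the
// accessors is unknown):
Transformation invertRigid(Transformation p)
{
  Transformation inv;
  for (int col = 0; col < 3; col++)
  {
    for (int row = 0; row < 3; row++)
    {
      // The inverse of a rotation matrix is its transpose.
      inv.r().mat[row][col] = p.r().mat[col][row];
    }
  }
  for (int row = 0; row < 3; row++)
  {
    // Inverted translation: t' = -R^T * t.
    float v = 0;
    for (int col = 0; col < 3; col++)
      v += p.r().mat[col][row] * p.t().data[col];
    inv.t().data[row] = -v;
  }
  return inv;
}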
Example #2
void MarkerDetector::estimatePosition(std::vector<Marker>& detectedMarkers)
{
    for (size_t i=0; i<detectedMarkers.size(); i++)
    {					
        Marker& m = detectedMarkers[i];

        cv::Mat Rvec;
        cv::Mat_<float> Tvec;
        cv::Mat raux,taux;
        cv::solvePnP(m_markerCorners3d, m.points, camMatrix, distCoeff, raux, taux);
        raux.convertTo(Rvec, CV_32F);
        taux.convertTo(Tvec, CV_32F);

        cv::Mat_<float> rotMat(3,3); 
        cv::Rodrigues(Rvec, rotMat);

        // Copy to transformation matrix
        for (int col=0; col<3; col++)
        {
            for (int row=0; row<3; row++)
            {        
                m.transformation.r().mat[row][col] = rotMat(row,col); // Copy rotation component
            }
            m.transformation.t().data[col] = Tvec(col); // Copy translation component
        }

        // Since solvePnP finds the camera location w.r.t. the marker pose, we invert it to get the marker pose w.r.t. the camera.
        m.transformation = m.transformation.getInverted();
    }
}
Example #3
// We can use the precise location of marker corners to estimate a transformation between our camera and a marker in 3D.
// This operation is known as pose estimation from 2D-3D correspondences.
// This process finds a Euclidean transformation (rotation + translation) between the camera and the object.
// We define the 3D coordinates of the marker corners by placing the marker on the XY plane (Z=0),
// with the marker center at the origin (0,0,0).
// Then we find the camera location using the 2D-3D correspondences as input for cv::solvePnP.
void MarkerDetector::estimatePosition(std::vector<Marker>& detectedMarkers)
{
	for (size_t i = 0; i < detectedMarkers.size(); i++) {
		Marker& m = detectedMarkers[i];
		cv::Mat Rvec;
		cv::Mat_<float> Tvec;
		cv::Mat raux;
		cv::Mat taux;
		// See: http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#solvepnp
		cv::solvePnP(markerCorners3D, m.points, camMatrix, distCoeff, raux, taux);
		// See: http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto
		raux.convertTo(Rvec, CV_32F);
		taux.convertTo(Tvec, CV_32F);
		cv::Mat_<float> rotMat(3, 3);
		// See: http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#rodrigues
		cv::Rodrigues(Rvec, rotMat);
		// Copy to transformation matrix.
		m.transformation = Transformation();
		for (int col = 0; col < 3; col++) {
			for (int row = 0; row < 3; row++) {
				// Copy rotation component.
				m.transformation.r().mat[row][col] = rotMat(row, col);
			}
			// Copy translation component.
			m.transformation.t().data[col] = Tvec(col);
		}
		// Since solvePnP finds the camera location with regard to the marker pose,
		// we invert it to get the marker pose with regard to the camera.
		m.transformation = m.transformation.getInverted();
	}
}
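
// Neither estimatePosition() variant shows how the 3D corner set (markerCorners3D /
// m_markerCorners3d) is initialized. Following the comments above (marker on the
// XY plane, Z=0, centered at the origin), a minimal sketch; the helper name, the
// size parameter, and the corner order are assumptions, and the order must match
// the detected 2D corners stored in m.points:
#include <opencv2/core/core.hpp>
#include <vector>

std::vector<cv::Point3f> buildMarkerCorners3D(float markerSize)
{
	const float half = markerSize * 0.5f;
	std::vector<cv::Point3f> corners;
	corners.push_back(cv::Point3f(-half, -half, 0)); // counter-clockwise on the XY plane
	corners.push_back(cv::Point3f( half, -half, 0));
	corners.push_back(cv::Point3f( half,  half, 0));
	corners.push_back(cv::Point3f(-half,  half, 0));
	return corners;
}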
Example #4
Marker::Marker(const std::vector<cv::Point2f>& corners, int _id) : std::vector<cv::Point2f>(corners) {
    id = _id;
    ssize = -1;
    // Mark the pose as "not yet estimated" with sentinel values.
    for (int i = 0; i < 3; i++)
        Tvec(i) = Rvec(i) = -999999;
}
Example #5
Marker::Marker() {
    id = -1;
    ssize = -1;
    // Same sentinel initialization as the corner-list constructor above.
    for (int i = 0; i < 3; i++)
        Tvec(i) = Rvec(i) = -999999;
}