Example #1
CameraPose G2ODriver::GetPose( g2o::HyperGraph::Vertex& vertex, const Eigen::Matrix6d& covariance )
{
  g2o::VertexCam& vertex_view = dynamic_cast<g2o::VertexCam&>( vertex );

  // In g2o, the translation component of the estimate is the camera position.
  Eigen::Vector3d position = vertex_view.estimate().translation();
  const Eigen::Quaterniond& orientation = vertex_view.estimate().rotation();

  return CameraPose(position, orientation, covariance);
}
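
A hedged usage sketch for the function above: after running the optimizer, a vertex is fetched and converted. The vertex id, the driver and optimizer instances, and the identity covariance are illustrative assumptions (Eigen::Matrix6d is taken to be the same project typedef used in the signature above).

// Hypothetical usage sketch; `driver` and `optimizer` are assumed to exist.
g2o::HyperGraph::Vertex* vertex = optimizer.vertex(0);
if ( vertex != nullptr )
{
  // Placeholder covariance; a real caller would obtain it from the optimizer.
  Eigen::Matrix6d covariance = Eigen::Matrix6d::Identity();
  CameraPose pose = driver.GetPose(*vertex, covariance);
}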
Example #2
// Convert from tf::Pose to CameraPose (position and orientation)
inline void TFPose2CameraPose(const tf::Pose& pose, CameraPose& cameraPose)
{
  // Convert the position to an OpenCV point
  tf::Vector3 position_tf = pose.getOrigin();
  cv::Point3d position = cv::Point3d(position_tf.getX(), position_tf.getY(), position_tf.getZ());

  // Convert the orientation to an OpenCV quaternion in (w, x, y, z) order
  tf::Quaternion orientation_tf = pose.getRotation();
  cv::Vec4d orientation(orientation_tf.getW(), orientation_tf.getX(), orientation_tf.getY(), orientation_tf.getZ());

  cameraPose = CameraPose(position, orientation);
}
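
Example #3 below also calls the inverse helper, CameraPose2TFPose, whose body is not among these snippets. A plausible sketch, assuming CameraPose exposes GetPosition() as a cv::Point3d and GetOrientationQuaternion() as a cv::Vec4d in the same (w, x, y, z) order used by the constructor above:

// Hypothetical sketch of the inverse conversion; the accessor names and
// return types are assumptions, not the project's confirmed API.
inline void CameraPose2TFPose(const CameraPose& cameraPose, tf::Pose& pose)
{
  cv::Point3d position = cameraPose.GetPosition();
  cv::Vec4d orientation = cameraPose.GetOrientationQuaternion();

  // tf::Quaternion takes (x, y, z, w), while the cv::Vec4d holds (w, x, y, z).
  pose.setOrigin( tf::Vector3(position.x, position.y, position.z) );
  pose.setRotation( tf::Quaternion(orientation[1], orientation[2], orientation[3], orientation[0]) );
}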
Example #3
void sptam::sptam_node::onImages(
  const sensor_msgs::ImageConstPtr& l_image_msg, const sensor_msgs::CameraInfoConstPtr& left_info,
  const sensor_msgs::ImageConstPtr& r_image_msg, const sensor_msgs::CameraInfoConstPtr& right_info
)
{
  ROS_INFO_STREAM("dt: " << (l_image_msg->header.stamp - r_image_msg->header.stamp).toSec());

  // Save current Time
  ros::Time currentTime = l_image_msg->header.stamp;

  // If using odometry, try to get a new estimate for the current pose.
  // If odometry is not used, camera_to_odom is left empty.
  tf::StampedTransform camera_to_odom;
  if ( use_odometry_ )
  {
    if ( not getCameraInOdom(camera_to_odom, currentTime) )
      return;

    TFPose2CameraPose(odom_to_map_ * camera_to_odom, cameraPose_);
  }
  else
  {
    cv::Point3d currentCameraPosition;
    cv::Vec4d currentCameraOrientation;
    motionModel_->PredictNextCameraPose(currentCameraPosition, currentCameraOrientation);
    cameraPose_ = CameraPose( currentCameraPosition, currentCameraOrientation );
  }

  // If SPTAM has not been initialized yet, do it
  if ( sptam_ == nullptr )
  {
    ROS_INFO_STREAM("init calib");
    loadCameraCalibration(left_info, right_info);
  }

  // Convert the images to OpenCV cv::Mat format
  cv_bridge::CvImageConstPtr bridgeLeft_ptr = cv_bridge::toCvShare(l_image_msg, "rgb8");
  cv_bridge::CvImageConstPtr bridgeRight_ptr = cv_bridge::toCvShare(r_image_msg, "rgb8");

  // save images
  cv::Mat imageLeft = bridgeLeft_ptr->image;
  cv::Mat imageRight = bridgeRight_ptr->image;

  #ifdef SHOW_PROFILING
    double start, end;
    start = GetSeg();

    double startStep, endStep;
    startStep = GetSeg();
  #endif // SHOW_PROFILING

  FeatureExtractorThread featureExtractorThreadLeft(imageLeft, *featureDetector_, *descriptorExtractor_);
  FeatureExtractorThread featureExtractorThreadRight(imageRight, *featureDetector_, *descriptorExtractor_);

  featureExtractorThreadLeft.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsLeft = featureExtractorThreadLeft.GetKeyPoints();
  const cv::Mat& descriptorsLeft = featureExtractorThreadLeft.GetDescriptors();

  featureExtractorThreadRight.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsRight = featureExtractorThreadRight.GetKeyPoints();
  const cv::Mat& descriptorsRight = featureExtractorThreadRight.GetDescriptors();

  ImageFeatures imageFeaturesLeft = ImageFeatures(imageLeft, keyPointsLeft, descriptorsLeft, mapper_params_.matchingCellSize);
  ImageFeatures imageFeaturesRight = ImageFeatures(imageRight, keyPointsRight, descriptorsRight, mapper_params_.matchingCellSize);

  #ifdef SHOW_PROFILING
    endStep = GetSeg();
    WriteToLog(" tk Extract: ", startStep, endStep);
  #endif

  // if the map was not initialized, try to build it
  if( not map_.nMapPoints() )
  {
    ROS_INFO("Trying to intialize map...");

    // Create keyFrame
    StereoFrame* frame = new StereoFrame(
      cameraPose_, cameraParametersLeft_,
      stereo_baseline_, cameraParametersRight_,
      imageFeaturesLeft, imageFeaturesRight, true
    );

    frame->SetId( 0 ); // Set Keyframe ID

    // Initialize MapMaker
    // TODO: this bool should be a local variable, so the "if" does not have to re-check map_.nMapPoints()
    /*bool isMapInitialized = */InitFromStereo( map_, *frame, imageLeft, *rowMatcher_);
  }
  // if the map is already initialized, do tracking
  else
  {
    cameraPose_ = sptam_->track(cameraPose_, imageFeaturesLeft, imageFeaturesRight, imageLeft, imageRight);

    #ifdef SHOW_PROFILING
      end = GetSeg();
      std::cout << GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;

      std::stringstream message;
      message << std::fixed <<  GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;
      Logger::Write( message.str() );
    #endif

    // fix the error between map and odom to correct the current pose.
    if ( use_odometry_ )
      fixOdomFrame( cameraPose_, camera_to_odom, currentTime );
    else
    {
      // If odometry is disabled, odom_to_map_ actually holds the map->cam
      // transform (the member is simply reused rather than adding another one).
      motionModel_->UpdateCameraPose(cameraPose_.GetPosition(), cameraPose_.GetOrientationQuaternion());

      tf::Transform cam_to_map;
      CameraPose2TFPose(cameraPose_, cam_to_map);

      tf::StampedTransform base_to_cam;
      transform_listener_.lookupTransform(camera_frame_, base_frame_, currentTime, base_to_cam);

      std::lock_guard<std::mutex> lock( odom_to_map_mutex_ );
      odom_to_map_ = cam_to_map * base_to_cam;
    }
  }

  // Publish Map To be drawn by rviz visualizer
  publishMap();

  // Publish the camera Pose
  publishPose( l_image_msg->header.seq, currentTime, cameraPose_ );
}
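
getCameraInOdom, called near the top of the function above, is not among these snippets. A minimal sketch of such a tf lookup, assuming an odom_frame_ member alongside the camera_frame_ used above (the frame name, the timeout, and the return convention are assumptions):

// Hypothetical sketch; returns false when the transform is unavailable,
// matching how the caller above bails out of onImages.
bool sptam::sptam_node::getCameraInOdom(tf::StampedTransform& camera_to_odom, const ros::Time& t)
{
  try
  {
    transform_listener_.waitForTransform(odom_frame_, camera_frame_, t, ros::Duration(0.1));
    transform_listener_.lookupTransform(odom_frame_, camera_frame_, t, camera_to_odom);
  }
  catch (const tf::TransformException& e)
  {
    ROS_WARN("%s", e.what());
    return false;
  }
  return true;
}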
Example #4
	virtual	void BulldozerTest::GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		VehicleBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(8.54f, 4.57f, 7.70f), Point(-0.66f, -0.28f, -0.69f));
	}
Example #5
	virtual void OverlapObjects_GiantBoxVsKP::GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		TestBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(926.05f, 933.78f, 523.28f), Point(-0.03f, -0.00f, -1.00f));
	}
Example #6
	virtual void OverlapObjects_SpheresVsKP::GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		TestBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(1085.22f, 3963.36f, -9239.41f), Point(-0.59f, -0.59f, 0.56f));
	}
Example #7
	virtual void OverlapAny_BoxesVsPlanetSide::GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		TestBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(-480.68f, 1639.54f, -443.50f), Point(0.59f, -0.60f, 0.54f));
	}
Example #8
	virtual	void	GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		TestBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(35.97f, 15.73f, 25.50f), Point(-0.58f, -0.10f, -0.81f));
	}
Example #9
	virtual	void	GetSceneParams(PINT_WORLD_CREATE& desc)
	{
		TestBase::GetSceneParams(desc);
		desc.mCamera[0] = CameraPose(Point(29.76f, 12.63f, 18.12f), Point(-0.60f, -0.33f, -0.73f));
	}
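
Across Examples #4 through #9, the second Point passed to CameraPose appears to be a unit view direction rather than a look-at target: each one has components whose squared sum is close to 1. A quick check against the values in the snippet above (this reading of the constructor is an assumption, not documented here):

// Verifies that (-0.60, -0.33, -0.73) is approximately unit length.
#include <cmath>
#include <cstdio>

int main()
{
	const float x = -0.60f, y = -0.33f, z = -0.73f;
	std::printf("norm = %f\n", std::sqrt(x*x + y*y + z*z));	// prints ~1.0
	return 0;
}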
Example #10
CameraPose StereoFrame::ComputeRightCameraPose(const CameraPose& leftCameraPose, const double stereo_baseline)
{
  // The position is the baseline converted to world coordinates.
  // The orientation is the same as the left camera.
  return CameraPose(
    leftCameraPose.ToWorld( Eigen::Vector3d(stereo_baseline, 0, 0) ),
    leftCameraPose.GetOrientationQuaternion(),
    leftCameraPose.covariance()
  );
}
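
ToWorld, used above, maps a point from the camera frame into world coordinates. A minimal sketch of the usual rotate-then-translate form, assuming CameraPose stores an Eigen position and orientation quaternion (the member names are assumptions):

// Hypothetical sketch: rotate the camera-frame point into the world frame,
// then offset by the camera position.
Eigen::Vector3d CameraPose::ToWorld(const Eigen::Vector3d& x) const
{
  return orientation_ * x + position_;
}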