void KeyframeMapper::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  tf::StampedTransform transform;

  const ros::Time& time = rgb_msg->header.stamp;

  try
  {
    tf_listener_.waitForTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, ros::Duration(0.1));
    tf_listener_.lookupTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, transform);  
  }
  catch(...)
  {
    // transform not available yet; skip this frame
    return;
  }
  
  // create a new frame and increment the counter
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame); 
  frame.index = rgbd_frame_index_;
  rgbd_frame_index_++;
  
  bool result = processFrame(frame, eigenAffineFromTf(transform));
  if (result) publishKeyframeData(keyframes_.size() - 1);
  
  publishPath();
}
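
// Note: the callback above depends on eigenAffineFromTf to convert the
// tf::Transform into the Eigen type used by rgbdtools. The real helper is
// defined elsewhere in the package; the sketch below (hypothetical name,
// assuming AffineTransform aliases Eigen::Affine3f) shows the idea.

#include <tf/transform_datatypes.h>
#include <Eigen/Geometry>

Eigen::Affine3f eigenAffineFromTfSketch(const tf::Transform& transform)
{
  const tf::Vector3&   t = transform.getOrigin();
  const tf::Quaternion q = transform.getRotation();

  Eigen::Affine3f affine = Eigen::Affine3f::Identity();
  affine.translation() << t.x(), t.y(), t.z();

  // Eigen quaternions are constructed in (w, x, y, z) order
  affine.rotate(Eigen::Quaternionf(q.w(), q.x(), q.y(), q.z()));
  return affine;
}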
Example #2
void pathROSToEigenAffine(
  const PathMsg& path_msg,
  AffineTransformVector& path)
{
  path.clear();
  path.resize(path_msg.poses.size());

  for (size_t idx = 0; idx < path.size(); ++idx)
  {
    tf::Transform pose_tf;
    tf::poseMsgToTF(path_msg.poses[idx].pose, pose_tf);
    path[idx] = eigenAffineFromTf(pose_tf);
  }
}
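
// Hypothetical usage (names are illustrative, not from the package):
// convert an incoming nav_msgs::Path into Eigen form, then measure its
// total length from consecutive translations.

void pathCallback(const PathMsg::ConstPtr& path_msg)
{
  AffineTransformVector path;
  pathROSToEigenAffine(*path_msg, path);

  double length = 0.0;
  for (size_t idx = 1; idx < path.size(); ++idx)
    length += (path[idx].translation() - path[idx - 1].translation()).norm();

  ROS_INFO("path of %zu poses, %.2f m long", path.size(), length);
}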
Example #3
void VisualOdometry::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  ros::WallTime start = ros::WallTime::now();

  // **** initialize ***************************************************

  if (!initialized_)
  {
    initialized_ = getBaseToCameraTf(rgb_msg->header);
    init_time_ = rgb_msg->header.stamp;
    if (!initialized_) return;

    motion_estimation_.setBaseToCameraTf(eigenAffineFromTf(b2c_));
  }

  // **** create frame *************************************************

  ros::WallTime start_frame = ros::WallTime::now();
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame); 
  ros::WallTime end_frame = ros::WallTime::now();

  // **** find features ************************************************

  ros::WallTime start_features = ros::WallTime::now();
  feature_detector_->findFeatures(frame);
  ros::WallTime end_features = ros::WallTime::now();

  // **** registration *************************************************
  
  ros::WallTime start_reg = ros::WallTime::now();
  AffineTransform m = motion_estimation_.getMotionEstimation(frame);
  tf::Transform motion = tfFromEigenAffine(m);
  f2b_ = motion * f2b_;
  ros::WallTime end_reg = ros::WallTime::now();

  // **** publish outputs **********************************************
  
  if (publish_tf_)    publishTf(rgb_msg->header);
  if (publish_odom_)  publishOdom(rgb_msg->header);
  if (publish_path_)  publishPath(rgb_msg->header);
  if (publish_pose_)  publishPoseStamped(rgb_msg->header);
  
  if (publish_feature_cloud_) publishFeatureCloud(frame);
  if (publish_feature_cov_) publishFeatureCovariances(frame);
  
  if (publish_model_cloud_) publishModelCloud();
  if (publish_model_cov_)   publishModelCovariances();

  // **** print diagnostics *******************************************

  ros::WallTime end = ros::WallTime::now();

  frame_count_++;
  
  int n_features = frame.keypoints.size();
  int n_valid_features = frame.n_valid_keypoints;
  int n_model_pts = motion_estimation_.getModelSize();

  double d_frame    = 1000.0 * (end_frame    - start_frame   ).toSec();
  double d_features = 1000.0 * (end_features - start_features).toSec();
  double d_reg      = 1000.0 * (end_reg      - start_reg     ).toSec();
  double d_total    = 1000.0 * (end          - start         ).toSec();

  diagnostics(n_features, n_valid_features, n_model_pts,
              d_frame, d_features, d_reg, d_total);
}
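
// Callbacks with this three-message signature are normally driven by an
// approximate-time synchronizer. The subscription code is not part of the
// snippet; the sketch below shows a typical wiring (topic names, queue
// sizes, and setupSubscribers itself are placeholders).

#include <ros/ros.h>
#include <boost/bind.hpp>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

typedef message_filters::sync_policies::ApproximateTime<
  sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo> RGBDSyncPolicy;

void setupSubscribers(ros::NodeHandle& nh, VisualOdometry& vo)
{
  // static for lifetime in this sketch; in a real node these are members
  static message_filters::Subscriber<sensor_msgs::Image>
    sub_rgb(nh, "rgbd/rgb", 1);
  static message_filters::Subscriber<sensor_msgs::Image>
    sub_depth(nh, "rgbd/depth", 1);
  static message_filters::Subscriber<sensor_msgs::CameraInfo>
    sub_info(nh, "rgbd/info", 1);

  static message_filters::Synchronizer<RGBDSyncPolicy>
    sync(RGBDSyncPolicy(5), sub_rgb, sub_depth, sub_info);

  sync.registerCallback(
    boost::bind(&VisualOdometry::RGBDCallback, &vo, _1, _2, _3));
}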
/** In the event that the keyframe poses change (from pose-graph solving),
 * this function propagates the changes into the path message.
 */
void KeyframeMapper::updatePathFromKeyframePoses()
{   
  int kf_size = keyframes_.size();
  int f_size = path_msg_.poses.size();
  
  // temporarily store the new path
  AffineTransformVector path_new;
  path_new.resize(f_size);
  
  if (kf_size < 2) return; 
  
  for (int kf_idx = 0; kf_idx < kf_size - 1; ++kf_idx)
  {
    // the current and next keyframes (a and b)
    const rgbdtools::RGBDKeyframe& keyframe_a = keyframes_[kf_idx];
    const rgbdtools::RGBDKeyframe& keyframe_b = keyframes_[kf_idx + 1];
    
    // the corresponding frame indices
    int f_idx_a = keyframe_a.index;
    int f_idx_b = keyframe_b.index;
         
    // the new poses of keyframes a and b (after graph solving)
    tf::Transform kf_pose_a = tfFromEigenAffine(keyframe_a.pose);
    tf::Transform kf_pose_b = tfFromEigenAffine(keyframe_b.pose);
    
    // the previous pose of keyframe a and b (before graph solving)
    tf::Transform kf_pose_a_prev, kf_pose_b_prev;
    tf::poseMsgToTF(path_msg_.poses[f_idx_a].pose, kf_pose_a_prev);
    tf::poseMsgToTF(path_msg_.poses[f_idx_b].pose, kf_pose_b_prev);
    
    // the motion, in the camera frame (after and before graph solving)
    tf::Transform kf_motion      = kf_pose_a.inverse() * kf_pose_b;
    tf::Transform kf_motion_prev = kf_pose_a_prev.inverse() * kf_pose_b_prev;
    
    // the correction from the graph solving
    tf::Transform correction = kf_motion_prev.inverse() * kf_motion;
    
    // update the poses in-between keyframes
    for (int f_idx = f_idx_a; f_idx < f_idx_b; ++f_idx)
    {
      // calculate interpolation scale
      double interp_scale = (double)(f_idx - f_idx_a) / (double)(f_idx_b - f_idx_a);
      
      // create the interpolated correction translation and rotation
      // (tf::Quaternion::slerp returns the result; it does not modify
      // the quaternion in place)
      tf::Vector3 v_interp = correction.getOrigin() * interp_scale;
      tf::Quaternion q_interp = tf::Quaternion::getIdentity().slerp(
        correction.getRotation(), interp_scale);
      
      // create interpolated correction
      tf::Transform interpolated_correction;
      interpolated_correction.setOrigin(v_interp);
      interpolated_correction.setRotation(q_interp);
      
      // the previous frame pose
      tf::Transform frame_pose_prev;
      tf::poseMsgToTF(path_msg_.poses[f_idx].pose, frame_pose_prev);
      
      // the previous frame motion
      tf::Transform frame_motion_prev = kf_pose_a_prev.inverse() * frame_pose_prev;
      
      // the interpolated motion
      tf::Transform interpolated_motion = frame_motion_prev * interpolated_correction;
      
      // calculate the interpolated pose
      path_new[f_idx] = keyframe_a.pose * eigenAffineFromTf(interpolated_motion);
    }  
  }

  // update the last pose
  const rgbdtools::RGBDKeyframe& last_kf = keyframes_[kf_size - 1];

  tf::Transform last_kf_pose_prev;
  tf::poseMsgToTF(path_msg_.poses[last_kf.index].pose, last_kf_pose_prev);
  
  // update the poses in-between last keyframe and end of vo path
  for (int f_idx = last_kf.index; f_idx < f_size; ++f_idx)
  {
    // the previous frame pose
    tf::Transform frame_pose_prev;
    tf::poseMsgToTF(path_msg_.poses[f_idx].pose, frame_pose_prev);
    
    // the previous frame motion
    tf::Transform frame_motion_prev = last_kf_pose_prev.inverse() * frame_pose_prev;
    
    // calculate the new pose
    path_new[f_idx] = last_kf.pose * eigenAffineFromTf(frame_motion_prev);
  } 
  
  // copy over the interpolated path
  pathEigenAffineToROS(path_new, path_msg_);
}
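
// pathEigenAffineToROS is the counterpart of pathROSToEigenAffine from
// Example #2. Its body is not shown above; a sketch consistent with that
// helper (hypothetical name, assuming the message already holds one pose
// per frame) could be:

#include <cassert>

void pathEigenAffineToROSSketch(
  const AffineTransformVector& path,
  PathMsg& path_msg)
{
  assert(path.size() == path_msg.poses.size());

  for (size_t idx = 0; idx < path.size(); ++idx)
  {
    tf::Transform pose_tf = tfFromEigenAffine(path[idx]);
    tf::poseTFToMsg(pose_tf, path_msg.poses[idx].pose);
  }
}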