void KeyframeMapper::publishKeyframeAssociations()
{
  // use one marker per association type, so that VO and RANSAC edges keep
  // separate namespaces and colors in rviz
  visualization_msgs::Marker marker_vo;
  marker_vo.header.stamp = ros::Time::now();
  marker_vo.header.frame_id = fixed_frame_;
  marker_vo.id = 0;
  marker_vo.type = visualization_msgs::Marker::LINE_LIST;
  marker_vo.action = visualization_msgs::Marker::ADD;
  marker_vo.ns = "VO";
  marker_vo.scale.x = 0.002;
  marker_vo.color.a = 1.0;
  marker_vo.color.r = 0.0;
  marker_vo.color.g = 1.0; // green
  marker_vo.color.b = 0.0;

  visualization_msgs::Marker marker_ransac = marker_vo;
  marker_ransac.ns = "RANSAC";
  marker_ransac.color.r = 1.0; // yellow

  for (unsigned int as_idx = 0; as_idx < associations_.size(); ++as_idx)
  {
    // set up shortcut references
    const rgbdtools::KeyframeAssociation& association = associations_[as_idx];
    rgbdtools::RGBDKeyframe& keyframe_a = keyframes_[association.kf_idx_a];
    rgbdtools::RGBDKeyframe& keyframe_b = keyframes_[association.kf_idx_b];

    tf::Transform keyframe_a_pose = tfFromEigenAffine(keyframe_a.pose);
    tf::Transform keyframe_b_pose = tfFromEigenAffine(keyframe_b.pose);

    // LINE_LIST consumes points in pairs: start and end of each edge
    geometry_msgs::Point point_a, point_b;
    point_a.x = keyframe_a_pose.getOrigin().getX();
    point_a.y = keyframe_a_pose.getOrigin().getY();
    point_a.z = keyframe_a_pose.getOrigin().getZ();
    point_b.x = keyframe_b_pose.getOrigin().getX();
    point_b.y = keyframe_b_pose.getOrigin().getY();
    point_b.z = keyframe_b_pose.getOrigin().getZ();

    if (association.type == rgbdtools::KeyframeAssociation::VO)
    {
      marker_vo.points.push_back(point_a);
      marker_vo.points.push_back(point_b);
    }
    else if (association.type == rgbdtools::KeyframeAssociation::RANSAC)
    {
      marker_ransac.points.push_back(point_a);
      marker_ransac.points.push_back(point_b);
    }
  }

  // publish once per type, after all edge endpoints have been filled in;
  // publishing a partially filled marker inside the loop draws stray lines
  kf_assoc_pub_.publish(marker_vo);
  kf_assoc_pub_.publish(marker_ransac);
}
void KeyframeMapper::buildOctomap(octomap::OcTree& tree)
{
  ROS_INFO("Building Octomap...");
  
  octomap::point3d sensor_origin(0.0, 0.0, 0.0);  

  for (unsigned int kf_idx = 0; kf_idx < keyframes_.size(); ++kf_idx)
  {
    ROS_INFO("Processing keyframe %u", kf_idx);
    const rgbdtools::RGBDKeyframe& keyframe = keyframes_[kf_idx];
    
    PointCloudT cloud;
    keyframe.constructDensePointCloud(cloud, max_range_, max_stdev_);
           
    octomap::pose6d frame_origin = poseTfToOctomap(tfFromEigenAffine(keyframe.pose));

    // build octomap cloud from pcl cloud
    octomap::Pointcloud octomap_cloud;
    for (unsigned int pt_idx = 0; pt_idx < cloud.points.size(); ++pt_idx)
    {
      const PointT& p = cloud.points[pt_idx];
      if (!std::isnan(p.z))
        octomap_cloud.push_back(p.x, p.y, p.z);
    }
    
    tree.insertScan(octomap_cloud, sensor_origin, frame_origin);
  }
}
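// Usage sketch (hypothetical caller, not part of the original class): build
// the occupancy map and save it to disk. The 0.05 m resolution and the
// "map.bt" filename are illustrative assumptions, and buildOctomap() is
// assumed to be accessible from here.
void saveOctomapSketch(KeyframeMapper& mapper)
{
  octomap::OcTree tree(0.05);  // voxel edge length, in meters
  mapper.buildOctomap(tree);   // insert every keyframe's scan
  tree.writeBinary("map.bt");  // the binary .bt format stores occupancy only
}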
void KeyframeMapper::buildColorOctomap(octomap::ColorOcTree& tree)
{
  ROS_INFO("Building Octomap with color...");

  octomap::point3d sensor_origin(0.0, 0.0, 0.0);  

  for (unsigned int kf_idx = 0; kf_idx < keyframes_.size(); ++kf_idx)
  {
    ROS_INFO("Processing keyframe %u", kf_idx);
    const rgbdtools::RGBDKeyframe& keyframe = keyframes_[kf_idx];
       
    // construct the cloud
    PointCloudT::Ptr cloud_unf(new PointCloudT());
    keyframe.constructDensePointCloud(*cloud_unf, max_range_, max_stdev_);
  
    // perform filtering for max z
    pcl::transformPointCloud(*cloud_unf, *cloud_unf, keyframe.pose);
    PointCloudT cloud;
    pcl::PassThrough<PointT> pass;
    pass.setInputCloud (cloud_unf);
    pass.setFilterFieldName ("z");
    pass.setFilterLimits (-std::numeric_limits<double>::infinity(), max_map_z_);
    pass.filter(cloud);
    pcl::transformPointCloud(cloud, cloud, keyframe.pose.inverse());
    
    octomap::pose6d frame_origin = poseTfToOctomap(tfFromEigenAffine(keyframe.pose));
    
    // build octomap cloud from pcl cloud
    octomap::Pointcloud octomap_cloud;
    for (unsigned int pt_idx = 0; pt_idx < cloud.points.size(); ++pt_idx)
    {
      const PointT& p = cloud.points[pt_idx];
      if (!std::isnan(p.z))
        octomap_cloud.push_back(p.x, p.y, p.z);
    }
    
    // insert scan (only xyz considered, no colors)
    tree.insertScan(octomap_cloud, sensor_origin, frame_origin);
    
    // insert colors
    PointCloudT cloud_tf;
    pcl::transformPointCloud(cloud, cloud_tf, keyframe.pose);
    for (unsigned int pt_idx = 0; pt_idx < cloud_tf.points.size(); ++pt_idx)
    {
      const PointT& p = cloud_tf.points[pt_idx];
      if (!std::isnan(p.z))
      {
        octomap::point3d endpoint(p.x, p.y, p.z);
        octomap::ColorOcTreeNode* n = tree.search(endpoint);
        if (n) n->setColor(p.r, p.g, p.b); 
      }
    }
    
  }

  // propagate occupancy changes up the tree once, after all scans are in
  tree.updateInnerOccupancy();
}
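// Companion sketch (hypothetical, same assumptions as above): a ColorOcTree
// should be saved with write() to the full .ot format, since the binary .bt
// format drops the per-voxel color payload.
void saveColorOctomapSketch(KeyframeMapper& mapper)
{
  octomap::ColorOcTree tree(0.05);
  mapper.buildColorOctomap(tree);
  tree.write("map_color.ot");  // .ot keeps node colors
}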
bool KeyframeMapper::processFrame(
  const rgbdtools::RGBDFrame& frame, 
  const AffineTransform& pose)
{
  // add the frame pose to the path vector
  geometry_msgs::PoseStamped frame_pose; 
  tf::Transform frame_tf = tfFromEigenAffine(pose);
  tf::poseTFToMsg(frame_tf, frame_pose.pose);
 
  // update the header of the pose for the path
  frame_pose.header.frame_id = fixed_frame_;
  frame_pose.header.seq = frame.header.seq;
  frame_pose.header.stamp.sec = frame.header.stamp.sec;
  frame_pose.header.stamp.nsec = frame.header.stamp.nsec;
    
  path_msg_.poses.push_back(frame_pose);
   
  // determine if a new keyframe is needed
  bool result; 

  if(keyframes_.empty() || manual_add_)
  {
    result = true;
  }
  else
  {
    double dist, angle;
    getTfDifference(tfFromEigenAffine(pose), 
                    tfFromEigenAffine(keyframes_.back().pose), 
                    dist, angle);

    if (dist > kf_dist_eps_ || angle > kf_angle_eps_)
      result = true;
    else 
      result = false;
  }

  if (result)
  {
    addKeyframe(frame, pose);
  }
  return result;
}
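// A minimal sketch of the dist/angle test above, assuming getTfDifference
// reduces the relative transform between two poses to a translation norm and
// a rotation angle (illustrative reconstruction, not the library source):
static void getTfDifferenceSketch(
  const tf::Transform& a, const tf::Transform& b,
  double& dist, double& angle)
{
  tf::Transform d = a.inverse() * b;   // relative motion from pose a to pose b
  dist  = d.getOrigin().length();      // translation magnitude, in meters
  angle = d.getRotation().getAngle();  // rotation magnitude, in radians
}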
void pathEigenAffineToROS(
  const AffineTransformVector& path,
  PathMsg& path_msg)
{
  assert(path.size() == path_msg.poses.size());

  for (unsigned int idx = 0; idx < path.size(); ++idx)
  {
    tf::Transform pose_tf = tfFromEigenAffine(path[idx]);
    tf::poseTFToMsg(pose_tf, path_msg.poses[idx].pose);
  }
}
void VisualOdometry::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  ros::WallTime start = ros::WallTime::now();

  // **** initialize ***************************************************

  if (!initialized_)
  {
    initialized_ = getBaseToCameraTf(rgb_msg->header);
    init_time_ = rgb_msg->header.stamp;
    if (!initialized_) return;

    motion_estimation_.setBaseToCameraTf(eigenAffineFromTf(b2c_));
  }

  // **** create frame *************************************************

  ros::WallTime start_frame = ros::WallTime::now();
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame); 
  ros::WallTime end_frame = ros::WallTime::now();

  // **** find features ************************************************

  ros::WallTime start_features = ros::WallTime::now();
  feature_detector_->findFeatures(frame);
  ros::WallTime end_features = ros::WallTime::now();

  // **** registration *************************************************
  
  ros::WallTime start_reg = ros::WallTime::now();
  AffineTransform m = motion_estimation_.getMotionEstimation(frame);
  tf::Transform motion = tfFromEigenAffine(m);
  f2b_ = motion * f2b_;
  ros::WallTime end_reg = ros::WallTime::now();
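  // Note: `motion` is the estimated pose increment expressed in the fixed
  // frame, so it pre-multiplies the accumulated fixed-to-base transform;
  // after n frames, f2b_ = m_n * ... * m_2 * m_1 * f2b_initial.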

  // **** publish outputs **********************************************
  
  if (publish_tf_)    publishTf(rgb_msg->header);
  if (publish_odom_)  publishOdom(rgb_msg->header);
  if (publish_path_)  publishPath(rgb_msg->header);
  if (publish_pose_)  publishPoseStamped(rgb_msg->header);
  
  if (publish_feature_cloud_) publishFeatureCloud(frame);
  if (publish_feature_cov_) publishFeatureCovariances(frame);
  
  if (publish_model_cloud_) publishModelCloud();
  if (publish_model_cov_)   publishModelCovariances();

  // **** print diagnostics *******************************************

  ros::WallTime end = ros::WallTime::now();

  frame_count_++;
  
  int n_features = frame.keypoints.size();
  int n_valid_features = frame.n_valid_keypoints;
  int n_model_pts = motion_estimation_.getModelSize();

  double d_frame    = 1000.0 * (end_frame    - start_frame   ).toSec();
  double d_features = 1000.0 * (end_features - start_features).toSec();
  double d_reg      = 1000.0 * (end_reg      - start_reg     ).toSec();
  double d_total    = 1000.0 * (end          - start         ).toSec();

  diagnostics(n_features, n_valid_features, n_model_pts,
              d_frame, d_features, d_reg, d_total);
}
/** In the event that the keyframe poses change (from pose-graph solving),
 * this function propagates the changes to the path message.
 */
void KeyframeMapper::updatePathFromKeyframePoses()
{   
  int kf_size = keyframes_.size();
  int f_size = path_msg_.poses.size();
  
  // temporary store the new path
  AffineTransformVector path_new;
  path_new.resize(f_size);
  
  if (kf_size < 2) return; 
  
  for (int kf_idx = 0; kf_idx < kf_size - 1; ++kf_idx)
  {
    // the indices of the current and next keyframes (a and b)   
    const rgbdtools::RGBDKeyframe& keyframe_a = keyframes_[kf_idx];
    const rgbdtools::RGBDKeyframe& keyframe_b = keyframes_[kf_idx + 1];
    
    // the corresponding frame indices
    int f_idx_a = keyframe_a.index;
    int f_idx_b = keyframe_b.index;
         
    // the new poses of keyframes a and b (after graph solving)
    tf::Transform kf_pose_a = tfFromEigenAffine(keyframe_a.pose);
    tf::Transform kf_pose_b = tfFromEigenAffine(keyframe_b.pose);
    
    // the previous pose of keyframe a and b (before graph solving)
    tf::Transform kf_pose_a_prev, kf_pose_b_prev;
    tf::poseMsgToTF(path_msg_.poses[f_idx_a].pose, kf_pose_a_prev);
    tf::poseMsgToTF(path_msg_.poses[f_idx_b].pose, kf_pose_b_prev);
    
    // the motion, in the camera frame (after and before graph solving)
    tf::Transform kf_motion      = kf_pose_a.inverse() * kf_pose_b;
    tf::Transform kf_motion_prev = kf_pose_a_prev.inverse() * kf_pose_b_prev;
    
    // the correction from the graph solving
    tf::Transform correction = kf_motion_prev.inverse() * kf_motion;
    
    // update the poses in-between keyframes
    for (int f_idx = f_idx_a; f_idx < f_idx_b; ++f_idx)
    {
      // calculate interpolation scale
      double interp_scale = (double)(f_idx - f_idx_a) / (double)(f_idx_b - f_idx_a);
      
      // create interpolated correction translation and rotation
      tf::Vector3 v_interp = correction.getOrigin() * interp_scale;
      // slerp() is const and returns the interpolated quaternion, so the
      // result must be assigned; calling it on an identity quaternion and
      // discarding the return value would leave q_interp at identity
      tf::Quaternion q_interp = tf::Quaternion::getIdentity().slerp(
        correction.getRotation(), interp_scale);
      
      // create interpolated correction
      tf::Transform interpolated_correction;
      interpolated_correction.setOrigin(v_interp);
      interpolated_correction.setRotation(q_interp);
      
      // the previous frame pose
      tf::Transform frame_pose_prev;
      tf::poseMsgToTF(path_msg_.poses[f_idx].pose, frame_pose_prev);
      
      // the previous frame motion
      tf::Transform frame_motion_prev = kf_pose_a_prev.inverse() * frame_pose_prev;
      
      // the interpolated motion
      tf::Transform interpolated_motion = frame_motion_prev * interpolated_correction;
      
      // calculate the interpolated pose
      path_new[f_idx] = keyframe_a.pose * eigenAffineFromTf(interpolated_motion);
    }  
  }

  // update the last pose
  const rgbdtools::RGBDKeyframe& last_kf = keyframes_[kf_size - 1];

  tf::Transform last_kf_pose_prev;
  tf::poseMsgToTF(path_msg_.poses[last_kf.index].pose, last_kf_pose_prev);
  
  // update the poses in-between last keyframe and end of vo path
  for (int f_idx = last_kf.index; f_idx < f_size; ++f_idx)
  {
    // the previous frame pose
    tf::Transform frame_pose_prev;
    tf::poseMsgToTF(path_msg_.poses[f_idx].pose, frame_pose_prev);
    
    // the previous frame motion
    tf::Transform frame_motion_prev = last_kf_pose_prev.inverse() * frame_pose_prev;
    
    // calculate the new pose
    path_new[f_idx] = last_kf.pose * eigenAffineFromTf(frame_motion_prev);
  } 
  
  // copy over the interpolated path
  pathEigenAffineToROS(path_new, path_msg_);
}
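// Self-contained check of the slerp semantics used above (hypothetical
// helper, not in the original file; assumes <cassert> and <cmath> are
// included): interpolating halfway (t = 0.5) toward a 90-degree yaw about z
// yields a 45-degree rotation.
static void slerpSanityCheck()
{
  tf::Quaternion full(tf::Vector3(0, 0, 1), M_PI / 2.0); // 90 deg about z
  tf::Quaternion half = tf::Quaternion::getIdentity().slerp(full, 0.5);
  assert(std::fabs(half.getAngle() - M_PI / 4.0) < 1e-6);
}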
void KeyframeMapper::publishKeyframePose(int i)
{
  rgbdtools::RGBDKeyframe& keyframe = keyframes_[i];

  // **** publish camera pose

  visualization_msgs::Marker marker;
  marker.header.stamp = ros::Time::now();
  marker.header.frame_id = fixed_frame_;
  marker.ns = "keyframe_poses";
  marker.id = i;
  marker.type = visualization_msgs::Marker::ARROW;
  marker.action = visualization_msgs::Marker::ADD;

  marker.points.resize(2);

  // start point for the arrow
  tf::Transform keyframe_pose = tfFromEigenAffine(keyframe.pose);
  marker.points[0].x = keyframe_pose.getOrigin().getX();
  marker.points[0].y = keyframe_pose.getOrigin().getY();
  marker.points[0].z = keyframe_pose.getOrigin().getZ();

  // end point for the arrow
  tf::Transform ep; 
  ep.setIdentity();
  ep.setOrigin(tf::Vector3(0.00, 0.00, 0.12)); // z = arrow length
  ep = keyframe_pose * ep;

  marker.points[1].x = ep.getOrigin().getX();
  marker.points[1].y = ep.getOrigin().getY();
  marker.points[1].z = ep.getOrigin().getZ(); 
  
  marker.scale.x = 0.02; // shaft diameter
  marker.scale.y = 0.05; // head diameter

  marker.color.a = 1.0;
  marker.color.r = 0.0;
  marker.color.g = 1.0;
  marker.color.b = 0.0;

  poses_pub_.publish(marker);

  // **** publish frame index text

  visualization_msgs::Marker marker_text;
  marker_text.header.stamp = ros::Time::now();
  marker_text.header.frame_id = fixed_frame_;
  marker_text.ns = "keyframe_indexes";
  marker_text.id = i;
  marker_text.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
  marker_text.action = visualization_msgs::Marker::ADD;

  tf::poseTFToMsg(keyframe_pose, marker_text.pose);

  marker_text.pose.position.z -= 0.05;

  char label[16];
  snprintf(label, sizeof(label), "%d", i); // avoid overflow for large indices
  marker_text.text = label;

  marker_text.color.a = 1.0;
  marker_text.color.r = 1.0;
  marker_text.color.g = 1.0;
  marker_text.color.b = 0.0;

  marker_text.scale.z = 0.05; // text height

  poses_pub_.publish(marker_text);
}