/** Synchronized RGB-D callback for the keyframe mapper.
 *
 * Looks up the fixed-frame -> camera transform at the image timestamp,
 * wraps the ROS messages in an RGBDFrame, and hands the frame to
 * processFrame(). If a new keyframe was added, its data is published,
 * and the current path is (re)published either way.
 *
 * @param rgb_msg   RGB image message
 * @param depth_msg depth image message registered to the RGB frame
 * @param info_msg  camera intrinsics for the RGB image
 */
void KeyframeMapper::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  tf::StampedTransform transform;

  const ros::Time& time = rgb_msg->header.stamp;

  try
  {
    // Wait briefly for TF to catch up with the image timestamp.
    tf_listener_.waitForTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, ros::Duration(0.1));
    tf_listener_.lookupTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, transform);
  }
  catch (const tf::TransformException& ex)
  {
    // Previously this was a silent catch(...); log why the frame is
    // dropped so TF configuration problems are visible to the operator.
    ROS_WARN("KeyframeMapper: TF lookup failed, dropping frame: %s", ex.what());
    return;
  }
  catch (...)
  {
    // Fallback: preserve the original drop-frame behavior for any
    // other exception type.
    ROS_WARN("KeyframeMapper: unknown error during TF lookup, dropping frame");
    return;
  }

  // create a new frame and increment the counter
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame);
  frame.index = rgbd_frame_index_;
  rgbd_frame_index_++;

  bool result = processFrame(frame, eigenAffineFromTf(transform));
  if (result) publishKeyframeData(keyframes_.size() - 1);

  publishPath();
}
/** Synchronized RGB-D callback for the feature viewer.
 *
 * Builds an RGBDFrame from the ROS messages, runs the configured
 * feature detector on it, optionally visualizes / publishes the
 * results, and prints a one-line timing diagnostic. The whole body
 * runs under mutex_ so concurrent callbacks do not interleave.
 *
 * @param rgb_msg   RGB image message
 * @param depth_msg depth image message registered to the RGB frame
 * @param info_msg  camera intrinsics for the RGB image
 */
void FeatureViewer::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  // FIX: the original bare lock()/unlock() pair was not exception-safe:
  // a throw anywhere in the body (e.g. from findFeatures) would leave
  // mutex_ locked forever and deadlock every subsequent callback.
  // Guard the critical section so the mutex is always released.
  mutex_.lock();
  try
  {
    ros::WallTime start = ros::WallTime::now();

    // create frame
    rgbdtools::RGBDFrame frame;
    createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame);

    // find features
    feature_detector_->findFeatures(frame);
    ros::WallTime end = ros::WallTime::now();

    // visualize
    if (show_keypoints_) showKeypointImage(frame);
    if (publish_cloud_) publishFeatureCloud(frame);
    if (publish_covariances_) publishFeatureCovariances(frame);

    // print diagnostics
    int n_features = frame.keypoints.size();
    int n_valid_features = frame.n_valid_keypoints;

    double d_total = 1000.0 * (end - start).toSec();

    printf("[FV %d] %s[%d][%d]: TOTAL %3.1f\n",
      frame_count_, detector_type_.c_str(),
      n_features, n_valid_features, d_total);

    frame_count_++;
  }
  catch (...)
  {
    // Release the lock before propagating, exactly as a scoped lock would.
    mutex_.unlock();
    throw;
  }
  mutex_.unlock();
}
/** Synchronized RGB-D callback: the core visual-odometry pipeline.
 *
 * For each RGB + depth + camera-info triplet this:
 *  1. lazily initializes the base-to-camera transform (b2c_) from TF,
 *  2. assembles an RGBDFrame from the ROS messages,
 *  3. detects sparse features in the frame,
 *  4. estimates the incremental camera motion and accumulates it into
 *     f2b_ (the fixed-frame-to-base pose),
 *  5. publishes whichever outputs are enabled (tf, odom, path, pose,
 *     feature/model clouds and covariances),
 *  6. reports per-stage wall-clock timings via diagnostics().
 *
 * @param rgb_msg   RGB image message
 * @param depth_msg depth image message registered to the RGB frame
 * @param info_msg  camera intrinsics for the RGB image
 */
void VisualOdometry::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  ros::WallTime start = ros::WallTime::now();

  // **** initialize ***************************************************

  if (!initialized_)
  {
    // b2c_ comes from TF; frames are dropped until it is available.
    initialized_ = getBaseToCameraTf(rgb_msg->header);
    init_time_ = rgb_msg->header.stamp;
    if (!initialized_) return;

    motion_estimation_.setBaseToCameraTf(eigenAffineFromTf(b2c_));
  }

  // **** create frame *************************************************

  ros::WallTime start_frame = ros::WallTime::now();
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame);
  ros::WallTime end_frame = ros::WallTime::now();

  // **** find features ************************************************

  ros::WallTime start_features = ros::WallTime::now();
  feature_detector_->findFeatures(frame);
  ros::WallTime end_features = ros::WallTime::now();

  // **** registration *************************************************

  ros::WallTime start_reg = ros::WallTime::now();
  AffineTransform m = motion_estimation_.getMotionEstimation(frame);
  tf::Transform motion = tfFromEigenAffine(m);
  // Left-multiply: compose the new incremental motion onto the
  // accumulated fixed-frame-to-base pose.
  f2b_ = motion * f2b_;
  ros::WallTime end_reg = ros::WallTime::now();

  // **** publish outputs **********************************************

  if (publish_tf_) publishTf(rgb_msg->header);
  if (publish_odom_) publishOdom(rgb_msg->header);
  if (publish_path_) publishPath(rgb_msg->header);
  if (publish_pose_) publishPoseStamped(rgb_msg->header);

  if (publish_feature_cloud_) publishFeatureCloud(frame);
  if (publish_feature_cov_) publishFeatureCovariances(frame);

  if (publish_model_cloud_) publishModelCloud();
  if (publish_model_cov_) publishModelCovariances();

  // **** print diagnostics *******************************************

  ros::WallTime end = ros::WallTime::now();

  frame_count_++;

  int n_features = frame.keypoints.size();
  int n_valid_features = frame.n_valid_keypoints;
  int n_model_pts = motion_estimation_.getModelSize();

  // Per-stage wall-clock durations, converted to milliseconds.
  double d_frame = 1000.0 * (end_frame - start_frame ).toSec();
  double d_features = 1000.0 * (end_features - start_features).toSec();
  double d_reg = 1000.0 * (end_reg - start_reg ).toSec();
  double d_total = 1000.0 * (end - start ).toSec();

  diagnostics(n_features, n_valid_features, n_model_pts,
              d_frame, d_features, d_reg, d_total);
}