Example #1
void
WmMapServer::step()
{
	// While mapping is disabled, keep broadcasting an identity
	// world -> map transform so the TF tree stays connected.
	if(!do_mapping_)
	{
		tf::StampedTransform World2Map;

		tf::Quaternion q;
		q.setEuler(0.0, 0.0, 0.0);

		World2Map.setOrigin(tf::Vector3(0.0, 0.0, 0.0));
		World2Map.setRotation(q);

		World2Map.frame_id_ = "/world";
		World2Map.stamp_ = ros::Time::now();
		World2Map.child_frame_id_ = worldFrameId_;

		try
		{
			tfBroadcaster_.sendTransform(World2Map);
		}
		catch(tf::TransformException & ex)
		{
			ROS_WARN("WmMapServer::step %s", ex.what());
		}
	}

	// Publish the current map, stamped with the current time.
	publishMap(ros::Time::now());
}
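A minimal driver sketch for step(): in a typical node it would be called from the main loop at a fixed rate. The node name, the default-constructed server, and the 10 Hz rate below are assumptions, not part of the original source.

#include <ros/ros.h>

int main(int argc, char** argv)
{
	ros::init(argc, argv, "wm_map_server"); // node name is an assumption
	ros::NodeHandle nh;

	WmMapServer server;   // assumed default-constructible for this sketch
	ros::Rate rate(10.0); // assumed update rate

	while (ros::ok())
	{
		ros::spinOnce(); // service pending callbacks
		server.step();   // broadcast TF (when not mapping) and publish the map
		rate.sleep();
	}

	return 0;
}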
Example #2
void HectorMappingRos::publishMapLoop(double map_pub_period)
{
  // Convert the publish period (seconds) into a loop rate.
  ros::Rate r(1.0 / map_pub_period);

  while(ros::ok())
  {
    ros::Time mapTime(ros::Time::now());

    // Coarser multi-resolution map levels could be published as well:
    //publishMap(mapPubContainer[2], slamProcessor->getGridMap(2), mapTime);
    //publishMap(mapPubContainer[1], slamProcessor->getGridMap(1), mapTime);

    // Publish the finest map level, guarded by its mutex.
    publishMap(mapPubContainer[0], slamProcessor->getGridMap(0), mapTime, slamProcessor->getMapMutex(0));

    r.sleep();
  }
}
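The loop blocks until shutdown, so it is meant to run on its own thread. A minimal launch sketch, assuming a boost::thread pointer and a 2.0 s publish period (both assumptions, not taken from the excerpt):

#include <boost/thread.hpp>
#include <boost/bind.hpp>

// Hypothetical start-up helper; the node reference, thread pointer and the
// 2.0 s period are assumptions for this sketch.
void startMapPublishLoop(HectorMappingRos& node, boost::thread*& publish_thread)
{
  publish_thread = new boost::thread(
      boost::bind(&HectorMappingRos::publishMapLoop, &node, 2.0));
}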
Example #3
void Node::mapPublishThread(ros::Rate rate)
{
  while(ros::ok()) {
    {
#ifdef USE_HECTOR_TIMING
      // RAII timer: the inner scope bounds the "map publish" timing section.
      hector_diagnostics::TimingSection section("map publish");
#endif
      publishMap();
    }
    rate.sleep();
  }
}
Example #4
void Node::reset()
{
  // Hold the map write lock while clearing all mapping state.
  MapBase::UniqueLock lock(map_->getWriteLock());

  last_map_update_pose_.setIdentity();
  last_map_update_time_ = ros::Time();
  map_odom_transform_.setIdentity();

  if (map_) map_->reset();
  if (matcher_) matcher_->reset();

  // Release the lock before publishing so readers can access the cleared map.
  lock.unlock();
  publishMap();
  publishPose();
}
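Since reset() takes no arguments and republishes the cleared map and pose itself, it is easy to expose as a ROS service. A hedged sketch, assuming a std_srvs/Empty service; the callback and its wiring are not part of the original source:

#include <std_srvs/Empty.h>

// Hypothetical service callback that triggers a full reset of the node.
bool onResetService(std_srvs::Empty::Request&, std_srvs::Empty::Response&, Node* node)
{
  node->reset(); // clears map, matcher and cached poses, then republishes
  return true;
}

Registering it would then be a single advertiseService call on the node handle.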
Example #5
void sptam::sptam_node::onImages(
  const sensor_msgs::ImageConstPtr& l_image_msg, const sensor_msgs::CameraInfoConstPtr& left_info,
  const sensor_msgs::ImageConstPtr& r_image_msg, const sensor_msgs::CameraInfoConstPtr& right_info
)
{
  ROS_INFO_STREAM("dt: " << (l_image_msg->header.stamp - r_image_msg->header.stamp).toSec());

  // Save current Time
  ros::Time currentTime = l_image_msg->header.stamp;

  // If using odometry, try to get a new estimate for the current pose.
  // if not using odometry, camera_to_odom is left blank.
  tf::StampedTransform camera_to_odom;
  if ( use_odometry_ )
  {
    if ( not getCameraInOdom(camera_to_odom, currentTime) )
      return;

    TFPose2CameraPose(odom_to_map_ * camera_to_odom, cameraPose_);
  }
  else
  {
    cv::Point3d currentCameraPosition;
    cv::Vec4d currentCameraOrientation;
    motionModel_->PredictNextCameraPose(currentCameraPosition, currentCameraOrientation);
    cameraPose_ = CameraPose( currentCameraPosition, currentCameraOrientation );
  }

  // If SPTAM has not been initialized yet, do it
  if ( sptam_ == nullptr )
  {
    ROS_INFO_STREAM("init calib");
    loadCameraCalibration(left_info, right_info);
  }

  // convert image to OpenCv cv::Mat format
  cv_bridge::CvImageConstPtr bridgeLeft_ptr = cv_bridge::toCvShare(l_image_msg, "rgb8");
  cv_bridge::CvImageConstPtr bridgeRight_ptr = cv_bridge::toCvShare(r_image_msg, "rgb8");

  // save images
  cv::Mat imageLeft = bridgeLeft_ptr->image;
  cv::Mat imageRight = bridgeRight_ptr->image;

  #ifdef SHOW_PROFILING
    double start, end;
    start = GetSeg();
    double startStep, endStep;
    startStep = GetSeg();
  #endif // SHOW_PROFILING

  FeatureExtractorThread featureExtractorThreadLeft(imageLeft, *featureDetector_, *descriptorExtractor_);
  FeatureExtractorThread featureExtractorThreadRight(imageRight, *featureDetector_, *descriptorExtractor_);

  featureExtractorThreadLeft.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsLeft = featureExtractorThreadLeft.GetKeyPoints();
  const cv::Mat& descriptorsLeft = featureExtractorThreadLeft.GetDescriptors();

  featureExtractorThreadRight.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsRight = featureExtractorThreadRight.GetKeyPoints();
  const cv::Mat& descriptorsRight = featureExtractorThreadRight.GetDescriptors();

  ImageFeatures imageFeaturesLeft = ImageFeatures(imageLeft, keyPointsLeft, descriptorsLeft, mapper_params_.matchingCellSize);
  ImageFeatures imageFeaturesRight = ImageFeatures(imageRight, keyPointsRight, descriptorsRight, mapper_params_.matchingCellSize);

  #ifdef SHOW_PROFILING
    endStep = GetSeg();
    WriteToLog(" tk Extract: ", startStep, endStep);
  #endif

  // if the map was not initialized, try to build it
  if( not map_.nMapPoints() )
  {
    ROS_INFO("Trying to initialize map...");

    // Create keyFrame
    StereoFrame* frame = new StereoFrame(
      cameraPose_, cameraParametersLeft_,
      stereo_baseline_, cameraParametersRight_,
      imageFeaturesLeft, imageFeaturesRight, true
    );

    frame->SetId( 0 ); // Set Keyframe ID

    // Initialize MapMaker
    // TODO: keep the bool returned below so the enclosing "if" does not have to re-check map_.nMapPoints()
    /*bool isMapInitialized = */InitFromStereo( map_, *frame, imageLeft, *rowMatcher_);
  }
  // if the map is already initialized, do tracking
  else
  {
    cameraPose_ = sptam_->track(cameraPose_, imageFeaturesLeft, imageFeaturesRight, imageLeft, imageRight);

    #ifdef SHOW_PROFILING
      end = GetSeg();
      std::cout << GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;

      std::stringstream message;
      message << std::fixed <<  GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;
      Logger::Write( message.str() );
    #endif

    // fix the error between map and odom to correct the current pose.
    if ( use_odometry_ )
      fixOdomFrame( cameraPose_, camera_to_odom, currentTime );
    else
    {
      // if odometry is disabled, odom_to_map_ actually contains the map->cam transform because I'm too lazy
      motionModel_->UpdateCameraPose(cameraPose_.GetPosition(), cameraPose_.GetOrientationQuaternion());

      tf::Transform cam_to_map;
      CameraPose2TFPose(cameraPose_, cam_to_map);

      tf::StampedTransform base_to_cam;
      transform_listener_.lookupTransform(camera_frame_, base_frame_, currentTime, base_to_cam);

      std::lock_guard<std::mutex> lock( odom_to_map_mutex_ );
      odom_to_map_ = cam_to_map * base_to_cam;
    }
  }

  // Publish Map To be drawn by rviz visualizer
  publishMap();

  // Publish the camera Pose
  publishPose( l_image_msg->header.seq, currentTime, cameraPose_ );
}
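onImages expects a time-synchronized stereo pair plus the two CameraInfo messages. A minimal subscription sketch using message_filters with an ApproximateTime policy; the topic names, queue sizes, and the helper function are assumptions, not taken from the excerpt:

#include <ros/ros.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::CameraInfo,
    sensor_msgs::Image, sensor_msgs::CameraInfo> StereoSyncPolicy;

void subscribeStereo(ros::NodeHandle& nh, sptam::sptam_node& node)
{
  // static so the subscribers and synchronizer outlive this call (sketch only;
  // in a real node these would be members). Topic names are placeholders.
  static message_filters::Subscriber<sensor_msgs::Image>
      left_img(nh, "stereo/left/image_rect", 1),
      right_img(nh, "stereo/right/image_rect", 1);
  static message_filters::Subscriber<sensor_msgs::CameraInfo>
      left_info(nh, "stereo/left/camera_info", 1),
      right_info(nh, "stereo/right/camera_info", 1);

  // Match messages whose stamps are close; queue size 10 is an assumption.
  static message_filters::Synchronizer<StereoSyncPolicy>
      sync(StereoSyncPolicy(10), left_img, left_info, right_img, right_info);

  sync.registerCallback(
      boost::bind(&sptam::sptam_node::onImages, &node, _1, _2, _3, _4));
}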
Example #6
void
WmMapServer::insertCloudCallback(const sensor_msgs::PointCloud2::ConstPtr& cloud_in)
{
	ros::WallTime startTime = ros::WallTime::now();

	// Nothing to insert while mapping is disabled.
	if(!do_mapping_)
		return;

	pcl::PCLPointCloud2::Ptr cloud (new pcl::PCLPointCloud2 ());
	pcl::PCLPointCloud2::Ptr cloud_filtered (new pcl::PCLPointCloud2 ());

	pcl_conversions::toPCL(*cloud_in, *cloud);

	// Downsample the incoming cloud to the map resolution with a voxel grid.
	pcl::VoxelGrid<pcl::PCLPointCloud2> sor;
	sor.setInputCloud (cloud);
	sor.setLeafSize (res_, res_, res_);
	sor.filter (*cloud_filtered);

	pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc(new pcl::PointCloud<pcl::PointXYZRGB>); // input cloud for filtering and ground-detection
	pcl::fromPCLPointCloud2 (*cloud_filtered, *pc);

	/*
	 * Filter in Robot space
	 */

	pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc_filtered(new pcl::PointCloud<pcl::PointXYZRGB>); // points that survive the robot-frame field-of-view filter

	tf::StampedTransform sensorToRobotTf;
	Eigen::Matrix4f sensorToRobot;
	try {
		tfListener_.lookupTransform(baseFrameId_, cloud_in->header.frame_id, cloud_in->header.stamp, sensorToRobotTf);
	} catch(tf::TransformException& ex){
		ROS_ERROR_STREAM( "Transform error of sensor data: " << ex.what() << ", quitting callback");
		return;
	}

	pcl_ros::transformAsMatrix(sensorToRobotTf, sensorToRobot);
	pcl::transformPointCloud(*pc, *pc, sensorToRobot);

	// Keep only points inside the sensor's usable field of view,
	// expressed as angular and range limits in the robot frame.
	for(pcl::PointCloud<pcl::PointXYZRGB>::iterator it=pc->begin(); it!=pc->end(); ++it)
	{
		if(!std::isnan(it->x))
		{
			double thetaxy, thetaxz;

			thetaxy = atan2(it->y, it->x);
			thetaxz = atan2(it->z, it->x);

			if(fabs(thetaxy) < pointcloudMaxTxy_ && fabs(thetaxz) < pointcloudMaxTxz_ &&
					it->x < pointcloudMaxX_ &&
					it->z > pointcloudMinZ_ && it->z < pointcloudMaxZ_)
			{
				pc_filtered->push_back(*it);
			}
		}
	}

	/*
	 * Add to Map
	 */
	tf::StampedTransform RobotToWorldTf;
	try {
		tfListener_.lookupTransform(worldFrameId_, baseFrameId_, cloud_in->header.stamp, RobotToWorldTf);
	} catch(tf::TransformException& ex){
		ROS_ERROR_STREAM( "Transform error of sensor data: " << ex.what() << ", quitting callback");
		return;
	}

	Eigen::Matrix4f RobotToWorld;
	pcl_ros::transformAsMatrix(RobotToWorldTf, RobotToWorld);
	pcl::transformPointCloud(*pc_filtered, *pc_filtered, RobotToWorld);

	int c = 0; // number of points actually added to the map

	for(pcl::PointCloud<pcl::PointXYZRGB>::iterator it=pc_filtered->begin(); it!=pc_filtered->end(); ++it)
	{
		if(validNewPoint(*it))
		{
			// addPointToCloud() appends the point to map_ and indexes it in the
			// octree, so a separate map_->push_back() would insert it twice.
			octree_->addPointToCloud (*it, map_);
			c++;
		}
	}

	double total_elapsed = (ros::WallTime::now() - startTime).toSec();
	ROS_DEBUG("Pointcloud insertion in map done (%d new points, %zu pts total, %f sec)", c, map_->size(), total_elapsed);

	publishMap(cloud_in->header.stamp);

}
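Finally, a wiring sketch for the callback itself, assuming the node subscribes to a sensor_msgs/PointCloud2 topic; the topic name "cloud_in", the queue size 5, and the helper function are placeholders:

#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>

// Hypothetical subscription set-up; the returned subscriber must be kept
// alive for the callback to keep firing.
ros::Subscriber subscribeCloud(ros::NodeHandle& nh, WmMapServer& server)
{
	return nh.subscribe("cloud_in", 5, &WmMapServer::insertCloudCallback, &server);
}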