Example #1
void Explore::maxForward(){
	ROS_INFO("enter maxForward ");

	int counter = 0;

	float d_x = 0, d_y=0, x_map, y_map, x_odom, y_odom;
	transfromRobotToOdomPosition(d_x, d_y, x_odom, y_odom);
	 while (canMove(x_odom, y_odom)){
		d_x += 0.1;
		d_y = 0;
		transfromRobotToOdomPosition(d_x, d_y, x_odom, y_odom);
	/*	++counter;
		if(counter > 10){
			ROS_INFO("leave maxForward, can not move forward");
			return;
		}*/

	};

//	ROS_INFO("d_x = %f", d_x);

	float robotAngleInMap = getRobotAngleInMap();

	transfromRobotToMapPosition(d_x, d_y, x_map, y_map);
//	ROS_INFO("robot move to (%f,%f)", x_map, y_map);

	publishPose(x_map, y_map, robotAngleInMap);

//	ROS_INFO("wait for result");
//	ac_.waitForResult();

	ROS_INFO("leave maxForward");
}
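The Explore examples above and below call publishPose() overloads whose bodies are not part of this listing; the commented-out ac_.waitForResult() hints at an actionlib client. A minimal sketch of what such a helper could look like, assuming ac_ is a move_base SimpleActionClient and a "map" goal frame (both assumptions, not the original implementation):

#include <move_base_msgs/MoveBaseAction.h>
#include <actionlib/client/simple_action_client.h>
#include <tf/transform_datatypes.h>

// Illustrative sketch only; the real Explore::publishPose is not shown in the listing.
void Explore::publishPose(float x, float y, float theta)
{
	move_base_msgs::MoveBaseGoal goal;
	goal.target_pose.header.frame_id = "map";  // assumed goal frame
	goal.target_pose.header.stamp = ros::Time::now();
	goal.target_pose.pose.position.x = x;
	goal.target_pose.pose.position.y = y;
	goal.target_pose.pose.orientation = tf::createQuaternionMsgFromYaw(theta);
	ac_.sendGoal(goal);
	// ac_.waitForResult();  // optionally block, as the commented-out call above suggests
}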
Example #2
// Set and publish pose.
void Publisher::publishStateAsCallback(
    const okvis::Time & t, const okvis::kinematics::Transformation & T_WS)
{
  setTime(t);
  setPose(T_WS);  // TODO: provide setters for this hack
  publishPose();
}
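The snippet above uses a stateful set-then-publish pattern: setTime() and setPose() cache the latest estimate, and the parameterless publishPose() sends it. A minimal sketch of such a class (names and message types are illustrative, not the actual okvis_ros Publisher):

#include <ros/ros.h>
#include <geometry_msgs/PoseStamped.h>
#include <tf/transform_datatypes.h>

// Illustrative sketch of the set-then-publish pattern, not the real okvis_ros API.
class PosePublisher {
 public:
  explicit PosePublisher(ros::NodeHandle& nh)
      : pub_(nh.advertise<geometry_msgs::PoseStamped>("pose", 10)) {}

  void setTime(const ros::Time& t) { msg_.header.stamp = t; }
  void setPose(const tf::Transform& T_WS) { tf::poseTFToMsg(T_WS, msg_.pose); }
  void publishPose() { pub_.publish(msg_); }

 private:
  ros::Publisher pub_;
  geometry_msgs::PoseStamped msg_;
};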
Example #3
void Explore::randomForward(){

	double forward = 0.5 + ((2.0-0.3+1)*rand()/(RAND_MAX+1.0));
	ROS_INFO("forward %f", forward);
	publishPose(forward, 0, 0, USE_ROBOT);
	
}
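Note that the corrected line above divides by (RAND_MAX+1.0), matching the parenthesization used in randomRotate() further down; the original divided by RAND_MAX first and then added 1.0. For new code, <random> expresses the intent more safely. A short sketch drawing a forward distance in an assumed range (the original formula's intended bounds are unclear):

#include <random>

// Sketch: random forward distance via <random> instead of rand() arithmetic.
double randomForwardDistance()
{
	static std::mt19937 gen(std::random_device{}());
	std::uniform_real_distribution<double> dist(0.5, 2.0);  // metres; assumed bounds
	return dist(gen);
}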
Example #4
void Explore::randomForward(){

	ROS_INFO("enter randomForward ");

	int counter = 0;

	float rand_x, rand_y, x_odom, y_odom, x_map, y_map;
	do {
		rand_x = (rand() % 10) / 20.0;
		rand_y = 0;
		transfromRobotToOdomPosition(rand_x, rand_y, x_odom, y_odom);
		++counter;
		if(counter > 10){
			ROS_INFO("leave randomForward, can not move forward");
			return;
		}

	} while (!canMove(x_odom, y_odom));


	float robotAngleInMap = getRobotAngleInMap();

	transfromRobotToMapPosition(rand_x, rand_y, x_map, y_map);
//	ROS_INFO("robot move to (%f,%f)", x_map, y_map);

	publishPose(x_map, y_map, robotAngleInMap);

//	ROS_INFO("wait for result");
//	ac_.waitForResult();

	ROS_INFO("leave randomForward");
}
Example #5
void Explore::randomRotate(){

	float angle = 70 + ((360-90+1)*rand()/(RAND_MAX+1.0));
	angle = ((angle) * PI) / 180.0;
	publishPose(0, 0, angle, USE_ROBOT);

}
Example #6
bool Node::update()
{
  // get pose from tf
  if (p_use_tf_pose_start_estimate_) {
    tf::StampedTransform odom_pose;
    try {
      getTransformListener().waitForTransform(p_odom_frame_, p_base_frame_, scan_.getStamp(), ros::Duration(1.0));
      getTransformListener().lookupTransform(p_odom_frame_, p_base_frame_, scan_.getStamp(), odom_pose);

      matcher_->setTransform(map_odom_transform_ * odom_pose);
    } catch(tf::TransformException& e) {
      ROS_ERROR("Could not get pose from tf: %s", e.what());
      return false;
    }
  }

  // match scan
  if (!map_->empty()) {
#ifdef USE_HECTOR_TIMING
    hector_diagnostics::TimingSection section("scan matcher");
#endif
    matcher_->computeCovarianceIf(pose_with_covariance_publisher_ && pose_with_covariance_publisher_.getNumSubscribers() > 0);
    matcher_->match(*map_, scan_);
    if (!matcher_->valid()) return false;
  }

  // update map
  float_t position_difference, orientation_difference;
  matcher_->getPoseDifference(last_map_update_pose_, position_difference, orientation_difference);
  if (map_->empty() || position_difference > p_map_update_translational_threshold_ || orientation_difference > p_map_update_angular_threshold_) {
#ifdef USE_HECTOR_TIMING
    hector_diagnostics::TimingSection section("map update");
#endif
    // ros::WallTime _map_update_start = ros::WallTime::now();
      map_->insert(scan_, matcher_->getTransform());
    // ROS_DEBUG("Map update took %f seconds.", (ros::WallTime::now() - _map_update_start).toSec());

    last_map_update_pose_ = matcher_->getTransform();
    last_map_update_time_ = scan_.getStamp();
  }

  // publish pose
  publishPose();

  // publish tf
  if (p_pub_map_odom_transform_) publishTf();

  // publish timing
#ifdef USE_HECTOR_TIMING
    timing_publisher_.publish(hector_diagnostics::TimingAggregator::Instance()->update(matcher_->getStamp()));
#endif

  return true;
}
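Node::update() finishes by calling a parameterless publishPose(), whose body is not part of this listing. A plausible minimal sketch, assuming the matcher's current map-frame transform is converted to a geometry_msgs::PoseStamped (pose_publisher_ and p_map_frame_ are hypothetical members):

// Illustrative sketch; not the actual implementation behind this example.
void Node::publishPose()
{
  geometry_msgs::PoseStamped pose;
  pose.header.stamp = matcher_->getStamp();   // getStamp() also appears in update() above
  pose.header.frame_id = p_map_frame_;        // hypothetical frame parameter
  tf::poseTFToMsg(matcher_->getTransform(), pose.pose);
  pose_publisher_.publish(pose);              // hypothetical ros::Publisher member
}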
Example #7
void VoEstimator::publishUpdate(int64_t utime,
                                Eigen::Isometry3d local_to_head, std::string channel, bool output_alpha_filter){
  if ((!pose_initialized_) || (!vo_initialized_)) {
    std::cout << (int) pose_initialized_ << " pose\n";
    std::cout << (int) vo_initialized_ << " vo\n";
    std::cout << "pose, vo, zheight not initialized, refusing to publish POSE_HEAD nad POSE_BODY\n";
    return;
  }

  // Send vo pose to collections:
  Isometry3dTime local_to_headT = Isometry3dTime(utime, local_to_head);
  pc_vis_->pose_to_lcm_from_list(60000, local_to_headT);
  // std::cout << head_rot_rate_.transpose() << " head rot rate out\n";
  // std::cout << head_lin_rate_.transpose() << " head lin rate out\n";

  // publish local to head pose
  if (output_alpha_filter){
    publishPose(utime, channel + channel_extension_, local_to_head, head_lin_rate_alpha_, head_rot_rate_alpha_);
  }else{
    publishPose(utime, channel + channel_extension_, local_to_head, head_lin_rate_, head_rot_rate_);
  }
}
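The output_alpha_filter flag above selects the smoothed (*_alpha_) rate estimates over the raw ones. A one-line sketch of the exponential (alpha) smoothing such members presumably hold; the function name and default alpha here are assumptions:

#include <Eigen/Dense>

// Sketch: exponential smoothing of a rate estimate; alpha in [0,1), higher = smoother.
Eigen::Vector3d alphaFilter(const Eigen::Vector3d& filtered, const Eigen::Vector3d& raw,
                            double alpha = 0.8)
{
  return alpha * filtered + (1.0 - alpha) * raw;
}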
Example #8
void Node::reset()
{
  MapBase::UniqueLock lock(map_->getWriteLock());  // note: map_ is dereferenced unconditionally here, so the 'if (map_)' guards below are redundant

  last_map_update_pose_.setIdentity();
  last_map_update_time_ = ros::Time();
  map_odom_transform_.setIdentity();

  if (map_) map_->reset();
  if (matcher_) matcher_->reset();

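  // Release the write lock before publishing: publishMap()/publishPose() presumably need read access to the map.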
  lock.unlock();
  publishMap();
  publishPose();
}
Example #9
void Explore::maxForward(){

	float d_x = 0.1, d_y=0.0, x_map, y_map;
	transfromRobotToMapPosition(d_x, d_y, x_map, y_map);
		
	while (canMove(x_map, y_map)){
		d_x += 0.1;
		transfromRobotToMapPosition(d_x, d_y, x_map, y_map);
	}
	ROS_INFO("d_x = %f", d_x);

	publishPose(x_map, y_map, getRobotAngleInMap(), USE_MAP);

}
Example #10
/// @brief The event loop. Basically an ever running while with ros::spinOnce()
/// This is a re-implementation that takes care of memory scopes and processes only points with higher image gradients
void ImuDeadReckon::eventLoop()
{
    ros::Rate rate(100); //100Hz
    while( ros::ok() )
    {
        ros::spinOnce();

        if( !(this->isIMUDataReady) )
        {
            rate.sleep();  // without this, the loop busy-spins until IMU data arrives
            continue;
        }


        updateNominalStateWithCurrentMeasurements();
        publishPose();


        ROS_INFO_STREAM_THROTTLE( 5, "(every5 sec) Lin Acc : "<< lin_acc.transpose() );
        ROS_INFO_STREAM_THROTTLE( 5, "Ang Vel : "<< ang_vel.transpose() );

        rate.sleep();
    }

}
Example #11
void sptam::sptam_node::onImages(
  const sensor_msgs::ImageConstPtr& l_image_msg, const sensor_msgs::CameraInfoConstPtr& left_info,
  const sensor_msgs::ImageConstPtr& r_image_msg, const sensor_msgs::CameraInfoConstPtr& right_info
)
{
  ROS_INFO_STREAM("dt: " << (l_image_msg->header.stamp - r_image_msg->header.stamp).toSec());

  // Save current Time
  ros::Time currentTime = l_image_msg->header.stamp;

  // If using odometry, try to get a new estimate for the current pose.
  // if not using odometry, camera_to_odom is left blank.
  tf::StampedTransform camera_to_odom;
  if ( use_odometry_ )
  {
    if ( not getCameraInOdom(camera_to_odom, currentTime) )
      return;

    TFPose2CameraPose(odom_to_map_ * camera_to_odom, cameraPose_);
  }
  else
  {
    cv::Point3d currentCameraPosition;
    cv::Vec4d currentCameraOrientation;
    motionModel_->PredictNextCameraPose(currentCameraPosition, currentCameraOrientation);
    cameraPose_ = CameraPose( currentCameraPosition, currentCameraOrientation );
  }

  // If SPTAM has not been initialized yet, do it
  if ( sptam_ == nullptr )
  {
    ROS_INFO_STREAM("init calib");
    loadCameraCalibration(left_info, right_info);
  }

  // convert image to OpenCv cv::Mat format
  cv_bridge::CvImageConstPtr bridgeLeft_ptr = cv_bridge::toCvShare(l_image_msg, "rgb8");
  cv_bridge::CvImageConstPtr bridgeRight_ptr = cv_bridge::toCvShare(r_image_msg, "rgb8");

  // save images
  cv::Mat imageLeft = bridgeLeft_ptr->image;
  cv::Mat imageRight = bridgeRight_ptr->image;

  #ifdef SHOW_PROFILING
      double start, end;
      start = GetSeg();
  #endif // SHOW_PROFILING

  #ifdef SHOW_PROFILING
    double startStep, endStep;
    startStep = GetSeg();
  #endif

  FeatureExtractorThread featureExtractorThreadLeft(imageLeft, *featureDetector_, *descriptorExtractor_);
  FeatureExtractorThread featureExtractorThreadRight(imageRight, *featureDetector_, *descriptorExtractor_);
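  // FeatureExtractorThread presumably starts its work in the constructor, so the
  // left and right extractions above run concurrently until WaitUntilFinished().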

  featureExtractorThreadLeft.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsLeft = featureExtractorThreadLeft.GetKeyPoints();
  const cv::Mat& descriptorsLeft = featureExtractorThreadLeft.GetDescriptors();

  featureExtractorThreadRight.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsRight = featureExtractorThreadRight.GetKeyPoints();
  const cv::Mat& descriptorsRight = featureExtractorThreadRight.GetDescriptors();

  ImageFeatures imageFeaturesLeft = ImageFeatures(imageLeft, keyPointsLeft, descriptorsLeft, mapper_params_.matchingCellSize);
  ImageFeatures imageFeaturesRight = ImageFeatures(imageRight, keyPointsRight, descriptorsRight, mapper_params_.matchingCellSize);

  #ifdef SHOW_PROFILING
    endStep = GetSeg();
    WriteToLog(" tk Extract: ", startStep, endStep);
  #endif

  // if the map was not initialized, try to build it
  if( not map_.nMapPoints() )
  {
    ROS_INFO("Trying to intialize map...");

    // Create keyFrame
    StereoFrame* frame = new StereoFrame(
      cameraPose_, cameraParametersLeft_,
      stereo_baseline_, cameraParametersRight_,
      imageFeaturesLeft, imageFeaturesRight, true
    );

    frame->SetId( 0 ); // Set Keyframe ID

    // Initialize MapMaker
    // TODO: keep this bool so that map initialization does not have to be re-checked via map_.nMapPoints() in the 'if' above
    /*bool isMapInitialized = */InitFromStereo( map_, *frame, imageLeft, *rowMatcher_);
  }
  // if the map is already initialized, do tracking
  else
  {

    cameraPose_ = sptam_->track(cameraPose_, imageFeaturesLeft, imageFeaturesRight, imageLeft, imageRight);

    #ifdef SHOW_PROFILING
      end = GetSeg();
      std::cout << GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;

      std::stringstream message;
      message << std::fixed <<  GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;
      Logger::Write( message.str() );
    #endif

    // fix the error between map and odom to correct the current pose.
    if ( use_odometry_ )
      fixOdomFrame( cameraPose_, camera_to_odom, currentTime );
    else
    // if odometry is disabled, odom_to_map_ actually contains the map->cam transform because I'm too lazy
    {
      motionModel_->UpdateCameraPose(cameraPose_.GetPosition(), cameraPose_.GetOrientationQuaternion());

      tf::Transform cam_to_map;
      CameraPose2TFPose(cameraPose_, cam_to_map);

      tf::StampedTransform base_to_cam;
      transform_listener_.lookupTransform(camera_frame_, base_frame_, currentTime, base_to_cam);

      std::lock_guard<std::mutex> lock( odom_to_map_mutex_ );
      odom_to_map_ = cam_to_map * base_to_cam;
    }
  }

  // Publish Map To be drawn by rviz visualizer
  publishMap();

  // Publish the camera Pose
  publishPose( l_image_msg->header.seq, currentTime, cameraPose_ );
}
Example #12
//**************************************************************************************************************************getMotion()
void PSMpositionNode::getMotion(const sensor_msgs::LaserScan& scan,  std::vector <depthPoint> *depthLine)
{
    ROS_DEBUG("Received kinectLine");

    // **** attempt to match the two scans

    // PM scan matcher is used in the following way:
    // The reference scan (prevPMScan_) always has a pose of 0
    // The new scan (currPMScan) has a pose equal to the movement
    // of the laser in the world frame since the last scan (btTransform change)
    // The computed correction is then propagated using the tf machinery
    if (!initialized_)
    {
        initialized_ = initialize(scan);
        if (initialized_) ROS_INFO("Matcher Horizontal initialized");
        return;
    }

    prevPMScan_->rx = 0;
    prevPMScan_->ry = 0;
    prevPMScan_->th = 0;

    btTransform currWorldToBase;
    btTransform change;
    change.setIdentity();

    // what odometry model to use
    if (useTfOdometry_)
    {
        // get the current position of the base in the world frame
        // if no transform is available, we'll use the last known transform

        getCurrentEstimatedPose(currWorldToBase, scan);
        change = laserToBase_ * prevWorldToBase_.inverse() * currWorldToBase * baseToLaser_;
    }
    else if (useImuOdometry_)
    {
        imuMutex_.lock();
        double dTheta = currImuAngle_ - prevImuAngle_;
        prevImuAngle_ = currImuAngle_;
        change.getRotation().setRPY(0.0, 0.0, dTheta);
        imuMutex_.unlock();
    }

    PMScan * currPMScan = new PMScan(scan.ranges.size());

    rosToPMScan(scan, change, currPMScan);


    try
    {
        matcher_.pm_psm(prevPMScan_, currPMScan);
    }
    catch(int err)
    {
        ROS_WARN("Error in scan matching");
        delete prevPMScan_;
        prevPMScan_ = currPMScan;
        return;
    }

    // **** calculate change in position

    // rotate by -90 degrees, since polar scan matcher assumes different laser frame
    // and scale down by 100
    double dx =  currPMScan->ry / ROS_TO_PM;
    double dy = -currPMScan->rx / ROS_TO_PM;
    double da =  currPMScan->th;

    // change = scan match result for how much laser moved between scans,
    // in the world frame
    change.setOrigin(btVector3(dx, dy, 0.0));
    btQuaternion q;
    q.setRPY(0, 0, da);
    change.setRotation(q);


    // **** publish the new estimated pose as a tf
    if(take_vicon == true) {
        prevWorldToBase_.setOrigin(btVector3(pos_vicon[0],pos_vicon[1], pos_vicon[2]));
        btQuaternion vicon_q(quat_vicon.x(),quat_vicon.y(),quat_vicon.z(),quat_vicon.w());
        prevWorldToBase_.setRotation(vicon_q);
        take_vicon=false;
    }

    currWorldToBase = prevWorldToBase_ * baseToLaser_ * change * laserToBase_;

    heightLine = ransac(depthLine);

    frameP_ = worldFrame_;

    if (publishTf_  ) publishTf  (currWorldToBase, scan.header.stamp);

    if (publishPose_) publishPose(currWorldToBase, scan.header.stamp, frameP_, heightLine);

    // **** swap old and new

    delete prevPMScan_;
    prevPMScan_      = currPMScan;
    prevWorldToBase_ = currWorldToBase;

}
Example #13
void PSMpositionNode::getMotion_can(const sensor_msgs::LaserScan& scan,  std::vector <depthPoint> *depthLine)//*********************************************getMotion_can()
{
    ROS_DEBUG("Received scan");


    // **** if this is the first scan, initialize and leave the function here

    if (!initialized_can)
    {
        initialized_can = initializeCan(scan);
        if (initialized_can) ROS_INFO("Matcher initialized");
        return;
    }

    // **** attempt to match the two scans

    // CSM is used in the following way:
    // The reference scan (prevLDPcan_) always has a pose of 0
    // The new scan (currLDPScan) has a pose equal to the movement
    // of the laser in the world frame since the last scan (btTransform change)
    // The computed correction is then propagated using the tf machinery

    prevLDPScan_->odometry[0] = 0;
    prevLDPScan_->odometry[1] = 0;
    prevLDPScan_->odometry[2] = 0;

    prevLDPScan_->estimate[0] = 0;
    prevLDPScan_->estimate[1] = 0;
    prevLDPScan_->estimate[2] = 0;

    prevLDPScan_->true_pose[0] = 0;
    prevLDPScan_->true_pose[1] = 0;
    prevLDPScan_->true_pose[2] = 0;

    btTransform currWorldToBase;
    btTransform change;
    change.setIdentity();

    // what odometry model to use
    if (useTfOdometry_)
    {
        // get the current position of the base in the world frame
        // if no transform is available, we'll use the last known transform

        getCurrentEstimatedPose(currWorldToBase, scan);
        change = laserToBase_ * prevWorldToBase_.inverse() * currWorldToBase * baseToLaser_;
    }
    else if (useImuOdometry_)
    {
        imuMutex_.lock();
        double dTheta = currImuAngle_ - prevImuAngle_;
        prevImuAngle_ = currImuAngle_;
        change.getRotation().setRPY(0.0, 0.0, dTheta);
        imuMutex_.unlock();
    }

    geometry_msgs::Pose2D p;
    tfToPose2D(change, p);
    LDP currLDPScan = rosToLDPScan(scan, p);

    input_.laser_ref  = prevLDPScan_;
    input_.laser_sens = currLDPScan;
    input_.first_guess[0] = 0;
    input_.first_guess[1] = 0;
    input_.first_guess[2] = 0;

    sm_icp(&input_, &output_);

    if (!output_.valid)
    {
        ROS_WARN("Error in scan matching");
        ld_free(prevLDPScan_);
        prevLDPScan_ = currLDPScan;
        return;
    }

    // **** calculate change in position

    double dx = output_.x[0];
    double dy = output_.x[1];
    double da = output_.x[2];

    // change = scan match result for how much laser moved between scans,
    // in the world frame
    change.setOrigin(btVector3(dx, dy, 0.0));
    btQuaternion q;
    q.setRPY(0, 0, da);
    change.setRotation(q);

    frameP_ = worldFrame_;
    heightLine = ransac(depthLine);
    // **** publish the new estimated pose as a tf

    if(take_vicon == true) {
        prevWorldToBase_.setOrigin(btVector3(pos_vicon[0],pos_vicon[1], pos_vicon[2]));
        btQuaternion vicon_q(quat_vicon.x(),quat_vicon.y(),quat_vicon.z(),quat_vicon.w());
        prevWorldToBase_.setRotation(vicon_q);
    }

    currWorldToBase = prevWorldToBase_ * baseToLaser_ * change * laserToBase_;

    if (publishTf_  ) publishTf  (currWorldToBase, scan.header.stamp);
    if (publishPose_) publishPose(currWorldToBase, scan.header.stamp, frameP_, heightLine);

    // **** swap old and new

    ld_free(prevLDPScan_);
    prevLDPScan_ = currLDPScan;
    prevWorldToBase_ = currWorldToBase;

    // **** timing information - needed for profiling only

}
Example #14
void Explore::robotFullRotate(){
	float angle = (360 * PI) / 180.0;
	publishPose(0, 0, angle, USE_ROBOT);
}
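A caveat with robotFullRotate(): a relative yaw of 2π converts to the identity quaternion when packed into a pose goal, so a planner may treat the request as "no rotation". A hedged workaround sketch, splitting the turn into four quarter turns (this assumes the same publishPose(x, y, theta, frame) helper and that each goal completes, or is waited on, before the next is sent):

// Sketch only: four 90-degree increments instead of one 360-degree goal,
// so each intermediate orientation is distinct from the current one.
void Explore::robotFullRotate(){
	const float quarter = (90.0 * PI) / 180.0;
	for (int i = 0; i < 4; ++i) {
		publishPose(0, 0, quarter, USE_ROBOT);
		// ac_.waitForResult();  // hypothetical: block until the turn finishes
	}
}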