Example #1
void BenchmarkNode::runFromFolder()
{
  for(int img_id = 2; img_id < 188; ++img_id)
  {
    // load image
    std::stringstream ss;
    ss << svo::test_utils::getDatasetDir() << "/sin2_tex2_h1_v8_d/img/frame_"
       << std::setw( 6 ) << std::setfill( '0' ) << img_id << "_0.png";
    if(img_id == 2)
      std::cout << "reading image " << ss.str() << std::endl;
    cv::Mat img(cv::imread(ss.str().c_str(), 0));
    assert(!img.empty());

    // process frame
    vo_->addImage(img, 0.01*img_id);

    // display tracking quality
    if(vo_->lastFrame() != NULL)
    {
      std::cout << "Frame-Id: " << vo_->lastFrame()->id_ << " \t"
                << "#Features: " << vo_->lastNumObservations() << " \t"
                << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms \n";

      // access the pose of the camera via vo_->lastFrame()->T_f_w_.
    }
  }
}
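
The comment above points at vo_->lastFrame()->T_f_w_, the transform that maps world coordinates into the current frame. A minimal sketch of reading the camera pose out of it, assuming the Sophus SE3 type these examples use elsewhere (Examples #3 and #4 call the same accessors in context):

// Hedged sketch: the camera position in world coordinates is the
// translation of the inverse transform (what lastFrame()->pos() appears
// to return in Example #3).
Sophus::SE3 T_f_w = vo_->lastFrame()->T_f_w_;
Eigen::Vector3d cam_pos = T_f_w.inverse().translation();
Eigen::Quaterniond cam_rot = T_f_w.inverse().unit_quaternion();
std::cout << "camera position: " << cam_pos.transpose() << std::endl;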
Example #2
void VoNode::processUserActions()
{
  char input = remote_input_.c_str()[0];
  remote_input_ = "";

  if(user_input_thread_ != NULL)
  {
    char console_input = user_input_thread_->getInput();
    if(console_input != 0)
      input = console_input;
  }

  switch(input)
  {
    case 'q':
      quit_ = true;
      printf("SVO user input: QUIT\n");
      break;
    case 'r':
      vo_->reset();
      printf("SVO user input: RESET\n");
      break;
    case 's':
      vo_->start();
      printf("SVO user input: START\n");
      break;
    default: ;
  }
}
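
processUserActions() consumes remote_input_ but never fills it; in rpg_svo it is fed by a ROS subscriber on a remote-key topic. A minimal sketch of such a callback, assuming a std_msgs/String subscription (the topic wiring and the callback name are assumptions, not shown in this snippet):

// Hedged sketch: fills remote_input_, which processUserActions()
// reads once per frame and then clears.
void VoNode::remoteKeyCb(const std_msgs::StringConstPtr& key_input)
{
  remote_input_ = key_input->data;
}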
Example #3
void BenchmarkNode::runFromFolder()
{
  cv::line(*plot_, cv::Point(250,0), cv::Point(250,500), cv::Scalar(255,0,255));
  cv::line(*plot_, cv::Point(0,250), cv::Point(500,250), cv::Scalar(255, 0, 255));

  cv::Point origin = cv::Point(250,250);
  cv::Point prevPoint1 = origin, curPoint1 = origin;
  cv::Point prevPoint2 = origin, curPoint2 = origin;
  for(int img_id = 2; img_id < 188; ++img_id)
  {
    // load image
    std::stringstream ss;
    ss << svo::test_utils::getDatasetDir() << "/sin2_tex2_h1_v8_d/img/frame_"
       << std::setw( 6 ) << std::setfill( '0' ) << img_id << "_0.png";
    if(img_id == 2)
      std::cout << "reading image " << ss.str() << std::endl;
    cv::Mat img(cv::imread(ss.str().c_str(), 0));
    assert(!img.empty());

    // process frame
    vo_->addImage(img, 0.01*img_id);

    // display tracking quality
    if(vo_->lastFrame() != NULL)
    {
      std::cout << "Frame-Id: " << vo_->lastFrame()->id_ << " \t"
                << "#Features: " << vo_->lastNumObservations() << " \t"
                << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms \n";

      // access the pose of the camera via vo_->lastFrame()->T_f_w_.
      Vector3d pos = vo_->lastFrame()->T_f_w_.translation();

      std::cout << "\tvo_->lastFrame()->pos() XPos: " << vo_->lastFrame()->pos().x() << " \t"
                << "YPos: " << vo_->lastFrame()->pos().y() << " \t"
                << "ZPos: " << vo_->lastFrame()->pos().z() << "\n";
      std::cout << "\tlastFrame()->T_f_w_.translation() XPos: " << pos.x() << " \t"
                << "YPos: " << pos.y() << " \t"
                << "ZPos: " << pos.z() << "\n";

      // scale by 13 so the trajectory fits the 500x500 plot canvas
      curPoint2 = origin + cv::Point(pos.x() * 13, pos.y() * 13);
      curPoint1 = origin + cv::Point(vo_->lastFrame()->pos().x() * 13, vo_->lastFrame()->pos().y() * 13);
      cv::circle(*plot_, prevPoint1, 2, cv::Scalar(0,255,0));
      cv::circle(*plot_, curPoint1, 2, cv::Scalar(0,255,0));
      cv::circle(*plot_, prevPoint2, 2, cv::Scalar(0,0,255));
      cv::circle(*plot_, curPoint2, 2, cv::Scalar(0,0,255));
      // cv::line(*plot_, prevPoint1, curPoint1, cv::Scalar(0,255,0));
      // cv::line(*plot_, prevPoint2, curPoint2, cv::Scalar(0,0,255));
      prevPoint1 = curPoint1;
      prevPoint2 = curPoint2;
      cv::imshow("plot_", *plot_);
      cv::waitKey(0);  // wait for a key press before processing the next frame
    }
  }
}
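
The drawing calls above dereference plot_ without showing its setup; the axis lines at x=250 and y=250 imply a canvas of at least 500x500 pixels. A minimal sketch of a matching initialization (the cv::Mat* member type is an assumption based on the *plot_ dereferences):

// Hedged sketch: a 500x500 white BGR canvas whose center (250,250)
// is the trajectory origin used by the loop above.
plot_ = new cv::Mat(500, 500, CV_8UC3, cv::Scalar(255, 255, 255));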
Example #4
void BenchmarkNode::runFromFolder(int start)
{
  std::ofstream outfile;
  outfile.open("/home/worxli/Datasets/data/associate_unscaled.txt");
  // outfile.open("/home/worxli/data/test/associate_unscaled.txt");
  for(int img_id = start; ; ++img_id)
  {
    // load image
    std::stringstream ss;
    ss << "/home/worxli/Datasets/data/img/color" << img_id << ".png";
    // ss << "/home/worxli/data/test/img/color" << img_id << ".png";
    std::cout << "reading image " << ss.str() << std::endl;
    cv::Mat img(cv::imread(ss.str().c_str(), 0));

    // end loop if no images left
    if(img.empty())
      break;

    assert(!img.empty());

    // process frame
    vo_->addImage(img, img_id);

    // display tracking quality
    if(vo_->lastFrame() != NULL)
    {
      int id = vo_->lastFrame()->id_;
      std::cout << "Frame-Id: " << id << " \t"
                << "#Features: " << vo_->lastNumObservations() << " \t"
                << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms \n";

      // access the pose of the camera via vo_->lastFrame()->T_f_w_.
      //std::cout << vo_->lastFrame()->T_f_w_ << std::endl;
      //std::cout << vo_->lastFrame()->pos() << std::endl;

      Quaterniond quat = vo_->lastFrame()->T_f_w_.unit_quaternion();
      Vector3d trans = vo_->lastFrame()->T_f_w_.translation();

      outfile << trans.x() << " "
              << trans.y() << " "
              << trans.z() << " "

              << quat.x()  << " "
              << quat.y()  << " "
              << quat.z()  << " "
              << quat.w()  << " "

              << "depth/mapped" << img_id << ".png "
              << "img/color" << img_id << ".png\n";

    }
  }
  outfile.close();
}
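
Each line written to associate_unscaled.txt has the form "tx ty tz qx qy qz qw depth_path color_path", a TUM-RGBD-style associate file without timestamps. A minimal sketch of reading it back (needs <fstream>; nothing beyond the writer above is assumed):

std::ifstream infile("/home/worxli/Datasets/data/associate_unscaled.txt");
double tx, ty, tz, qx, qy, qz, qw;
std::string depth_path, color_path;
while(infile >> tx >> ty >> tz >> qx >> qy >> qz >> qw >> depth_path >> color_path)
{
  Eigen::Quaterniond quat(qw, qx, qy, qz);  // Eigen constructor order is (w, x, y, z)
  Eigen::Vector3d trans(tx, ty, tz);
  // reconstruct the pose from quat/trans as needed
}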
Example #5
File: vo_node.cpp  Project: ruffsl/rpg_svo
bool VoNode::initCb(){
    srv.request.timeA = time_first;
    srv.request.timeB = time_second;

    std::cout<<"\ntime_first:\n"<<(time_first)<<std::endl;
    std::cout<<"\ntime_second:\n"<<(time_second)<<std::endl;

    ROS_INFO("Calling service get_between_pose");

    if (client.call(srv)){
        ROS_INFO("Got inti pose from get_between_pose srv!");
        geometry_msgs::Pose m = srv.response.A2B;;
        Eigen::Affine3d e = Eigen::Translation3d(m.position.x,
                                               m.position.y,
                                               m.position.z) *
              Eigen::Quaterniond(m.orientation.w,
                                 m.orientation.x,
                                 m.orientation.y,
                                 m.orientation.z);

        Eigen::Matrix3d rot = e.rotation();
        Eigen::Vector3d trans = e.translation();

        std::cout<<"Time diff in first and second:\n"<<(time_second-time_first)<<std::endl;
        std::cout<<time_first<<std::endl<<time_second<<std::endl;

        SE3 current_trans = SE3(rot, trans);

        std::cout << "trans:\n" << trans << std::endl;
        std::cout << "rot:\n" << rot << std::endl;

        vo_->InitPose(current_trans);
        our_scale_ = current_trans.matrix().block<3,1>(0,3).norm();
        return true;
    }
    else{
        ROS_ERROR("Failed to call service get_between_pose");
        return false;
    }
}
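
our_scale_ is read from the translation column of the homogeneous SE3 matrix. With Sophus, the block expression is equivalent to the translation() accessor used earlier in this function; a short check of that equivalence (plain Sophus/Eigen, needs <cassert> and <cmath>):

// block<3,1>(0,3) of the 4x4 matrix is exactly the SE3 translation part,
// so both expressions yield the same scale.
double scale_from_block    = current_trans.matrix().block<3,1>(0,3).norm();
double scale_from_accessor = current_trans.translation().norm();
assert(std::fabs(scale_from_block - scale_from_accessor) < 1e-12);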
Example #6
void VoNode::imgCb(const sensor_msgs::ImageConstPtr& msg)
{
  // Dummy data as the real position for the system.
//  static int old = msg->header.seq;
//  if(msg->header.seq == old)
//      filtered_pose.pose.position.z = 0;
//  else{
//      filtered_pose.pose.position.z += 10.0/70.0 * (msg->header.seq-old);
//  }
//  old = msg->header.seq;

  ROS_INFO("Frame seq: %i", msg->header.seq);

#ifdef USE_ASE_IMU
  tf::StampedTransform transform;
  try {
    listener_.lookupTransform("/camera", "/worldNED",
                              ros::Time(0), transform);
  } catch (tf::TransformException ex) {
    ROS_ERROR("%s", ex.what());
    return;
  }
#endif

  cv::Mat img;
  try {
    img = cv_bridge::toCvShare(msg, "mono8")->image;
  } catch (cv_bridge::Exception& e) {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;  // skip this frame rather than pass an empty image downstream
  }
  processUserActions();

//  Quaterniond quat(filtered_pose.pose.orientation.w,filtered_pose.pose.orientation.x,filtered_pose.pose.orientation.y, filtered_pose.pose.orientation.z);
//  Matrix3d orient = quat.toRotationMatrix();
//  Vector3d pos(filtered_pose.pose.position.x,filtered_pose.pose.position.y, filtered_pose.pose.position.z);

  Quaterniond quat;
  quat.setIdentity();
  Vector3d pos;
  pos.setZero();
  Matrix3d orient = quat.toRotationMatrix();

#ifdef USE_ASE_IMU
  tf::quaternionTFToEigen(transform.getRotation(), quat);
  orient = quat.toRotationMatrix();
  tf::vectorTFToEigen(transform.getOrigin(), pos);
#endif

  vo_->addImage(img, msg->header.stamp.toSec(), orient, pos);
  visualizer_.publishMinimal(img, vo_->lastFrame(), *vo_, msg->header.stamp.toSec());

  if(publish_markers_ && vo_->stage() != FrameHandlerBase::STAGE_PAUSED)
    visualizer_.visualizeMarkers(vo_->lastFrame(), vo_->coreKeyframes(), vo_->map());

  if(publish_dense_input_)
    visualizer_.exportToDense(vo_->lastFrame());

  if(vo_->stage() == FrameHandlerMono::STAGE_PAUSED)
    usleep(100000);
}
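
The callback mixes ROS, tf, cv_bridge, and Eigen calls; a sketch of the includes it needs, using the standard headers of those packages (tf_conversions provides the quaternionTFToEigen and vectorTFToEigen helpers used in the USE_ASE_IMU branch):

#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>
#include <tf/transform_listener.h>
#include <tf_conversions/tf_eigen.h>  // tf::quaternionTFToEigen, tf::vectorTFToEigen
#include <Eigen/Dense>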
Example #7
File: vo_node.cpp  Project: ruffsl/rpg_svo
void VoNode::imgCb(const sensor_msgs::ImageConstPtr& msg)
{
  cv::Mat img;
  try {
    img = cv_bridge::toCvShare(msg, "mono8")->image;
  } catch (cv_bridge::Exception& e) {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;  // skip this frame rather than pass an empty image downstream
  }
  processUserActions();

  double diff1 = msg->header.stamp.toSec() - time_start.toSec();
//  std::cout<<"diff1: "<< diff1 <<std::endl;

  if((diff1 > time_window.toSec()) && state == SSTART){
      vo_->reset();
      vo_->start();
      state = SGOT_KEY_1;
  }
  vo_->addImage(img, msg->header.stamp.toSec());
  svo_scale_ = vo_->svo_scale_;

  if((vo_->stage() == FrameHandlerMono::STAGE_SECOND_FRAME) && state == SGOT_KEY_1)
  {
    std::cout << "the first frame at time: " << msg->header.stamp.toSec() << std::endl;
    time_first = msg->header.stamp;
    state = SGOT_KEY_2;
  }

  if((vo_->stage() == FrameHandlerMono::STAGE_DEFAULT_FRAME) && state == SGOT_KEY_2)
  {
    //std::cout << "the second frame at time: " << msg->header.stamp.toSec() << std::endl;
    time_second = msg->header.stamp;
    state = SGETTING_SCALE;
  }

  double diff2 = msg->header.stamp.toSec() - time_second.toSec();
//  std::cout<<"diff2: "<< diff2 <<std::endl;
//  std::cout<<"state: "<< state <<std::endl;

  if((diff2 > time_window.toSec()) && state == SGETTING_SCALE){
      std::cout<<"time_first: "<< time_first <<std::endl;
      std::cout<<"time_second: "<< time_second <<std::endl;
      if(VoNode::initCb()){
          state = SDEFAULT;
      }
  }

  visualizer_.publishMinimal(img, vo_->lastFrame(), *vo_, msg->header.stamp.toSec());

  if(publish_markers_){
      visualizer_.visualizeMarkers(vo_->lastFrame(), vo_->coreKeyframes(), vo_->map(), state == SDEFAULT, svo_scale_, our_scale_);
  }

  if(publish_dense_input_)
    visualizer_.exportToDense(vo_->lastFrame());

  if(vo_->stage() == FrameHandlerMono::STAGE_PAUSED)
    usleep(100000);
}
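
The callback drives a small state machine (SSTART -> SGOT_KEY_1 -> SGOT_KEY_2 -> SGETTING_SCALE -> SDEFAULT) to timestamp the first two keyframes before querying the pose service in initCb() (Example #5). A plausible definition of that enum, inferred from the transitions above (the real declaration is not part of this snippet):

enum State {
  SSTART,          // wait out time_window, then reset and start SVO
  SGOT_KEY_1,      // SVO running; waiting for STAGE_SECOND_FRAME (first keyframe)
  SGOT_KEY_2,      // time_first recorded; waiting for STAGE_DEFAULT_FRAME
  SGETTING_SCALE,  // time_second recorded; wait time_window, then call initCb()
  SDEFAULT         // scale initialized; normal tracking
};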