Example #1
void Visualizer::visualizeMarkers(
    const FramePtr& frame,
    const set<FramePtr>& core_kfs,
    const Map& map)
{
  if(frame == NULL)
    return;

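  // Broadcast the current camera pose over TF, relating the "cam_pos" and
  // "worldNED" frames.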
  vk::output_helper::publishTfTransform(
      frame->T_f_w_*T_world_from_vision_.inverse(),
      ros::Time(frame->timestamp_), "cam_pos", "worldNED", br_);

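  // Build the (comparatively expensive) marker messages only when someone is
  // actually subscribed.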
  if(pub_frames_.getNumSubscribers() > 0 || pub_points_.getNumSubscribers() > 0)
  {
    vk::output_helper::publishHexacopterMarker(
        pub_frames_, "cam_pos", "cams", ros::Time(frame->timestamp_),
        1, 0, 0.3*VIS_SCALE, Vector3d(0.,0.,1.));
    vk::output_helper::publishPointMarker(
        pub_points_, T_world_from_vision_*frame->pos(), "trajectory",
        ros::Time::now(), trace_id_, 0, 0.006*VIS_SCALE, Vector3d(0.,0.,0.5));
    if(frame->isKeyframe() || publish_map_every_frame_)
      publishMapRegion(core_kfs);
    removeDeletedPts(map);
  }
}
Example #2
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");

  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;

  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;

  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");

  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }

  // Rescale the map such that the mean scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
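  // Chain the relative pose onto the reference pose, then overwrite the
  // translation so that the current camera centre lies at the scaled baseline
  // from the reference camera centre (t = -R * C, with C the desired camera
  // centre in world coordinates).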
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
      -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);

      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);

      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
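The map rescaling step above can be read in isolation: a minimal sketch, assuming only Eigen and the standard library (mapScaleFromDepths and desired_mean_depth are hypothetical names; the nth_element call stands in for vk::getMedian).

#include <algorithm>
#include <vector>
#include <Eigen/Core>

// Returns the factor that rescales the triangulated points so that their
// median depth equals desired_mean_depth (Config::mapScale() above).
double mapScaleFromDepths(const std::vector<Eigen::Vector3d>& xyz_in_cur,
                          double desired_mean_depth)
{
  std::vector<double> depth_vec;
  depth_vec.reserve(xyz_in_cur.size());
  for(size_t i = 0; i < xyz_in_cur.size(); ++i)
    depth_vec.push_back(xyz_in_cur[i].z());
  // Median via partial sort.
  std::nth_element(depth_vec.begin(),
                   depth_vec.begin() + depth_vec.size()/2,
                   depth_vec.end());
  double scene_depth_median = depth_vec[depth_vec.size()/2];
  return desired_mean_depth / scene_depth_median;
}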
Example #3
void Visualizer::visualizeMarkers(
    const FramePtr& frame,
    const set<FramePtr>& core_kfs,
    const Map& map,
    bool inited,
    double svo_scale,
    double our_scale)
{
  if((frame == NULL) || !inited)
  {
    // frame may be NULL here, so do not dereference it for the timestamp.
    vk::output_helper::publishTfTransform(
        // SE3(Matrix3d::Identity(), Vector3d::Zero()),
        T_world_from_vision_,
        (frame != NULL) ? ros::Time(frame->timestamp_) : ros::Time::now(),
        "odom", "cam_pos", br_);
    return;
  }

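  // Invert the camera pose and rescale its translation by our_scale / svo_scale,
  // correcting SVO's arbitrary map scale with the externally supplied scale
  // before broadcasting.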
  SE3 temp = (frame->T_f_w_*T_world_from_vision_.inverse()).inverse();
  double scale = our_scale / svo_scale;
  temp.translation() = temp.translation() * scale;

  vk::output_helper::publishTfTransform(
      temp,
      ros::Time(frame->timestamp_), "odom", "cam_pos", br_);

  if(pub_frames_.getNumSubscribers() > 0 || pub_points_.getNumSubscribers() > 0)
  {
    vk::output_helper::publishHexacopterMarker(
        pub_frames_, "cam_pos", "cams", ros::Time(frame->timestamp_),
        1, 0, 0.3, Vector3d(0.,0.,1.));
    vk::output_helper::publishPointMarker(
        pub_points_, T_world_from_vision_*frame->pos(), "trajectory",
        ros::Time::now(), trace_id_, 0, 0.006, Vector3d(0.,0.,0.5));
    if(frame->isKeyframe() || publish_map_every_frame_)
      publishMapRegion(core_kfs);
    removeDeletedPts(map);
  }
}
Example #4
InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur, Matrix3d orient, Vector3d pos)
{
  trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
  SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features");

  if(disparities_.size() < Config::initMinTracked())
    return FAILURE;

  double disparity = vk::getMedian(disparities_);
  SVO_INFO_STREAM("Init: KLT "<<disparity<<"px average disparity.");
  if(disparity < Config::initMinDisparity())
    return NO_KEYFRAME;

  computeHomography(
      f_ref_, f_cur_,
      frame_ref_->cam_->errorMultiplier2(), Config::poseOptimThresh(),
      inliers_, xyz_in_cur_, T_cur_from_ref_);
  SVO_INFO_STREAM("Init: Homography RANSAC "<<inliers_.size()<<" inliers.");

  if(inliers_.size() < Config::initMinInliers())
  {
    SVO_WARN_STREAM("Init WARNING: "<<Config::initMinInliers()<<" inliers minimum required.");
    return FAILURE;
  }

  // Transformation in real world
  T_cur_frame_real_scale = SE3(orient, pos);
  Vector3d trans = T_cur_frame_real_scale.translation() - T_first_frame_real_scale.translation();
  double length_real = trans.norm();

  SVO_INFO_STREAM("Real world transform x: " << trans[0] << " y:" << trans[1] << " z:" << trans[2] << " length:" << length_real);

  double x = T_cur_from_ref_.translation()[0];
  double y = T_cur_from_ref_.translation()[1];
  double z = T_cur_from_ref_.translation()[2];
  double length_svo = T_cur_from_ref_.translation().norm();

  SVO_INFO_STREAM("SVO transform x: " << x << " y:" << y << " z:" << z << " length:" << length_svo);

#ifdef USE_ASE_IMU
  // Rescale the map such that the real length of the movement matches with the svo movement length
  double scale = length_real / length_svo;
#else
  // Rescale the map such that the mean scene depth is equal to the specified scale
  vector<double> depth_vec;
  for(size_t i=0; i<xyz_in_cur_.size(); ++i)
    depth_vec.push_back((xyz_in_cur_[i]).z());
  double scene_depth_median = vk::getMedian(depth_vec);
  double scale = Config::mapScale()/scene_depth_median;
#endif
  frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;
  frame_cur->T_f_w_.translation() =
     -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));

  //frame_cur->T_f_w_ = T_cur_frame_real_scale;
  //frame_ref_->T_f_w_ = T_first_frame_real_scale;

  // For each inlier create 3D point and add feature in both frames
  SE3 T_world_cur = frame_cur->T_f_w_.inverse();
  for(vector<int>::iterator it=inliers_.begin(); it!=inliers_.end(); ++it)
  {
    Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
    Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
    if(frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
    {
      Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale);
      Point* new_point = new Point(pos);

      Feature* ftr_cur(new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0));
      frame_cur->addFeature(ftr_cur);
      new_point->addFrameRef(ftr_cur);

      Feature* ftr_ref(new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0));
      frame_ref_->addFeature(ftr_ref);
      new_point->addFrameRef(ftr_ref);
    }
  }
  return SUCCESS;
}
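The USE_ASE_IMU branch reduces to comparing two baseline lengths: a minimal standalone sketch, assuming the (older) Sophus SE3 type used by these examples (the function name and parameter list are hypothetical).

#include <Eigen/Core>
#include <sophus/se3.h>

// Metric scale = externally measured camera displacement divided by the
// (up-to-scale) displacement estimated from the homography decomposition.
double scaleFromRealBaseline(const Sophus::SE3& T_first_frame_real_scale,
                             const Sophus::SE3& T_cur_frame_real_scale,
                             const Sophus::SE3& T_cur_from_ref)
{
  double length_real = (T_cur_frame_real_scale.translation()
                        - T_first_frame_real_scale.translation()).norm();
  double length_svo = T_cur_from_ref.translation().norm();
  return length_real / length_svo;
}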
Example #5
	InitResult Initialization::addSecondFrame(FramePtr frame_cur)
	{
		trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_);
		std::cout << "Init: KLT tracked " << disparities_.size() << " features" << std::endl;

		// Number of features successfully tracked by KLT optical flow
		if (disparities_.size() < 50)
			return FAILURE;

		// Median of the pixel displacements between the two frames after KLT tracking
		double disparity = getMedian(disparities_);
		std::cout << "Init: KLT " << disparity << "px average disparity." << std::endl;
		// If the median disparity is below the given threshold this frame is not a
		// keyframe, i.e. the first two frames must not be taken too close together
		if (disparity < 50.0)
			return NO_KEYFRAME;
		// Compute the homography
		computeHomography(
			f_ref_, f_cur_,
			frame_ref_->cam_->getFocalLength(), 2.0,
			inliers_, xyz_in_cur_, T_cur_from_ref_);
		std::cout << "Init: Homography RANSAC " << inliers_.size() << " inliers." << std::endl;
		// After computing the homography, use the inlier count to decide whether
		// tracking succeeded
		if (inliers_.size() < 40)
		{
			std::cerr << "Init WARNING: 40 inliers minimum required." << std::endl;
			return FAILURE;
		}

		// Triangulate 3D points from the homography inliers, take their median
		// depth, and derive the scale that maps it to the specified value
		std::vector<double> depth_vec;
		for (size_t i = 0; i < xyz_in_cur_.size(); ++i)
			depth_vec.push_back((xyz_in_cur_[i]).z());
		double scene_depth_median = getMedian(depth_vec);
		double scale = 1.0 / scene_depth_median;
		// Compute the relative SE3 transform
		frame_cur->T_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_;

		// Apply the scale to the translation
		frame_cur->T_f_w_.translation() =
			-frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos()));
		
		// For each inlier create a 3D point, build the features, and add them to both frames
		SE3 T_world_cur = frame_cur->T_f_w_.inverse();
		for (std::vector<int>::iterator it = inliers_.begin(); it != inliers_.end(); ++it)
		{
			Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y);
			Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y);
			if (frame_ref_->cam_->isInFrame(px_cur.cast<int>(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast<int>(), 10) && xyz_in_cur_[*it].z() > 0)
			{
				Vector3d pos = T_world_cur * (xyz_in_cur_[*it] * scale);// 将相机下的点坐标转世界坐标
				Point3D *new_point = new Point3D(pos);

				Feature* ftr_cur = new Feature(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0);
				frame_cur->addFeature(ftr_cur);
				// Store the features observing this point, so that when the point is
				// deleted the corresponding features can be deleted as well
				new_point->addFrameRef(ftr_cur);

				Feature* ftr_ref = new Feature(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0);
				frame_ref_->addFeature(ftr_ref);
				new_point->addFrameRef(ftr_ref);
			}
		}
		return SUCCESS;
	}
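All of the addSecondFrame variants rely on the relation between T_f_w and the camera centre when they rescale the translation: a minimal sketch, again assuming the Sophus SE3 type used above (both helper names are hypothetical; frame->pos() in the examples corresponds to cameraCenter here).

#include <Eigen/Core>
#include <sophus/se3.h>

// For T_f_w = (R, t) mapping world -> frame, the camera centre in world
// coordinates is C = -R^T * t; fixing a desired centre C gives t = -R * C.
Eigen::Vector3d cameraCenter(const Sophus::SE3& T_f_w)
{
  return -(T_f_w.rotation_matrix().transpose() * T_f_w.translation());
}

void setCameraCenter(Sophus::SE3& T_f_w, const Eigen::Vector3d& center)
{
  // In the examples, center = p_ref + scale * (p_cur - p_ref), which places
  // the current camera at the rescaled baseline from the reference camera.
  T_f_w.translation() = -(T_f_w.rotation_matrix() * center);
}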