Example #1
void Trajectory2d::toGnuplot(const std::string& filename_without_suffix) const
{
  const std::string gnuplot_filename = filename_without_suffix + ".gpl";
  const std::string pose_filename = filename_without_suffix + ".gpldata";
  const std::string curvature_filename = filename_without_suffix + "_curve.gpldata";

  // -- data formatting --

  std::ofstream pose_file(pose_filename.c_str());

  pose_file << "# Trajectory data set containing "<< m_trajectory.size() << " sets of position and orientation information." << std::endl;
  pose_file << "# To draw the poses the orientation is coded as two vectors in gnuplot from-delta style." << std::endl;
  pose_file << "# from: x  y  | delta: x  y" << std::endl;
  pose_file << "# "<< std::endl;
  pose_file << "# "<< std::endl;
  pose_file << "# The trajectory is for "
            << (this->isForwardTrajectory() ? "FORWARD" : "BACKWARD")
            << " motion." << std::endl;


  for (std::size_t i=0; i<m_trajectory.size(); ++i)
  {
    const Position2d from(m_trajectory[i].getPosition());
    const Position2d delta = m_trajectory[i].getOrientation() * Position2d::UnitX() * 0.02;

    pose_file << from.x()  << "  " << from.y() << "  ";
    if (velocityAvailable())
    {
      pose_file << m_trajectory[i].getVelocity() << "  ";
    }
    pose_file << delta.x() << "  " << delta.y() << "  ";
    if (velocityAvailable())
    {
      pose_file << 0.;
    }
    pose_file << std::endl;
  }

  pose_file << std::endl;

  // curvature plots
  if (curvatureAvailable())
  {
    std::ofstream curve_file(curvature_filename.c_str());

    for (std::size_t i=0; i<m_trajectory.size(); ++i)
    {
      // start for curvature lines
      const Position2d from(m_trajectory[i].getPosition());
      // end point for curvature lines
      const double curvature = m_trajectory[i].getCurvature() * 0.5; // apply scale for better visualization
      const double yaw = m_trajectory[i].getYaw();
      Position2d to;
      to.x() = from.x() - std::sin(yaw) * curvature;
      to.y() = from.y() + std::cos(yaw) * curvature;

      curve_file << from.x() << "  " << from.y();
      if (velocityAvailable())
      {
        curve_file << "  0.";
      }
      curve_file << std::endl;
      curve_file << to.x()   << "  " << to.y();
      if (velocityAvailable())
      {
        curve_file << "  0.";
      }
      curve_file << std::endl << std::endl << std::endl; // two blank lines start a new gnuplot data block, so segments are not connected
    }
    curve_file.close();
  }

  pose_file.close();

  // -- the plot itself --

  std::ofstream gnuplot_file(gnuplot_filename.c_str(), std::ios::out);

  const std::string plot_type = velocityAvailable() ? "splot" : "plot";

  gnuplot_file << "# Gnuplot file. Draw with gnuplot -p "<< pose_filename << std::endl;
  gnuplot_file << "set mapping cartesian" << std::endl;
  gnuplot_file << "set mouse" << std::endl;
  gnuplot_file << "set size ratio -1" << std::endl;
  gnuplot_file << plot_type + " '"<< pose_filename << "' using 1:2:3:4";
  if (velocityAvailable())
  {
    gnuplot_file << ":5:6";
  }
  gnuplot_file << " with vector";
  if (curvatureAvailable())
  {
    gnuplot_file << ", " << "'" << curvature_filename << "' using 1:2";
    if (velocityAvailable())
    {
      gnuplot_file << ":3";
    }
    gnuplot_file << " w l";
  }
  gnuplot_file << std::endl;
  gnuplot_file.close();
  std::cout << "GnuPlot file written to " << gnuplot_filename << std::endl;
}
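A minimal usage sketch for the exporter above, assuming a populated Trajectory2d instance; the function name exportForPlotting and the output prefix are illustrative, not part of the original code.

// Hypothetical caller (sketch): only toGnuplot() comes from the example above.
void exportForPlotting(const Trajectory2d& trajectory)
{
  // Writes trajectory.gpl (the plot script), trajectory.gpldata (poses) and,
  // when curvature data is available, trajectory_curve.gpldata.
  trajectory.toGnuplot("trajectory");

  // The generated script can then be rendered interactively with:
  //   gnuplot -p trajectory.gpl
}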
Example #2
void pcl::face_detection::FaceDetectorDataProvider<FeatureType, DataSet, LabelType, ExampleIndex, NodeType>::initialize(std::string & data_dir)
{
  std::string start;
  std::string ext = std::string ("pcd");
  bf::path dir = data_dir;

  std::vector < std::string > files;
  getFilesInDirectory (dir, start, files, ext);

  //besides collecting the file names, bin the poses by pitch and yaw
  std::vector < std::vector<int> > yaw_pitch_bins;
  std::vector < std::vector<std::vector<std::string> > > image_files_per_bin;

  float res_yaw = 15.f;
  float res_pitch = res_yaw;
  int min_yaw = -75;
  int min_pitch = -60;

  //number of bins needed to cover [min, -min] inclusively at the given resolution
  int num_yaw = (std::abs (min_yaw) * 2) / static_cast<int>(res_yaw) + 1;
  int num_pitch = (std::abs (min_pitch) * 2) / static_cast<int>(res_pitch) + 1;

  yaw_pitch_bins.resize (num_yaw);
  image_files_per_bin.resize (num_yaw);
  for (int i = 0; i < num_yaw; i++)
  {
    yaw_pitch_bins[i].resize (num_pitch);
    image_files_per_bin[i].resize (num_pitch);
    for (int j = 0; j < num_pitch; j++)
    {
      yaw_pitch_bins[i][j] = 0;
    }
  }

  for (size_t i = 0; i < files.size (); i++)
  {
    std::stringstream filestream;
    filestream << data_dir << "/" << files[i];
    std::string file = filestream.str ();

    std::string pose_file (files[i]);
    boost::replace_all (pose_file, ".pcd", "_pose.txt");

    Eigen::Matrix4f pose_mat;
    pose_mat.setIdentity (4, 4);

    std::stringstream filestream_pose;
    filestream_pose << data_dir << "/" << pose_file;
    pose_file = filestream_pose.str ();

    bool result = readMatrixFromFile (pose_file, pose_mat);
    if (result)
    {
      Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
      ea *= 57.2957795f; //transform it to degrees to do the binning
      int y = static_cast<int>(pcl_round ((ea[0] + static_cast<float>(std::abs (min_yaw))) / res_yaw));
      int p = static_cast<int>(pcl_round ((ea[1] + static_cast<float>(std::abs (min_pitch))) / res_pitch));

      if (y < 0)
        y = 0;
      if (p < 0)
        p = 0;
      if (p >= num_pitch)
        p = num_pitch - 1;
      if (y >= num_yaw)
        y = num_yaw - 1;

      assert (y >= 0 && y < num_yaw);
      assert (p >= 0 && p < num_pitch);

      yaw_pitch_bins[y][p]++;

      image_files_per_bin[y][p].push_back (file);
    }
  }

  pcl::face_detection::showBining (num_pitch, res_pitch, min_pitch, num_yaw, res_yaw, min_yaw, yaw_pitch_bins);

  int max_elems = 0;
  int total_elems = 0;

  for (int i = 0; i < num_yaw; i++)
  {
    for (int j = 0; j < num_pitch; j++)
    {
      total_elems += yaw_pitch_bins[i][j];
      if (yaw_pitch_bins[i][j] > max_elems)
        max_elems = yaw_pitch_bins[i][j];
    }
  }

  float average = static_cast<float> (total_elems) / (static_cast<float> (num_pitch * num_yaw));
  std::cout << "The average number of images per bin is: " << average << std::endl;

  std::cout << "Total number of images in the dataset:" << total_elems << std::endl;
  //reduce dataset imbalance by capping each sufficiently populated bin at min_images_per_bin_ images
  if (min_images_per_bin_ != -1)
  {
    std::cout << "Reducing dataset imbalance." << std::endl;
    for (int i = 0; i < num_yaw; i++)
    {
      for (int j = 0; j < num_pitch; j++)
      {
        if (yaw_pitch_bins[i][j] >= min_images_per_bin_)
        {
          std::random_shuffle (image_files_per_bin[i][j].begin (), image_files_per_bin[i][j].end ());
          image_files_per_bin[i][j].resize (min_images_per_bin_);
          yaw_pitch_bins[i][j] = min_images_per_bin_;
        }

        for (size_t ii = 0; ii < image_files_per_bin[i][j].size (); ii++)
        {
          image_files_.push_back (image_files_per_bin[i][j][ii]);
        }
      }
    }
  }

  pcl::face_detection::showBining (num_pitch, res_pitch, min_pitch, num_yaw, res_yaw, min_yaw, yaw_pitch_bins);
  std::cout << "Total number of images in the dataset:" << image_files_.size () << std::endl;
}
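The yaw/pitch bin selection and clamping above reads more easily as a standalone helper. A sketch, assuming std::round is an acceptable stand-in for pcl_round; BinIndex and binPose are made-up names.

#include <algorithm>
#include <cmath>
#include <cstdlib>

struct BinIndex { int yaw; int pitch; };

// Sketch of the binning used above: shift the angle into a non-negative range,
// quantize by the bin resolution, then clamp to the valid bin indices.
BinIndex binPose(float yaw_deg, float pitch_deg,
                 int min_yaw, int min_pitch,
                 float res_yaw, float res_pitch,
                 int num_yaw, int num_pitch)
{
  int y = static_cast<int>(std::round((yaw_deg + static_cast<float>(std::abs(min_yaw))) / res_yaw));
  int p = static_cast<int>(std::round((pitch_deg + static_cast<float>(std::abs(min_pitch))) / res_pitch));
  y = std::max(0, std::min(y, num_yaw - 1));
  p = std::max(0, std::min(p, num_pitch - 1));
  return {y, p};
}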
Example #3
void InitGui() {
  InitTrackerGui(gui_vars, window_width, window_height, image_width,
                 image_height, rig.cameras_.size());

  //  std::cerr << "Viewport: " << gui_vars.camera_view->v.l << " " <<
  //               gui_vars.camera_view->v.r() << " " <<
  // gui_vars.camera_view->v.b << " " <<
  //               gui_vars.camera_view->v.t() << std::endl;

  pangolin::RegisterKeyPressCallback(
      pangolin::PANGO_SPECIAL + pangolin::PANGO_KEY_RIGHT,
      [&]() { is_stepping = true; });

  pangolin::RegisterKeyPressCallback(pangolin::PANGO_CTRL + 'r', [&]() {
    camera_img.reset();
    is_keyframe = true;
    is_prev_keyframe = true;
    is_running = false;
    InitTracker();
    poses.clear();
    gui_vars.scene_graph.Clear();
    gui_vars.scene_graph.AddChild(&gui_vars.grid);
    axes.clear();
    LoadCameras();
    prev_delta_t_ba = Sophus::SE3d();
    prev_t_ba = Sophus::SE3d();
    last_t_ba = Sophus::SE3d();
  });

  pangolin::RegisterKeyPressCallback(pangolin::PANGO_CTRL + 's', [&]() {
    // write all the poses to a file.
    std::ofstream pose_file("poses.txt", std::ios_base::trunc);
    for (const auto& pose : poses) {
      pose_file << pose->t_wp.translation().transpose().format(
                       sdtrack::kLongCsvFmt) << std::endl;
    }
  });

  pangolin::RegisterKeyPressCallback(' ', [&]() { is_running = !is_running; });

  pangolin::RegisterKeyPressCallback('b', [&]() {
    // last_optimization_level = 0;
    tracker.OptimizeTracks(last_optimization_level, optimize_landmarks,
                           optimize_pose);
    UpdateCurrentPose();
  });

  pangolin::RegisterKeyPressCallback('B', [&]() {
    do_bundle_adjustment = !do_bundle_adjustment;
    std::cerr << "Do BA:" << do_bundle_adjustment << std::endl;
  });

  pangolin::RegisterKeyPressCallback('S', [&]() {
    do_start_new_landmarks = !do_start_new_landmarks;
    std::cerr << "Do SNL:" << do_start_new_landmarks << std::endl;
  });

  pangolin::RegisterKeyPressCallback('2',
                                     [&]() { last_optimization_level = 2; });

  pangolin::RegisterKeyPressCallback('3',
                                     [&]() { last_optimization_level = 3; });

  pangolin::RegisterKeyPressCallback('1',
                                     [&]() { last_optimization_level = 1; });

  pangolin::RegisterKeyPressCallback('0',
                                     [&]() { last_optimization_level = 0; });

  pangolin::RegisterKeyPressCallback('9', [&]() {
    last_optimization_level = 0;
    tracker.OptimizeTracks(-1, optimize_landmarks, optimize_pose);
    UpdateCurrentPose();
  });

  pangolin::RegisterKeyPressCallback('p', [&]() {
    tracker.PruneTracks();
    // Update the pose t_ab based on the result from the tracker.
    UpdateCurrentPose();
    BaAndStartNewLandmarks();
  });

  pangolin::RegisterKeyPressCallback('l', [&]() {
    optimize_landmarks = !optimize_landmarks;
    std::cerr << "optimize landmarks: " << optimize_landmarks << std::endl;
  });

  pangolin::RegisterKeyPressCallback('c', [&]() {
    optimize_pose = !optimize_pose;
    std::cerr << "optimize pose: " << optimize_pose << std::endl;
  });

  pangolin::RegisterKeyPressCallback('m', [&]() {
    is_manual_mode = !is_manual_mode;
    std::cerr << "Manual mode:" << is_manual_mode << std::endl;
  });

  pangolin::RegisterKeyPressCallback('k', [&]() {
    sdtrack::AlignmentOptions options;
    options.apply_to_kp = true;
    tracker.Do2dAlignment(options, tracker.GetImagePyramid(),
                          tracker.GetCurrentTracks(), last_optimization_level);
  });
}
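Every binding above follows the same pattern: a key mapped to a small lambda that flips a flag or triggers one tracker action. A sketch of that pattern with a hypothetical toggle; show_trajectory is not a variable from the original program.

#include <pangolin/pangolin.h>
#include <iostream>

static bool show_trajectory = true;  // hypothetical flag, for illustration only

void RegisterTrajectoryToggle() {
  // Same Pangolin callback pattern as in InitGui(): key press -> lambda -> flag flip.
  pangolin::RegisterKeyPressCallback('t', []() {
    show_trajectory = !show_trajectory;
    std::cerr << "Show trajectory: " << show_trajectory << std::endl;
  });
}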
Example #4
void pcl::face_detection::FaceDetectorDataProvider<FeatureType, DataSet, LabelType, ExampleIndex, NodeType>::getDatasetAndLabels(DataSet & data_set,
    std::vector<LabelType> & label_data, std::vector<ExampleIndex> & examples)
{
  srand (static_cast<unsigned int>(time (NULL)));
  std::random_shuffle (image_files_.begin (), image_files_.end ());
  std::vector < std::string > files;
  files = image_files_;
  files.resize (std::min (num_images_, static_cast<int> (files.size ())));

  std::vector < TrainingExample > training_examples;
  std::vector<float> labels;

  int total_neg, total_pos;
  total_neg = total_pos = 0;

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
  pcl::visualization::PCLVisualizer vis("training");
#endif

  for (size_t j = 0; j < files.size (); j++)
  {

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
    vis.removeAllPointClouds();
    vis.removeAllShapes();
#endif

    if ((j % 50) == 0)
    {
      std::cout << "Loading image..." << j << std::endl;
    }
    //1. Load clouds with and without labels
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointCloud<pcl::PointXYZ>::Ptr loaded_cloud (new pcl::PointCloud<pcl::PointXYZ>);

    pcl::io::loadPCDFile (files[j], *loaded_cloud);

    pcl::PointCloud<pcl::PointXYZL>::Ptr cloud_labels (new pcl::PointCloud<pcl::PointXYZL>);
    pcl::PointCloud<pcl::PointXYZL>::Ptr loaded_cloud_labels (new pcl::PointCloud<pcl::PointXYZL>);
    pcl::io::loadPCDFile (files[j], *loaded_cloud_labels);

    //crop images to remove as many NaNs as possible and reduce the memory footprint
    {
      size_t min_col, min_row;
      size_t max_col, max_row;
      min_col = min_row = std::numeric_limits<size_t>::max ();
      max_col = max_row = 0;

      for (size_t col = 0; col < loaded_cloud->width; col++)
      {
        for (size_t row = 0; row < loaded_cloud->height; row++)
        {
          if (pcl::isFinite (loaded_cloud->at (col, row)))
          {
            if (row < min_row)
              min_row = row;

            if (row > max_row)
              max_row = row;

            if (col < min_col)
              min_col = col;

            if (col > max_col)
              max_col = col;
          }
        }
      }

      //std::cout << min_col << " - " << max_col << std::endl;
      //std::cout << min_row << " - " << max_row << std::endl;

      cropCloud<pcl::PointXYZ> (min_col, max_col, min_row, max_row, *loaded_cloud, *cloud);
      cropCloud<pcl::PointXYZL> (min_col, max_col, min_row, max_row, *loaded_cloud_labels, *cloud_labels);

      /*pcl::visualization::PCLVisualizer vis ("training");
       vis.addPointCloud(loaded_cloud);
       vis.spin();*/
    }

    //Compute integral image over depth
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_depth;
    integral_image_depth.reset (new pcl::IntegralImage2D<float, 1> (false));

    int element_stride = sizeof(pcl::PointXYZ) / sizeof(float);
    int row_stride = element_stride * cloud->width;
    const float *data = reinterpret_cast<const float*> (&cloud->points[0]);
    integral_image_depth->setInput (data + 2, cloud->width, cloud->height, element_stride, row_stride);

    //Compute normals and normal integral images
    pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);

    if (USE_NORMALS_)
    {
      typedef typename pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> NormalEstimator_;
      NormalEstimator_ n3d;
      n3d.setNormalEstimationMethod (n3d.COVARIANCE_MATRIX);
      n3d.setInputCloud (cloud);
      n3d.setRadiusSearch (0.02);
      n3d.setKSearch (0);
      {
        pcl::ScopeTime t ("compute normals...");
        n3d.compute (*normals);
      }
    }

    int element_stride_normal = sizeof(pcl::Normal) / sizeof(float);
    int row_stride_normal = element_stride_normal * normals->width;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_x;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_y;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_z;

    if (USE_NORMALS_)
    {
      integral_image_normal_x.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_nx = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_x->setInput (data_nx, normals->width, normals->height, element_stride_normal, row_stride_normal);

      integral_image_normal_y.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_ny = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_y->setInput (data_ny + 1, normals->width, normals->height, element_stride_normal, row_stride_normal);

      integral_image_normal_z.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_nz = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_z->setInput (data_nz + 2, normals->width, normals->height, element_stride_normal, row_stride_normal);
    }

    //Using cloud labels estimate a 2D window from where to extract positive samples
    //Rest can be used to extract negative samples
    size_t min_col, min_row;
    size_t max_col, max_row;
    min_col = min_row = std::numeric_limits<size_t>::max ();
    max_col = max_row = 0;

    //std::cout << cloud_labels->width << " " << cloud_labels->height << std::endl;

    for (size_t col = 0; col < cloud_labels->width; col++)
    {
      for (size_t row = 0; row < cloud_labels->height; row++)
      {
        if (cloud_labels->at (col, row).label == 1)
        {
          if (row < min_row)
            min_row = row;

          if (row > max_row)
            max_row = row;

          if (col < min_col)
            min_col = col;

          if (col > max_col)
            max_col = col;
        }
      }
    }

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
    pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity(new pcl::PointCloud<pcl::PointXYZI>);
    cloud_intensity->width = cloud->width;
    cloud_intensity->height = cloud->height;
    cloud_intensity->points.resize(cloud->points.size());
    cloud_intensity->is_dense = cloud->is_dense;

    for (int jjj = 0; jjj < static_cast<int>(cloud->points.size()); jjj++)
    {
      cloud_intensity->points[jjj].getVector4fMap() = cloud->points[jjj].getVector4fMap();
      cloud_intensity->points[jjj].intensity = 0.f;
      int row, col;
      col = jjj % cloud->width;
      row = jjj / cloud->width;
      //std::cout << row << " " << col << std::endl;
      if (check_inside(col, row, min_col, max_col, min_row, max_row))
      {
        cloud_intensity->points[jjj].intensity = 1.f;
      }
    }

    pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity, "intensity");
    vis.addPointCloud(cloud_intensity, handler, "intensity_cloud");
#endif

    std::string pose_file (files[j]);
    boost::replace_all (pose_file, ".pcd", "_pose.txt");

    Eigen::Matrix4f pose_mat;
    pose_mat.setIdentity (4, 4);
    readMatrixFromFile (pose_file, pose_mat);

    Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
    Eigen::Vector3f trans_vector = Eigen::Vector3f (pose_mat (0, 3), pose_mat (1, 3), pose_mat (2, 3));

    pcl::PointXYZ center_point;
    center_point.x = trans_vector[0];
    center_point.y = trans_vector[1];
    center_point.z = trans_vector[2];

    int N_patches = patches_per_image_;
    int pos_extracted = 0;
    int neg_extracted = 0;
    int w_size_2 = static_cast<int> (w_size_ / 2);

    //************************************************
    //Second training style: Fanelli et al.'s journal paper description
    //************************************************
    {

      typedef std::pair<int, int> pixelpair;
      std::vector < pixelpair > negative_p, positive_p;
      //get negative and positive indices to sample from
      for (int col = 0; col < (static_cast<int> (cloud_labels->width) - w_size_); col++)
      {
        for (int row = 0; row < (static_cast<int> (cloud_labels->height) - w_size_); row++)
        {
          if (!pcl::isFinite (cloud->at (col + w_size_2, row + w_size_2)))
            continue;

          //reject patches where the fraction of finite depth values is below percent
          float percent = 0.5f;
          if (static_cast<float>(integral_image_depth->getFiniteElementsCount (col, row, w_size_, w_size_)) < (percent * static_cast<float>(w_size_ * w_size_)))
            continue;

          pixelpair pp = std::make_pair (col, row);
          if (cloud_labels->at (col + w_size_2, row + w_size_2).label == 1)
            positive_p.push_back (pp);
          else
            negative_p.push_back (pp);
        }
      }

      //shuffle and keep at most N_patches samples of each class
      std::random_shuffle (positive_p.begin (), positive_p.end ());
      std::random_shuffle (negative_p.begin (), negative_p.end ());
      positive_p.resize (std::min (static_cast<std::size_t> (N_patches), positive_p.size ()));
      negative_p.resize (std::min (static_cast<std::size_t> (N_patches), negative_p.size ()));

      //extract positive patches
      for (size_t p = 0; p < positive_p.size (); p++)
      {
        int col, row;
        col = positive_p[p].first;
        row = positive_p[p].second;

        pcl::PointXYZ patch_center_point;
        patch_center_point.x = cloud->at (col + w_size_2, row + w_size_2).x;
        patch_center_point.y = cloud->at (col + w_size_2, row + w_size_2).y;
        patch_center_point.z = cloud->at (col + w_size_2, row + w_size_2).z;

        TrainingExample te;
        te.iimages_.push_back (integral_image_depth);
        if (USE_NORMALS_)
        {
          te.iimages_.push_back (integral_image_normal_x);
          te.iimages_.push_back (integral_image_normal_y);
          te.iimages_.push_back (integral_image_normal_z);
        }

        te.row_ = row;
        te.col_ = col;
        te.wsize_ = w_size_;

        te.trans_ = center_point.getVector3fMap () - patch_center_point.getVector3fMap ();
        te.trans_ *= 1000.f; //transform it to millimeters
        te.rot_ = ea;
        te.rot_ *= 57.2957795f; //transform it to degrees

        labels.push_back (1);
        pos_extracted++;
        total_pos++;

        training_examples.push_back (te);
#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
        pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity2(new pcl::PointCloud<pcl::PointXYZI>(*cloud_intensity));
        for (int jjj = col; jjj < (col + w_size_); jjj++)
        {
          for (int iii = row; iii < (row + w_size_); iii++)
          {
            cloud_intensity2->at(jjj, iii).intensity = 2.f;
          }
        }

        vis.removeAllPointClouds();

        pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity2, "intensity");
        vis.addPointCloud(cloud_intensity2, handler, "cloud");
        vis.spinOnce();
        vis.spin();
        sleep(1);
#endif

      }

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
      std::cout << "Going to extract negative patches..." << std::endl;
      sleep(2);
#endif

      for (size_t p = 0; p < negative_p.size (); p++)
      {
        int col, row;
        col = negative_p[p].first;
        row = negative_p[p].second;

        TrainingExample te;
        te.iimages_.push_back (integral_image_depth);
        if (USE_NORMALS_)
        {
          te.iimages_.push_back (integral_image_normal_x);
          te.iimages_.push_back (integral_image_normal_y);
          te.iimages_.push_back (integral_image_normal_z);
        }

        te.row_ = row;
        te.col_ = col;
        te.wsize_ = w_size_;
        labels.push_back (0);
        neg_extracted++;
        total_neg++;

        training_examples.push_back (te);
#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
        pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity2(new pcl::PointCloud<pcl::PointXYZI>(*cloud_intensity));
        for (int jjj = col; jjj < (col + w_size_); jjj++)
        {
          for (int iii = row; iii < (row + w_size_); iii++)
          {
            cloud_intensity2->at(jjj, iii).intensity = 2.f;
          }
        }

        vis.removeAllPointClouds();

        pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity2, "intensity");
        vis.addPointCloud(cloud_intensity2, handler, "cloud");
        vis.spinOnce();
        vis.spin();
        sleep(1);
#endif
      }

      if (neg_extracted != N_patches)
      {
        std::cout << "Extracted " << neg_extracted << " negative patches" << std::endl;
        std::cout << files[j] << std::endl;
      }

      if (pos_extracted != N_patches)
      {
        std::cout << "Extracted " << pos_extracted << " positive patches" << std::endl;
        std::cout << files[j] << std::endl;
      }
    }
  }

  std::cout << training_examples.size () << " " << labels.size () << " " << total_neg << " " << total_pos << std::endl;

  //train random forest and make persistent
  std::vector<int> example_indices;
  for (size_t i = 0; i < labels.size (); i++)
    example_indices.push_back (static_cast<int> (i));

  label_data = labels;
  data_set = training_examples;
  examples = example_indices;
}
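One easily missed detail above is how the depth integral image is built directly over the z channel of the organized pcl::PointXYZ cloud, using strides measured in floats. A hedged sketch of just that step; setDepthChannelAsInput is an assumed helper name.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/features/integral_image2D.h>

// Point an integral image at the z component of every point of an organized cloud.
void setDepthChannelAsInput(pcl::IntegralImage2D<float, 1>& integral,
                            const pcl::PointCloud<pcl::PointXYZ>& cloud)
{
  // pcl::PointXYZ is stored as 4 floats (x, y, z, padding), so stepping by
  // element_stride floats moves from one point's z to the next point's z.
  const int element_stride = sizeof(pcl::PointXYZ) / sizeof(float);
  const int row_stride = element_stride * static_cast<int>(cloud.width);
  const float* data = reinterpret_cast<const float*>(&cloud.points[0]);

  // "+ 2" skips x and y so the integral image sees only the depth channel.
  integral.setInput(data + 2, cloud.width, cloud.height, element_stride, row_stride);
}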
Example #5
void run(pcl::RFFaceDetectorTrainer & fdrf, typename pcl::PointCloud<PointInT>::Ptr & scene_vis, pcl::visualization::PCLVisualizer & vis, bool heat_map,
    bool show_votes, const std::string & filename)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr scene (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::copyPointCloud (*scene_vis, *scene);

  fdrf.setInputCloud (scene);

  if (heat_map)
  {
    pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_cloud (new pcl::PointCloud<pcl::PointXYZI>);
    fdrf.setFaceHeatMapCloud (intensity_cloud);
  }

  fdrf.detectFaces ();

  typedef typename pcl::traits::fieldList<PointInT>::type FieldListM;

  double rgb_m;
  bool exists_m;
  pcl::for_each_type < FieldListM > (pcl::CopyIfFieldExists<PointInT, double> (scene_vis->points[0], "rgb", exists_m, rgb_m));

  std::cout << "Color exists:" << static_cast<int> (exists_m) << std::endl;
  if (exists_m)
  {
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr to_visualize (new pcl::PointCloud<pcl::PointXYZRGB>);
    pcl::copyPointCloud (*scene_vis, *to_visualize);

    pcl::visualization::PointCloudColorHandlerRGBField < pcl::PointXYZRGB > handler_keypoints (to_visualize);
    vis.addPointCloud < pcl::PointXYZRGB > (to_visualize, handler_keypoints, "scene_cloud");
  } else
  {
    vis.addPointCloud (scene_vis, "scene_cloud");
  }

  if (heat_map)
  {
    pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_cloud (new pcl::PointCloud<pcl::PointXYZI>);
    fdrf.getFaceHeatMap (intensity_cloud);

    pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler_keypoints (intensity_cloud, "intensity");
    vis.addPointCloud < pcl::PointXYZI > (intensity_cloud, handler_keypoints, "heat_map");
  }

  if (show_votes)
  {
    //display votes_
    /*pcl::PointCloud<pcl::PointXYZ>::Ptr votes_cloud(new pcl::PointCloud<pcl::PointXYZ>());
     fdrf.getVotes(votes_cloud);
     pcl::visualization::PointCloudColorHandlerCustom < pcl::PointXYZ > handler_votes(votes_cloud, 255, 0, 0);
     vis.addPointCloud < pcl::PointXYZ > (votes_cloud, handler_votes, "votes_cloud");
     vis.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 14, "votes_cloud");
     vis.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY, 0.5, "votes_cloud");
     vis.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY, 0.75, "votes_cloud");*/

    pcl::PointCloud<pcl::PointXYZI>::Ptr votes_cloud (new pcl::PointCloud<pcl::PointXYZI> ());
    fdrf.getVotes2 (votes_cloud);
    pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler_votes (votes_cloud, "intensity");
    vis.addPointCloud < pcl::PointXYZI > (votes_cloud, handler_votes, "votes_cloud");
    vis.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 14, "votes_cloud");
  }

  vis.addCoordinateSystem (0.1, "global");

  std::vector<Eigen::VectorXd> heads;
  fdrf.getDetectedFaces (heads);
  face_detection_apps_utils::displayHeads (heads, vis);

  if (SHOW_GT)
  {
    //check if there is ground truth data
    std::string pose_file (filename);
    boost::replace_all (pose_file, ".pcd", "_pose.txt");

    Eigen::Matrix4d pose_mat;
    pose_mat.setIdentity (4, 4);
    bool result = face_detection_apps_utils::readMatrixFromFile (pose_file, pose_mat);

    if (result)
    {
      Eigen::Vector3d ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
      Eigen::Vector3d trans_vector = Eigen::Vector3d (pose_mat (0, 3), pose_mat (1, 3), pose_mat (2, 3));
      std::cout << ea << std::endl;
      std::cout << trans_vector << std::endl;

      pcl::PointXYZ center_point;
      center_point.x = trans_vector[0];
      center_point.y = trans_vector[1];
      center_point.z = trans_vector[2];
      vis.addSphere (center_point, 0.05, 255, 0, 0, "sphere");

      pcl::ModelCoefficients cylinder_coeff;
      cylinder_coeff.values.resize (7); // We need 7 values
      cylinder_coeff.values[0] = center_point.x;
      cylinder_coeff.values[1] = center_point.y;
      cylinder_coeff.values[2] = center_point.z;

      cylinder_coeff.values[3] = ea[0];
      cylinder_coeff.values[4] = ea[1];
      cylinder_coeff.values[5] = ea[2];

      Eigen::Vector3d vec = Eigen::Vector3d::UnitZ () * -1.;
      Eigen::Matrix3d matrixxx;

      matrixxx = Eigen::AngleAxisd (ea[0], Eigen::Vector3d::UnitX ()) * Eigen::AngleAxisd (ea[1], Eigen::Vector3d::UnitY ())
          * Eigen::AngleAxisd (ea[2], Eigen::Vector3d::UnitZ ());

      //matrixxx = pose_mat.block<3,3>(0,0);
      vec = matrixxx * vec;

      cylinder_coeff.values[3] = vec[0];
      cylinder_coeff.values[4] = vec[1];
      cylinder_coeff.values[5] = vec[2];

      cylinder_coeff.values[6] = 0.01;
      vis.addCylinder (cylinder_coeff, "cylinder");
    }
  }

  vis.setRepresentationToSurfaceForAllActors ();

  if (VIDEO)
  {
    vis.spinOnce (50, true);
  } else
  {
    vis.spin ();
  }

  vis.removeAllPointClouds ();
  vis.removeAllShapes ();
}
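A hypothetical driver loop showing how run() above might be invoked over a list of .pcd files. It assumes run() is templated on PointInT, as its signature suggests, and that the face-detection headers used by the example are already included; runOnFiles and its parameters are assumptions.

#include <string>
#include <vector>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>

template <typename PointInT>
void runOnFiles(pcl::RFFaceDetectorTrainer& fdrf,
                const std::vector<std::string>& pcd_files,
                bool heat_map, bool show_votes)
{
  pcl::visualization::PCLVisualizer vis ("face detection");
  for (const std::string& file : pcd_files)
  {
    typename pcl::PointCloud<PointInT>::Ptr scene_vis (new pcl::PointCloud<PointInT>);
    if (pcl::io::loadPCDFile (file, *scene_vis) < 0)
      continue;  // skip clouds that cannot be read
    run<PointInT> (fdrf, scene_vis, vis, heat_map, show_votes, file);
  }
}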