Example #1
    void
    estimateNormals (const typename PointCloud<PointT>::ConstPtr &input, PointCloud<Normal> &normals)
    {
      if (input->isOrganized ())
      {
        IntegralImageNormalEstimation<PointT, Normal> ne;
        // Set the parameters for normal estimation
        ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);
        ne.setMaxDepthChangeFactor (0.02f);
        ne.setNormalSmoothingSize (20.0f);
        // Estimate normals in the cloud
        ne.setInputCloud (input);
        ne.compute (normals);

        // Save the distance map for the plane comparator
        float *map = ne.getDistanceMap (); // This will be deallocated with the IntegralImageNormalEstimation object...
        distance_map_.assign (map, map + input->size ()); // ...so we must copy the data out
        plane_comparator_->setDistanceMap (distance_map_.data ());
      }
      else
      {
        NormalEstimation<PointT, Normal> ne;
        ne.setInputCloud (input);
        ne.setRadiusSearch (0.02f);
        ne.setSearchMethod (search_);
        ne.compute (normals);
      }
    }
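
For reference, a self-contained sketch of the organized branch above, using pcl::IntegralImageNormalEstimation directly with the same parameters (the PCD file name is a placeholder; this is an illustrative sketch, not part of the original program):

#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/features/integral_image_normal.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  if (pcl::io::loadPCDFile<pcl::PointXYZ> ("scene.pcd", *cloud) < 0)   // placeholder file name
    return (-1);

  // IntegralImageNormalEstimation only works on organized clouds (height > 1)
  if (!cloud->isOrganized ())
    return (-1);

  pcl::PointCloud<pcl::Normal> normals;
  pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
  ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);
  ne.setMaxDepthChangeFactor (0.02f);
  ne.setNormalSmoothingSize (20.0f);
  ne.setInputCloud (cloud);
  ne.compute (normals);
  return (0);
}
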
Example #2
 template <typename PointT> QList <pcl::cloud_composer::CloudComposerItem*>
 pcl::cloud_composer::OrganizedSegmentationTool::performTemplatedAction (QList <const CloudComposerItem*> input_data)
 {
   QList <CloudComposerItem*> output;  
   
   foreach (const CloudComposerItem* input_item, input_data)
   {
     QVariant variant = input_item->data (ItemDataRole::CLOUD_TEMPLATED);
     if ( ! variant.canConvert <typename PointCloud<PointT>::Ptr> () )
     {  
       qWarning () << "Attempted to cast to template type which does not exist in this item! (input list)";
       return output;
     }
     typename PointCloud <PointT>::Ptr input_cloud = variant.value <typename PointCloud<PointT>::Ptr> ();
     if ( ! input_cloud->isOrganized ())
     {
       qCritical () << "Organized Segmentation requires an organized cloud!";
       return output;
     }
   }
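
The canConvert/value round-trip above only works for pointer types that have been registered with Qt's meta-type system. A minimal generic sketch of that pattern, with a hypothetical CloudPtr alias standing in for the cloud_composer item data:

#include <QVariant>
#include <QDebug>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

// Hypothetical alias; cloud_composer registers its own templated cloud pointer types.
typedef pcl::PointCloud<pcl::PointXYZ>::Ptr CloudPtr;
Q_DECLARE_METATYPE (CloudPtr)

void
roundTrip (const CloudPtr &cloud)
{
  QVariant variant = QVariant::fromValue (cloud);
  if (!variant.canConvert<CloudPtr> ())
  {
    qWarning () << "Stored QVariant does not hold a CloudPtr!";
    return;
  }
  CloudPtr restored = variant.value<CloudPtr> ();
  qDebug () << "Restored cloud with" << static_cast<int> (restored->size ()) << "points";
}
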
Example #3
 float getMeanDepth(const typename PointCloud<PointT>::ConstPtr cloud, int row_up, int row_down, int col_left, int col_right) {
   float sum = 0;
   int count = 0;
   for (int row = row_up; row < row_down; row++) {
     for (int col = col_left; col < col_right; col++) {
       const PointT &p = cloud->at(col, row);
       if (!isFinite(p)) {
         continue;
       }
       sum += p.z;
       count++;
     }
   }
   // No finite points in the window: return NaN explicitly instead of dividing by zero
   if (count == 0) {
     return std::numeric_limits<float>::quiet_NaN();
   }
   return sum / count;
 }
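
A small usage sketch, assuming getMeanDepth above is visible as a free function template; the central-quarter window is an arbitrary choice made only for illustration:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <iostream>

void
printCenterDepth (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
{
  // getMeanDepth indexes the cloud with at(col, row), so the cloud must be organized
  if (!cloud->isOrganized ())
    return;

  const int h = static_cast<int> (cloud->height);
  const int w = static_cast<int> (cloud->width);
  // Central quarter of the image; an arbitrary window used only for illustration
  float depth = getMeanDepth<pcl::PointXYZ> (cloud, h / 4, 3 * h / 4, w / 4, 3 * w / 4);
  std::cout << "Mean depth of the central window: " << depth << " m" << std::endl;
}
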
Example #4
    void
    segment (const PointT &picked_point, 
             int picked_idx,
             PlanarRegion<PointT> &region,
             typename PointCloud<PointT>::Ptr &object)
    {
      object.reset ();

      // Segment out all planes
      vector<ModelCoefficients> model_coefficients;
      vector<PointIndices> inlier_indices, boundary_indices;
      vector<PlanarRegion<PointT>, Eigen::aligned_allocator<PlanarRegion<PointT> > > regions;

      // If the cloud is organized, prefer the faster organized multi-plane segmentation over RANSAC
      if (cloud_->isOrganized ())
      {
        print_highlight (stderr, "Estimating normals ");
        TicToc tt; tt.tic ();
        // Estimate normals
        PointCloud<Normal>::Ptr normal_cloud (new PointCloud<Normal>);
        estimateNormals (cloud_, *normal_cloud);
        print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%lu", normal_cloud->size ()); print_info (" points]\n");

        OrganizedMultiPlaneSegmentation<PointT, Normal, Label> mps;
        mps.setMinInliers (1000);
        mps.setAngularThreshold (deg2rad (3.0)); // 3 degrees
        mps.setDistanceThreshold (0.03); // 3 cm
        mps.setMaximumCurvature (0.001); // a small curvature
        mps.setProjectPoints (true);
        mps.setComparator (plane_comparator_);
        mps.setInputNormals (normal_cloud);
        mps.setInputCloud (cloud_);

        // Use one of the overloaded segmentAndRefine calls to get all the information that we want out
        PointCloud<Label>::Ptr labels (new PointCloud<Label>);
        vector<PointIndices> label_indices;
        mps.segmentAndRefine (regions, model_coefficients, inlier_indices, labels, label_indices, boundary_indices);
      }
      else
      {
        SACSegmentation<PointT> seg;
        seg.setOptimizeCoefficients (true);
        seg.setModelType (SACMODEL_PLANE);
        seg.setMethodType (SAC_RANSAC);
        seg.setMaxIterations (10000);
        seg.setDistanceThreshold (0.005);

        // Copy XYZ and Normals to a new cloud
        typename PointCloud<PointT>::Ptr cloud_segmented (new PointCloud<PointT> (*cloud_));
        typename PointCloud<PointT>::Ptr cloud_remaining (new PointCloud<PointT>);

        ModelCoefficients coefficients;
        ExtractIndices<PointT> extract;
        PointIndices::Ptr inliers (new PointIndices ());

        // Keep extracting planes until only 30% of the original cloud remains
        int i = 1;
        while (double (cloud_segmented->size ()) > 0.3 * double (cloud_->size ()))
        {
          seg.setInputCloud (cloud_segmented);

          print_highlight (stderr, "Searching for the largest plane (%2.0d) ", i++);
          TicToc tt; tt.tic ();
          seg.segment (*inliers, coefficients);
          print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%lu", inliers->indices.size ()); print_info (" points]\n");
 
          // No more planar models could be found
          if (inliers->indices.empty ())
            break;

          // Save this plane
          PlanarRegion<PointT> region;
          region.setCoefficients (coefficients);
          regions.push_back (region);

          inlier_indices.push_back (*inliers);
          model_coefficients.push_back (coefficients);

          // Extract the outliers
          extract.setInputCloud (cloud_segmented);
          extract.setIndices (inliers);
          extract.setNegative (true);
          extract.filter (*cloud_remaining);
          cloud_segmented.swap (cloud_remaining);
        }
      }
      print_highlight ("Number of planar regions detected: %lu for a cloud of %lu points\n", regions.size (), cloud_->size ());

      // Compute the distance from each planar region to the picked point, and select the closest one
      double min_dist = numeric_limits<double>::max ();
      int idx = -1;
      for (size_t i = 0; i < regions.size (); ++i)
      {
        double dist = pointToPlaneDistance (picked_point, regions[i].getCoefficients ());
        if (dist < min_dist)
        {
          min_dist = dist;
          idx = static_cast<int> (i);
        }
      }

      // Get the plane that holds the object of interest
      if (idx != -1)
      {
        plane_indices_.reset (new PointIndices (inlier_indices[idx]));

        if (cloud_->isOrganized ())
        {
          approximatePolygon (regions[idx], region, 0.01f, false, true);
          print_highlight ("Planar region: %lu points initial, %lu points after refinement.\n", regions[idx].getContour ().size (), region.getContour ().size ());
        }
        else
        {
          // Save the current region
          region = regions[idx]; 

          print_highlight (stderr, "Obtaining the boundary points for the region ");
          TicToc tt; tt.tic ();
          // Project the inliers to obtain a better hull
          typename PointCloud<PointT>::Ptr cloud_projected (new PointCloud<PointT>);
          ModelCoefficients::Ptr coefficients (new ModelCoefficients (model_coefficients[idx]));
          ProjectInliers<PointT> proj;
          proj.setModelType (SACMODEL_PLANE);
          proj.setInputCloud (cloud_);
          proj.setIndices (plane_indices_);
          proj.setModelCoefficients (coefficients);
          proj.filter (*cloud_projected);

          // Compute the boundary points as a ConvexHull
          ConvexHull<PointT> chull;
          chull.setDimension (2);
          chull.setInputCloud (cloud_projected);
          PointCloud<PointT> plane_hull;
          chull.reconstruct (plane_hull);
          region.setContour (plane_hull);
          print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%lu", plane_hull.size ()); print_info (" points]\n");
        }

      }

      // Segment the object of interest
      if (plane_indices_ && !plane_indices_->indices.empty ())
      {
        plane_.reset (new PointCloud<PointT>);
        copyPointCloud (*cloud_, plane_indices_->indices, *plane_);
        object.reset (new PointCloud<PointT>);
        segmentObject (picked_idx, cloud_, plane_indices_, *object);
      }
    }
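
The closest-plane selection in the middle of segment() can also be expressed as a small helper; a sketch under the same using-declarations as the snippet (pcl and std pulled into scope):

// Return the index of the planar region closest to the picked point, or -1 if there are none.
template <typename PointT> int
closestRegion (const PointT &picked_point,
               const vector<PlanarRegion<PointT>, Eigen::aligned_allocator<PlanarRegion<PointT> > > &regions)
{
  double min_dist = numeric_limits<double>::max ();
  int idx = -1;
  for (size_t i = 0; i < regions.size (); ++i)
  {
    double dist = pointToPlaneDistance (picked_point, regions[i].getCoefficients ());
    if (dist < min_dist)
    {
      min_dist = dist;
      idx = static_cast<int> (i);
    }
  }
  return (idx);
}
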
Example #5
/** \brief Performs radius searches and verifies that the results are unique, valid, and ordered. Additionally it tests whether all search methods return the same results.
  * \param cloud the input point cloud
  * \param search_methods vector of all search methods to be tested
  * \param query_indices indices of query points in the point cloud (not necessarily in input_indices)
  * \param input_indices indices defining a subset of the point cloud.
  */
template<typename PointT> void
testRadiusSearch (typename PointCloud<PointT>::ConstPtr point_cloud, vector<search::Search<PointT>*> search_methods, 
                   const vector<int>& query_indices, const vector<int>& input_indices = vector<int> ())
{
  vector< vector<int> >indices (search_methods.size ());
  vector< vector<float> >distances (search_methods.size ());
  vector <bool> passed (search_methods.size (), true);
  vector<bool> indices_mask (point_cloud->size (), true);
  vector<bool> nan_mask (point_cloud->size (), true);
  
  if (input_indices.size () != 0)
  {
    indices_mask.assign (point_cloud->size (), false);
    for (vector<int>::const_iterator iIt = input_indices.begin (); iIt != input_indices.end (); ++iIt)
      indices_mask [*iIt] = true;
  }
  
  // also mask out NaN points
  #pragma omp parallel for
  for (int pIdx = 0; pIdx < int (point_cloud->size ()); ++pIdx)
  {
    if (!isFinite (point_cloud->points [pIdx]))
      nan_mask [pIdx] = false;
  }
  
  boost::shared_ptr<vector<int> > input_indices_;
  if (input_indices.size ())
    input_indices_.reset (new vector<int> (input_indices));
  
  #pragma omp parallel for
  for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
    search_methods [sIdx]->setInputCloud (point_cloud, input_indices_);

  // test radii 0.01, 0.02, 0.04, 0.08
  for (float radius = 0.01f; radius < 0.1f; radius *= 2.0f)
  {
    //cout << radius << endl;
    // find nn for each point in the cloud
    for (vector<int>::const_iterator qIt = query_indices.begin (); qIt != query_indices.end (); ++qIt)
    {
      #pragma omp parallel for
      for (int sIdx = 0; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
      {
        search_methods [sIdx]->radiusSearch (point_cloud->points[*qIt], radius, indices [sIdx], distances [sIdx], 0);
        passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
        passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
        passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
      }
      
      // compare results to each other
      #pragma omp parallel for
      for (int sIdx = 1; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
      {
        passed [sIdx] = passed [sIdx] && compareResults (indices [0],    distances [0],    search_methods [0]->getName (),
                                                         indices [sIdx], distances [sIdx], search_methods [sIdx]->getName (), 1e-6f);
      }
    }
  }
  for (unsigned sIdx = 0; sIdx < search_methods.size (); ++sIdx)
  {
    cout << search_methods [sIdx]->getName () << ": " << (passed[sIdx]?"passed":"failed") << endl;
    EXPECT_TRUE (passed [sIdx]);
  }
}
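
For comparison, a minimal standalone radius search with a single search method (pcl::search::KdTree); the three hand-placed points exist only to make the sketch self-contained:

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/search/kdtree.h>
#include <iostream>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  cloud->push_back (pcl::PointXYZ (0.00f, 0.0f, 0.0f));
  cloud->push_back (pcl::PointXYZ (0.05f, 0.0f, 0.0f));
  cloud->push_back (pcl::PointXYZ (1.00f, 0.0f, 0.0f));

  pcl::search::KdTree<pcl::PointXYZ> tree;
  tree.setInputCloud (cloud);

  std::vector<int> indices;
  std::vector<float> sqr_distances;
  // All neighbours of the first point within a 0.1 m radius (expected: the first two points)
  tree.radiusSearch (cloud->points[0], 0.1, indices, sqr_distances);
  std::cout << "Found " << indices.size () << " neighbours" << std::endl;
  return (0);
}
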
Example #6
    typename PointCloud<PointT>::Ptr transform(
            const typename PointCloud<PointT>::Ptr source,
            const typename PointCloud<PointT>::Ptr target,
            boost::shared_ptr<Configuration> configuration)
    {
        source_cloud = *source;
        target_cloud = *target;

        cout << "[PFHTransformStrategy::transform] Setting source cloud: "
            << source->size() << " points" << endl;
        cout << "[PFHTransformStrategy::transform] Setting target cloud: "
            << target->size() << " points" << endl;

        typename PointCloud<PointT>::Ptr transformed(new PointCloud<PointT>);

        typename PointCloud<PointT>::Ptr source_points(source);
        typename PointCloud<PointT>::Ptr source_filtered(
                new PointCloud<PointT>);
        PointCloud<Normal>::Ptr source_normals(new PointCloud<Normal>);
        PointCloud<PointWithScale>::Ptr source_keypoints(
                new PointCloud<PointWithScale>);
        PointCloud<PFHSignature125>::Ptr source_descriptors(
                new PointCloud<PFHSignature125>);

        typename PointCloud<PointT>::Ptr target_points(target);
        typename PointCloud<PointT>::Ptr target_filtered(
                new PointCloud<PointT>);
        PointCloud<Normal>::Ptr target_normals(
                new PointCloud<Normal>);
        PointCloud<PointWithScale>::Ptr target_keypoints(
                new PointCloud<PointWithScale>);
        PointCloud<PFHSignature125>::Ptr target_descriptors(
                new PointCloud<PFHSignature125>);

        cout << "[PFHTransformStrategy::transform] Downsampling source and "
            << "target clouds..." << endl;

        //filter(source_points, source_filtered, 0.01f);
        //filter(target_points, target_filtered, 0.01f);
        // Downsampling is skipped for now; the "filtered" clouds simply alias the inputs
        source_filtered = source_points;
        target_filtered = target_points;
        cout << "[PFHTransformStrategy::transform] Creating normals for "
            << "source and target cloud..." << endl;

        create_normals<Normal>(source_filtered, source_normals);
        create_normals<Normal>(target_filtered, target_normals);

        cout << "[PFHTransformStrategy::transform] Finding keypoints in "
            << "source and target cloud..." << endl;

        detect_keypoints(source_filtered, source_keypoints);
        detect_keypoints(target_filtered, target_keypoints);

        for (const PointWithScale &p : source_keypoints->points) {
            cout << "keypoint " << p;
        }

        vector<int> source_indices(source_keypoints->points.size());
        vector<int> target_indices(target_keypoints->points.size());

        cout << "[PFHTransformStrategy::transform] Computing PFH features "
            << "for source and target cloud..." << endl;

        compute_PFH_features_at_keypoints(source_filtered, source_normals, source_keypoints,
                source_descriptors, source_indices);
        compute_PFH_features_at_keypoints(target_filtered, target_normals, target_keypoints,
                target_descriptors, target_indices);

        vector<int> correspondences;
        vector<float> correspondence_scores;

        find_feature_correspondence(source_descriptors, target_descriptors,
                correspondences, correspondence_scores);

        cout << "correspondences: " << correspondences.size() << endl;
        cout << "c. scores: " << correspondence_scores.size() << endl;

        cout << "First cloud: Found " << source_keypoints->size() << " keypoints "
            << "out of " << source_filtered->size() << " total points." << endl;
        cout << "Second cloud: Found " << target_keypoints->size() << " keypoints"
            << " out of " << target_filtered->size() << " total points." << endl;

        // Start with the actual transformation. Yeay :)
        TransformationFromCorrespondences tfc;
        tfc.reset();

        vector<int> sorted_scores;
        cv::sortIdx(correspondence_scores, sorted_scores, CV_SORT_EVERY_ROW);

        vector<float> tmp(correspondence_scores);
        sort(tmp.begin(), tmp.end());

        float median_score = tmp[tmp.size() / 2];
        vector<int> fidx;
        vector<int> fidxt;

        Eigen::Vector3f source_position(0, 0, 0);
        Eigen::Vector3f target_position(0, 0, 0);
        for (size_t i = 0; i < correspondence_scores.size(); i++) {
            int index = sorted_scores[i];
            if (median_score >= correspondence_scores[index]) {
                source_position[0] = source_keypoints->points[index].x;
                source_position[1] = source_keypoints->points[index].y;
                source_position[2] = source_keypoints->points[index].z;

                target_position[0] = target_keypoints->points[index].x;
                target_position[1] = target_keypoints->points[index].y;
                target_position[2] = target_keypoints->points[index].z;

                if (std::abs(source_position[1] - target_position[1]) > 0.2) {
                    continue;
                }
               // cout<< "abs position difference:"<<abs(source_position[1] - target_position[1])<<"C-Score"<<correspondence_scores[index]<<endl;
               // cout<<" Source Position " <<source_position<<endl;
               // cout<<" target position "<<target_position<<endl;
                tfc.add(source_position, target_position,
                        correspondence_scores[index]);
                fidx.push_back(source_indices[index]);
                fidxt.push_back(target_indices[correspondences[index]]);
            }
        }
        cout << "TFC samples: "<<tfc.getNoOfSamples()<<" AccumulatedWeight "<<tfc.getAccumulatedWeight()<<endl;
        Eigen::Affine3f tr;
        tr = tfc.getTransformation();
        cout << "TFC transformation: " << endl;
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                cout << tr.rotation()(i, j) << "\t";
            }
            cout << endl;
        }

        transformPointCloud(*source_filtered, *transformed,
                tfc.getTransformation());
        cout << "transformation finished";
        return transformed;
    };
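
The core of the alignment step above is pcl::TransformationFromCorrespondences; a minimal standalone sketch with three hand-made correspondences related by a pure translation (chosen only for illustration):

#include <pcl/common/transformation_from_correspondences.h>
#include <iostream>

int main ()
{
  pcl::TransformationFromCorrespondences tfc;
  tfc.reset ();
  // Three source/target pairs related by a translation of (0.1, 0, 0), each with weight 1
  tfc.add (Eigen::Vector3f (0.0f, 0.0f, 0.0f), Eigen::Vector3f (0.1f, 0.0f, 0.0f), 1.0f);
  tfc.add (Eigen::Vector3f (1.0f, 0.0f, 0.0f), Eigen::Vector3f (1.1f, 0.0f, 0.0f), 1.0f);
  tfc.add (Eigen::Vector3f (0.0f, 1.0f, 0.0f), Eigen::Vector3f (0.1f, 1.0f, 0.0f), 1.0f);

  Eigen::Affine3f tr = tfc.getTransformation ();
  std::cout << "Estimated translation: " << tr.translation ().transpose () << std::endl;
  return (0);
}
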
Example #7
    // Downsample a point cloud by using a voxel grid.
    // See http://pointclouds.org/documentation/tutorials/voxel_grid.php
    void filter (typename PointCloud<PointT>::Ptr cloud,
            typename PointCloud<PointT>::Ptr cloud_filtered,
            float leaf_size=0.01f)
    {
        cout << "[PFHTransformationStrategy::filter] Input cloud:"
            << endl << "    " << cloud->points.size() << " points" << endl;

        typename PointCloud<PointT>::Ptr tmp_ptr1(new PointCloud<PointT>);

        VoxelGrid<PointT> vox_grid;
        vox_grid.setInputCloud(cloud);
        vox_grid.setSaveLeafLayout(true);
        vox_grid.setLeafSize(leaf_size, leaf_size, leaf_size);

        cout << "[PFHTransformationStrategy::filter] Creating a voxel grid:"
            << endl << "    leaf size: [" << leaf_size << ", "
            << leaf_size << ", " << leaf_size << "]" << endl;

        // Skip the rest...
        // vox_grid.filter(*tmp_ptr1);
        vox_grid.filter(*cloud_filtered);

        cout << "[PFHTransformationStrategy::filter] Result of voxel grid"
            << " filtering:" << endl << "    " << cloud_filtered->points.size()
            << " points remaining" << endl;

        // NOTE: everything below this early return is intentionally unreachable;
        // the pass-through filtering below is kept for reference only.
        return;

        typename PointCloud<PointT>::Ptr tmp_ptr2(new PointCloud<PointT>);

        float pass1_limit_min = 0.0;
        float pass1_limit_max = 3.0;

        PassThrough<PointT> pass1;
        pass1.setInputCloud(tmp_ptr1);
        pass1.setFilterFieldName("z");
        pass1.setFilterLimits(pass1_limit_min, pass1_limit_max);

        cout << "[PFH Transformation : Downsampling] starting first filter"
            << " pass through:" << endl;
        cout << "  filter field name: " << pass1.getFilterFieldName() << endl;
        cout << "  filter limits: min = " << pass1_limit_min << ", max = "
            << pass1_limit_max << endl;

        float avg = 0.0f;
        for (const PointT &point : *tmp_ptr1) {
            avg += point.z;
        }

        cout << "  average field value: " << avg / tmp_ptr1->size() << endl;

        pass1.filter(*tmp_ptr2);

        cout << "[PFH Transformation : Downsampling] result of first pass:"******"  " << tmp_ptr2->points.size() << " points remaining."
            << endl;

        float pass2_limit_min = -2.0;
        float pass2_limit_max = 1.0;

        PassThrough<PointT> pass2;
        pass2.setInputCloud(tmp_ptr2);
        pass2.setFilterFieldName("x");
        pass2.setFilterLimits(pass2_limit_min, pass2_limit_max);

        cout << "[PFH Transformation : Downsampling] starting second filter"
            << " pass through:" << endl;
        cout << "  filter field name: " << pass2.getFilterFieldName() << endl;
        cout << "  filter limits: min = " << pass2_limit_min << ", max = "
            << pass2_limit_max << endl;

        pass2.filter(*cloud_filtered);

        cout << "[PFH Transformation : Downsampling] result of second pass:"******"  " << cloud_filtered->points.size()
            << " points remaining" << endl;
        cout << "[PFH Transformation : Downsampling] size of output cloud:"
            << endl << "  " << cloud_filtered->points.size() << " points"
            << endl << endl;
    };
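
A self-contained version of the voxel-grid step, following the tutorial linked at the top of the snippet (the input file name and the 1 cm leaf size are placeholders):

#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <iostream>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
  if (pcl::io::loadPCDFile<pcl::PointXYZ> ("input.pcd", *cloud) < 0)   // placeholder file name
    return (-1);

  pcl::VoxelGrid<pcl::PointXYZ> vox_grid;
  vox_grid.setInputCloud (cloud);
  vox_grid.setLeafSize (0.01f, 0.01f, 0.01f);   // 1 cm voxels
  vox_grid.filter (*cloud_filtered);

  std::cout << cloud->size () << " points reduced to "
            << cloud_filtered->size () << " points" << std::endl;
  return (0);
}
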