template <typename PointT, typename PointNT> int
pcl::SampleConsensusModelCylinder<PointT, PointNT>::countWithinDistance (
    const Eigen::VectorXf &model_coefficients, const double threshold)
{
  // Check if the model is valid given the user constraints
  if (!isModelValid (model_coefficients))
    return (0);

  int nr_p = 0;

  Eigen::Vector4f line_pt  (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0);
  Eigen::Vector4f line_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0);
  float ptdotdir = line_pt.dot (line_dir);
  float dirdotdir = 1.0f / line_dir.dot (line_dir);

  // Iterate through the 3d points and calculate the distances from them to the cylinder
  for (size_t i = 0; i < indices_->size (); ++i)
  {
    // Approximate the distance from the point to the cylinder as the difference between
    // dist(point,cylinder_axis) and cylinder radius
    Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0);
    Eigen::Vector4f n  (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0);

    double d_euclid = fabs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);

    // Calculate the point's projection on the cylinder axis
    float k = (pt.dot (line_dir) - ptdotdir) * dirdotdir;
    Eigen::Vector4f pt_proj = line_pt + k * line_dir;
    Eigen::Vector4f dir = pt - pt_proj;
    dir.normalize ();

    // Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
    double d_normal = fabs (getAngle3D (n, dir));
    d_normal = (std::min) (d_normal, M_PI - d_normal);

    if (fabs (normal_distance_weight_ * d_normal + (1 - normal_distance_weight_) * d_euclid) < threshold)
      nr_p++;
  }
  return (nr_p);
}
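countWithinDistance () is invoked internally by the sample-consensus estimators when scoring a candidate model. Below is a minimal usage sketch of the cylinder model driven through RandomSampleConsensus; the wrapper function name, the assumed `cloud`/`normals` inputs and all numeric values are illustrative placeholders, not part of the code above.

#include <pcl/point_types.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_cylinder.h>

void
fitCylinder (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
             const pcl::PointCloud<pcl::Normal>::ConstPtr &normals)
{
  typedef pcl::SampleConsensusModelCylinder<pcl::PointXYZ, pcl::Normal> CylinderModel;
  CylinderModel::Ptr model (new CylinderModel (cloud));
  model->setInputNormals (normals);
  model->setRadiusLimits (0.01, 0.10);        // placeholder radius bounds [m]

  pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
  ransac.setDistanceThreshold (0.02);         // threshold passed to countWithinDistance
  ransac.computeModel ();

  std::vector<int> inliers;
  ransac.getInliers (inliers);
  Eigen::VectorXf coefficients;               // [point_on_axis, axis_direction, radius]
  ransac.getModelCoefficients (coefficients);
}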
template <typename PointT, typename PointNT> int pcl::SampleConsensusModelNormalParallelPlane<PointT, PointNT>::countWithinDistance ( const Eigen::VectorXf &model_coefficients, const double threshold) { if (!normals_) { PCL_ERROR ("[pcl::SampleConsensusModelNormalParallelPlane::countWithinDistance] No input dataset containing normals was given!\n"); return (0); } // Check if the model is valid given the user constraints if (!isModelValid (model_coefficients)) return (0); // Obtain the plane normal Eigen::Vector4f coeff = model_coefficients; coeff[3] = 0; int nr_p = 0; // Iterate through the 3d points and calculate the distances from them to the plane for (size_t i = 0; i < indices_->size (); ++i) { // Calculate the distance from the point to the plane normal as the dot product // D = (P-A).N/|N| Eigen::Vector4f p (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0); Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0); double d_euclid = fabs (coeff.dot (p) + model_coefficients[3]); // Calculate the angular distance between the point normal and the plane normal double d_normal = fabs (getAngle3D (n, coeff)); d_normal = (std::min) (d_normal, M_PI - d_normal); if (fabs (normal_distance_weight_ * d_normal + (1 - normal_distance_weight_) * d_euclid) < threshold) nr_p++; } return (nr_p); }
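From user code, the normal-parallel-plane model is usually exercised through SACSegmentationFromNormals, which combines the weighted Euclidean/angular distance scored above with an axis constraint on the plane normal. A hedged sketch; the wrapper name, the assumed inputs and the numeric values are placeholders.

#include <pcl/point_types.h>
#include <pcl/segmentation/sac_segmentation.h>

void
segmentNormalParallelPlane (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
                            const pcl::PointCloud<pcl::Normal>::ConstPtr &normals)
{
  pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg;
  seg.setModelType (pcl::SACMODEL_NORMAL_PARALLEL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setAxis (Eigen::Vector3f (0.0f, 0.0f, 1.0f)); // placeholder reference axis
  seg.setEpsAngle (0.17);                           // ~10 degrees, placeholder tolerance
  seg.setNormalDistanceWeight (0.1);                // weight of d_normal vs. d_euclid
  seg.setDistanceThreshold (0.03);                  // placeholder threshold [m]
  seg.setInputCloud (cloud);
  seg.setInputNormals (normals);

  pcl::PointIndices inliers;
  pcl::ModelCoefficients coefficients;
  seg.segment (inliers, coefficients);
}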
template<typename PointType> void
pcl::apps::DominantPlaneSegmentation<PointType>::compute_full (std::vector<CloudPtr> & clusters)
{
  // Has the input dataset been set already?
  if (!input_)
  {
    PCL_WARN ("[pcl::apps::DominantPlaneSegmentation] No input dataset given!\n");
    return;
  }

  CloudConstPtr cloud_;
  CloudPtr cloud_filtered_ (new Cloud ());
  CloudPtr cloud_downsampled_ (new Cloud ());
  pcl::PointCloud<pcl::Normal>::Ptr cloud_normals_ (new pcl::PointCloud<pcl::Normal> ());
  pcl::PointIndices::Ptr table_inliers_ (new pcl::PointIndices ());
  pcl::ModelCoefficients::Ptr table_coefficients_ (new pcl::ModelCoefficients ());
  CloudPtr table_projected_ (new Cloud ());
  CloudPtr table_hull_ (new Cloud ());
  CloudPtr cloud_objects_ (new Cloud ());
  CloudPtr cluster_object_ (new Cloud ());

  typename pcl::search::KdTree<PointType>::Ptr normals_tree_ (new pcl::search::KdTree<PointType>);
  typename pcl::search::KdTree<PointType>::Ptr clusters_tree_ (new pcl::search::KdTree<PointType>);
  clusters_tree_->setEpsilon (1);

  // Normal estimation parameters
  n3d_.setKSearch (10);
  n3d_.setSearchMethod (normals_tree_);

  // Table model fitting parameters
  seg_.setDistanceThreshold (sac_distance_threshold_);
  seg_.setMaxIterations (2000);
  seg_.setNormalDistanceWeight (0.1);
  seg_.setOptimizeCoefficients (true);
  seg_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  seg_.setMethodType (pcl::SAC_MSAC);
  seg_.setProbability (0.98);

  proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  bb_cluster_proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);

  prism_.setHeightLimits (object_min_height_, object_max_height_);

  // Clustering parameters
  cluster_.setClusterTolerance (object_cluster_tolerance_);
  cluster_.setMinClusterSize (object_cluster_min_size_);
  cluster_.setSearchMethod (clusters_tree_);

  // ---[ PassThroughFilter
  pass_.setFilterLimits (min_z_bounds_, max_z_bounds_);
  pass_.setFilterFieldName ("z");
  pass_.setInputCloud (input_);
  pass_.filter (*cloud_filtered_);

  if (int (cloud_filtered_->points.size ()) < k_)
  {
    PCL_WARN ("[DominantPlaneSegmentation] Filtering returned %zu points! Aborting.",
              cloud_filtered_->points.size ());
    return;
  }

  // Downsample the point cloud
  grid_.setLeafSize (downsample_leaf_, downsample_leaf_, downsample_leaf_);
  grid_.setDownsampleAllData (false);
  grid_.setInputCloud (cloud_filtered_);
  grid_.filter (*cloud_downsampled_);

  PCL_INFO ("[DominantPlaneSegmentation] Number of points left after filtering & downsampling (%f -> %f): %zu out of %zu\n",
            min_z_bounds_, max_z_bounds_, cloud_downsampled_->points.size (), input_->points.size ());

  // ---[ Estimate the point normals
  n3d_.setInputCloud (cloud_downsampled_);
  n3d_.compute (*cloud_normals_);

  PCL_INFO ("[DominantPlaneSegmentation] %zu normals estimated.\n", cloud_normals_->points.size ());

  // ---[ Perform segmentation
  seg_.setInputCloud (cloud_downsampled_);
  seg_.setInputNormals (cloud_normals_);
  seg_.segment (*table_inliers_, *table_coefficients_);

  if (table_inliers_->indices.size () == 0)
  {
    PCL_WARN ("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
    return;
  }

  // ---[ Extract the plane
  proj_.setInputCloud (cloud_downsampled_);
  proj_.setIndices (table_inliers_);
  proj_.setModelCoefficients (table_coefficients_);
  proj_.filter (*table_projected_);

  // ---[ Estimate the convex hull
  std::vector<pcl::Vertices> polygons;
  CloudPtr table_hull (new Cloud ());
  hull_.setInputCloud (table_projected_);
  hull_.reconstruct (*table_hull, polygons);

  // Compute the plane coefficients
  Eigen::Vector4f model_coefficients;
  EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;

  model_coefficients[0] = table_coefficients_->values[0];
  model_coefficients[1] = table_coefficients_->values[1];
  model_coefficients[2] = table_coefficients_->values[2];
  model_coefficients[3] = table_coefficients_->values[3];

  // Need to flip the plane normal towards the viewpoint
  Eigen::Vector4f vp (0, 0, 0, 0);
  // See if we need to flip any plane normals
  vp -= table_hull->points[0].getVector4fMap ();
  vp[3] = 0;

  // Dot product between the (viewpoint - point) and the plane normal
  float cos_theta = vp.dot (model_coefficients);

  // Flip the plane normal
  if (cos_theta < 0)
  {
    model_coefficients *= -1;
    model_coefficients[3] = 0;
    // Hessian form (D = nc . p_plane (centroid here) + p)
    model_coefficients[3] = -1 * (model_coefficients.dot (table_hull->points[0].getVector4fMap ()));
  }

  // Set table_coeffs
  table_coeffs_ = model_coefficients;

  // ---[ Get the objects on top of the table
  pcl::PointIndices cloud_object_indices;
  prism_.setInputCloud (cloud_filtered_);
  prism_.setInputPlanarHull (table_hull);
  prism_.segment (cloud_object_indices);

  pcl::ExtractIndices<PointType> extract_object_indices;
  extract_object_indices.setInputCloud (cloud_downsampled_);
  extract_object_indices.setIndices (boost::make_shared<const pcl::PointIndices> (cloud_object_indices));
  extract_object_indices.filter (*cloud_objects_);

  if (cloud_objects_->points.size () == 0)
    return;

  // ---[ Split the objects into Euclidean clusters
  std::vector<pcl::PointIndices> clusters2;
  cluster_.setInputCloud (cloud_filtered_);
  cluster_.setIndices (boost::make_shared<const pcl::PointIndices> (cloud_object_indices));
  cluster_.extract (clusters2);

  PCL_INFO ("[DominantPlaneSegmentation::compute_full()] Number of clusters found matching the given constraints: %zu.\n",
            clusters2.size ());

  clusters.resize (clusters2.size ());

  for (size_t i = 0; i < clusters2.size (); ++i)
  {
    clusters[i] = CloudPtr (new Cloud ());
    pcl::copyPointCloud (*cloud_filtered_, clusters2[i].indices, *clusters[i]);
  }
}
template<typename PointType> void
pcl::apps::DominantPlaneSegmentation<PointType>::compute_table_plane ()
{
  // Has the input dataset been set already?
  if (!input_)
  {
    PCL_WARN ("[pcl::apps::DominantPlaneSegmentation] No input dataset given!\n");
    return;
  }

  CloudConstPtr cloud_;
  CloudPtr cloud_filtered_ (new Cloud ());
  CloudPtr cloud_downsampled_ (new Cloud ());
  pcl::PointCloud<pcl::Normal>::Ptr cloud_normals_ (new pcl::PointCloud<pcl::Normal> ());
  pcl::PointIndices::Ptr table_inliers_ (new pcl::PointIndices ());
  pcl::ModelCoefficients::Ptr table_coefficients_ (new pcl::ModelCoefficients ());
  CloudPtr table_projected_ (new Cloud ());
  CloudPtr table_hull_ (new Cloud ());

  typename pcl::search::KdTree<PointType>::Ptr normals_tree_ (new pcl::search::KdTree<PointType>);

  // Normal estimation parameters
  n3d_.setKSearch (k_);
  n3d_.setSearchMethod (normals_tree_);

  // Table model fitting parameters
  seg_.setDistanceThreshold (sac_distance_threshold_);
  seg_.setMaxIterations (2000);
  seg_.setNormalDistanceWeight (0.1);
  seg_.setOptimizeCoefficients (true);
  seg_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  seg_.setMethodType (pcl::SAC_RANSAC);
  seg_.setProbability (0.99);

  proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  bb_cluster_proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);

  // ---[ PassThroughFilter
  pass_.setFilterLimits (min_z_bounds_, max_z_bounds_);
  pass_.setFilterFieldName ("z");
  pass_.setInputCloud (input_);
  pass_.filter (*cloud_filtered_);

  if (int (cloud_filtered_->points.size ()) < k_)
  {
    PCL_WARN ("[DominantPlaneSegmentation] Filtering returned %zu points! Aborting.",
              cloud_filtered_->points.size ());
    return;
  }

  // Downsample the point cloud
  grid_.setLeafSize (downsample_leaf_, downsample_leaf_, downsample_leaf_);
  grid_.setDownsampleAllData (false);
  grid_.setInputCloud (cloud_filtered_);
  grid_.filter (*cloud_downsampled_);

  // ---[ Estimate the point normals
  n3d_.setInputCloud (cloud_downsampled_);
  n3d_.compute (*cloud_normals_);

  // ---[ Perform segmentation
  seg_.setInputCloud (cloud_downsampled_);
  seg_.setInputNormals (cloud_normals_);
  seg_.segment (*table_inliers_, *table_coefficients_);

  if (table_inliers_->indices.size () == 0)
  {
    PCL_WARN ("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
    return;
  }

  // ---[ Extract the plane
  proj_.setInputCloud (cloud_downsampled_);
  proj_.setIndices (table_inliers_);
  proj_.setModelCoefficients (table_coefficients_);
  proj_.filter (*table_projected_);

  // ---[ Estimate the convex hull
  std::vector<pcl::Vertices> polygons;
  CloudPtr table_hull (new Cloud ());
  hull_.setInputCloud (table_projected_);
  hull_.reconstruct (*table_hull, polygons);

  // Compute the plane coefficients
  Eigen::Vector4f model_coefficients;
  EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;

  model_coefficients[0] = table_coefficients_->values[0];
  model_coefficients[1] = table_coefficients_->values[1];
  model_coefficients[2] = table_coefficients_->values[2];
  model_coefficients[3] = table_coefficients_->values[3];

  // Need to flip the plane normal towards the viewpoint
  Eigen::Vector4f vp (0, 0, 0, 0);
  // See if we need to flip any plane normals
  vp -= table_hull->points[0].getVector4fMap ();
  vp[3] = 0;

  // Dot product between the (viewpoint - point) and the plane normal
  float cos_theta = vp.dot (model_coefficients);

  // Flip the plane normal
  if (cos_theta < 0)
  {
    model_coefficients *= -1;
    model_coefficients[3] = 0;
    // Hessian form (D = nc . p_plane (centroid here) + p)
    model_coefficients[3] = -1 * (model_coefficients.dot (table_hull->points[0].getVector4fMap ()));
  }

  // Set table_coeffs
  table_coeffs_ = model_coefficients;
}
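compute_table_plane () chains a pass-through filter, voxel-grid downsampling, normal estimation, normal-based plane fitting, inlier projection and a convex hull. The sketch below mirrors that pipeline with the public PCL classes; it is not the class's exact behaviour, the wrapper name is hypothetical and every numeric parameter is a placeholder.

#include <pcl/point_types.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/features/normal_3d.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/search/kdtree.h>
#include <pcl/surface/convex_hull.h>

void
extractTableHull (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &input,
                  pcl::PointCloud<pcl::PointXYZ> &hull_out)
{
  // 1. Keep only points inside a z range of interest (placeholder bounds)
  pcl::PointCloud<pcl::PointXYZ>::Ptr filtered (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PassThrough<pcl::PointXYZ> pass;
  pass.setFilterFieldName ("z");
  pass.setFilterLimits (0.4, 1.5);
  pass.setInputCloud (input);
  pass.filter (*filtered);

  // 2. Downsample with a voxel grid
  pcl::PointCloud<pcl::PointXYZ>::Ptr downsampled (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::VoxelGrid<pcl::PointXYZ> grid;
  grid.setLeafSize (0.01f, 0.01f, 0.01f);
  grid.setInputCloud (filtered);
  grid.filter (*downsampled);

  // 3. Estimate normals
  pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n3d;
  n3d.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  n3d.setKSearch (10);
  n3d.setInputCloud (downsampled);
  n3d.compute (*normals);

  // 4. Fit the dominant plane using the normals
  pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
  pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
  pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg;
  seg.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setNormalDistanceWeight (0.1);
  seg.setDistanceThreshold (0.02);
  seg.setMaxIterations (2000);
  seg.setInputCloud (downsampled);
  seg.setInputNormals (normals);
  seg.segment (*inliers, *coefficients);

  // 5. Project the inliers onto the plane and compute their convex hull
  pcl::PointCloud<pcl::PointXYZ>::Ptr projected (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::ProjectInliers<pcl::PointXYZ> proj;
  proj.setModelType (pcl::SACMODEL_PLANE);
  proj.setInputCloud (downsampled);
  proj.setIndices (inliers);
  proj.setModelCoefficients (coefficients);
  proj.filter (*projected);

  pcl::ConvexHull<pcl::PointXYZ> hull;
  hull.setInputCloud (projected);
  hull.reconstruct (hull_out);
}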
template<typename PointType> void
pcl::apps::DominantPlaneSegmentation<PointType>::compute_fast (std::vector<CloudPtr> & clusters)
{
  // Has the input dataset been set already?
  if (!input_)
  {
    PCL_WARN ("[pcl::apps::DominantPlaneSegmentation] No input dataset given!\n");
    return;
  }

  // Is the input dataset organized?
  if (input_->is_dense)
  {
    PCL_WARN ("[pcl::apps::DominantPlaneSegmentation] compute_fast can only be used with organized point clouds!\n");
    return;
  }

  CloudConstPtr cloud_;
  CloudPtr cloud_filtered_ (new Cloud ());
  CloudPtr cloud_downsampled_ (new Cloud ());
  pcl::PointCloud<pcl::Normal>::Ptr cloud_normals_ (new pcl::PointCloud<pcl::Normal> ());
  pcl::PointIndices::Ptr table_inliers_ (new pcl::PointIndices ());
  pcl::ModelCoefficients::Ptr table_coefficients_ (new pcl::ModelCoefficients ());
  CloudPtr table_projected_ (new Cloud ());
  CloudPtr table_hull_ (new Cloud ());
  CloudPtr cloud_objects_ (new Cloud ());
  CloudPtr cluster_object_ (new Cloud ());

  typename pcl::search::KdTree<PointType>::Ptr normals_tree_ (new pcl::search::KdTree<PointType>);
  typename pcl::search::KdTree<PointType>::Ptr clusters_tree_ (new pcl::search::KdTree<PointType>);
  clusters_tree_->setEpsilon (1);

  // Normal estimation parameters
  n3d_.setKSearch (k_);
  n3d_.setSearchMethod (normals_tree_);

  // Table model fitting parameters
  seg_.setDistanceThreshold (sac_distance_threshold_);
  seg_.setMaxIterations (2000);
  seg_.setNormalDistanceWeight (0.1);
  seg_.setOptimizeCoefficients (true);
  seg_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  seg_.setMethodType (pcl::SAC_RANSAC);
  seg_.setProbability (0.99);

  proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);
  bb_cluster_proj_.setModelType (pcl::SACMODEL_NORMAL_PLANE);

  prism_.setHeightLimits (object_min_height_, object_max_height_);

  // Clustering parameters
  cluster_.setClusterTolerance (object_cluster_tolerance_);
  cluster_.setMinClusterSize (object_cluster_min_size_);
  cluster_.setSearchMethod (clusters_tree_);

  // ---[ PassThroughFilter
  pass_.setFilterLimits (min_z_bounds_, max_z_bounds_);
  pass_.setFilterFieldName ("z");
  pass_.setInputCloud (input_);
  pass_.filter (*cloud_filtered_);

  if (int (cloud_filtered_->points.size ()) < k_)
  {
    PCL_WARN ("[DominantPlaneSegmentation] Filtering returned %zu points! Aborting.",
              cloud_filtered_->points.size ());
    return;
  }

  // Downsample the point cloud
  grid_.setLeafSize (downsample_leaf_, downsample_leaf_, downsample_leaf_);
  grid_.setDownsampleAllData (false);
  grid_.setInputCloud (cloud_filtered_);
  grid_.filter (*cloud_downsampled_);

  // ---[ Estimate the point normals
  n3d_.setInputCloud (cloud_downsampled_);
  n3d_.compute (*cloud_normals_);

  // ---[ Perform segmentation
  seg_.setInputCloud (cloud_downsampled_);
  seg_.setInputNormals (cloud_normals_);
  seg_.segment (*table_inliers_, *table_coefficients_);

  if (table_inliers_->indices.size () == 0)
  {
    PCL_WARN ("[DominantPlaneSegmentation] No Plane Inliers points! Aborting.");
    return;
  }

  // ---[ Extract the plane
  proj_.setInputCloud (cloud_downsampled_);
  proj_.setIndices (table_inliers_);
  proj_.setModelCoefficients (table_coefficients_);
  proj_.filter (*table_projected_);

  // ---[ Estimate the convex hull
  std::vector<pcl::Vertices> polygons;
  CloudPtr table_hull (new Cloud ());
  hull_.setInputCloud (table_projected_);
  hull_.reconstruct (*table_hull, polygons);

  // Compute the plane coefficients
  Eigen::Vector4f model_coefficients;
  EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;

  model_coefficients[0] = table_coefficients_->values[0];
  model_coefficients[1] = table_coefficients_->values[1];
  model_coefficients[2] = table_coefficients_->values[2];
  model_coefficients[3] = table_coefficients_->values[3];

  // Need to flip the plane normal towards the viewpoint
  Eigen::Vector4f vp (0, 0, 0, 0);
  // See if we need to flip any plane normals
  vp -= table_hull->points[0].getVector4fMap ();
  vp[3] = 0;

  // Dot product between the (viewpoint - point) and the plane normal
  float cos_theta = vp.dot (model_coefficients);

  // Flip the plane normal
  if (cos_theta < 0)
  {
    model_coefficients *= -1;
    model_coefficients[3] = 0;
    // Hessian form (D = nc . p_plane (centroid here) + p)
    model_coefficients[3] = -1 * (model_coefficients.dot (table_hull->points[0].getVector4fMap ()));
  }

  // Set table_coeffs
  table_coeffs_ = model_coefficients;

  // ---[ Get the objects on top of the table
  pcl::PointIndices cloud_object_indices;
  prism_.setInputCloud (input_);
  prism_.setInputPlanarHull (table_hull);
  prism_.segment (cloud_object_indices);

  pcl::ExtractIndices<PointType> extract_object_indices;
  extract_object_indices.setInputCloud (input_);
  extract_object_indices.setIndices (boost::make_shared<const pcl::PointIndices> (cloud_object_indices));
  extract_object_indices.filter (*cloud_objects_);

  // Create a new binary point cloud with intensity values (0 and 1): 0 for non-object pixels and 1 otherwise
  pcl::PointCloud<pcl::PointXYZI>::Ptr binary_cloud (new pcl::PointCloud<pcl::PointXYZI>);
  {
    binary_cloud->width = input_->width;
    binary_cloud->height = input_->height;
    binary_cloud->points.resize (input_->points.size ());
    binary_cloud->is_dense = input_->is_dense;

    size_t idx;
    for (size_t i = 0; i < cloud_object_indices.indices.size (); ++i)
    {
      idx = cloud_object_indices.indices[i];
      binary_cloud->points[idx].getVector4fMap () = input_->points[idx].getVector4fMap ();
      binary_cloud->points[idx].intensity = 1.0;
    }
  }

  // Connected components on the binary image
  std::map<float, float> connected_labels;
  float c_intensity = 0.1f;
  float intensity_incr = 0.1f;

  {
    int wsize = wsize_;
    for (int i = 0; i < int (binary_cloud->width); i++)
    {
      for (int j = 0; j < int (binary_cloud->height); j++)
      {
        if (binary_cloud->at (i, j).intensity != 0)
        {
          // Check neighboring pixels, first left and then top
          // Be aware of margins
          if ((i - 1) < 0 && (j - 1) < 0)
          {
            // Top-left pixel
            (*binary_cloud) (i, j).intensity = c_intensity;
          }
          else
          {
            if ((j - 1) < 0)
            {
              // Top row, check on the left of the pixel to assign a new label or not
              int left = check ((*binary_cloud) (i - 1, j), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
              if (left)
              {
                // Nothing found on the left, check bigger window
                bool found = false;
                for (int kk = 2; kk < wsize && !found; kk++)
                {
                  if ((i - kk) < 0)
                    continue;

                  int left = check ((*binary_cloud) (i - kk, j), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
                  if (left == 0)
                  {
                    found = true;
                  }
                }

                if (!found)
                {
                  c_intensity += intensity_incr;
                  (*binary_cloud) (i, j).intensity = c_intensity;
                }
              }
            }
            else
            {
              if ((i - 1) == 0)
              {
                // Check only top
                int top = check ((*binary_cloud) (i, j - 1), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
                if (top)
                {
                  bool found = false;
                  for (int kk = 2; kk < wsize && !found; kk++)
                  {
                    if ((j - kk) < 0)
                      continue;

                    int top = check ((*binary_cloud) (i, j - kk), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
                    if (top == 0)
                    {
                      found = true;
                    }
                  }

                  if (!found)
                  {
                    c_intensity += intensity_incr;
                    (*binary_cloud) (i, j).intensity = c_intensity;
                  }
                }
              }
              else
              {
                // Check left and top
                int left = check ((*binary_cloud) (i - 1, j), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
                int top = check ((*binary_cloud) (i, j - 1), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);

                if (left == 0 && top == 0)
                {
                  // Both top and left had labels; check if they are different.
                  // If they are, take the smallest one and mark the labels to be connected.
                  if ((*binary_cloud) (i - 1, j).intensity != (*binary_cloud) (i, j - 1).intensity)
                  {
                    float smaller_intensity = (*binary_cloud) (i - 1, j).intensity;
                    float bigger_intensity = (*binary_cloud) (i, j - 1).intensity;

                    if ((*binary_cloud) (i - 1, j).intensity > (*binary_cloud) (i, j - 1).intensity)
                    {
                      smaller_intensity = (*binary_cloud) (i, j - 1).intensity;
                      bigger_intensity = (*binary_cloud) (i - 1, j).intensity;
                    }

                    connected_labels[bigger_intensity] = smaller_intensity;
                    (*binary_cloud) (i, j).intensity = smaller_intensity;
                  }
                }

                if (left == 1 && top == 1)
                {
                  // If none had labels, increment c_intensity
                  // Search first on a bigger window
                  bool found = false;
                  for (int dist = 2; dist < wsize && !found; dist++)
                  {
                    if (((i - dist) < 0) || ((j - dist) < 0))
                      continue;

                    int left = check ((*binary_cloud) (i - dist, j), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);
                    int top = check ((*binary_cloud) (i, j - dist), (*binary_cloud) (i, j), c_intensity, object_cluster_tolerance_);

                    if (left == 0 && top == 0)
                    {
                      if ((*binary_cloud) (i - dist, j).intensity != (*binary_cloud) (i, j - dist).intensity)
                      {
                        float smaller_intensity = (*binary_cloud) (i - dist, j).intensity;
                        float bigger_intensity = (*binary_cloud) (i, j - dist).intensity;

                        if ((*binary_cloud) (i - dist, j).intensity > (*binary_cloud) (i, j - dist).intensity)
                        {
                          smaller_intensity = (*binary_cloud) (i, j - dist).intensity;
                          bigger_intensity = (*binary_cloud) (i - dist, j).intensity;
                        }

                        connected_labels[bigger_intensity] = smaller_intensity;
                        (*binary_cloud) (i, j).intensity = smaller_intensity;
                        found = true;
                      }
                    }
                    else if (left == 0 || top == 0)
                    {
                      // One had a label
                      found = true;
                    }
                  }

                  if (!found)
                  {
                    // None had a label in the bigger window
                    c_intensity += intensity_incr;
                    (*binary_cloud) (i, j).intensity = c_intensity;
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  std::map<float, pcl::PointIndices> clusters_map;

  {
    std::map<float, float>::iterator it;

    for (int i = 0; i < int (binary_cloud->width); i++)
    {
      for (int j = 0; j < int (binary_cloud->height); j++)
      {
        if (binary_cloud->at (i, j).intensity != 0)
        {
          // Check if this is a root label...
          it = connected_labels.find (binary_cloud->at (i, j).intensity);
          while (it != connected_labels.end ())
          {
            // The label is on the list, change the pixel intensity until it has a root label
            (*binary_cloud) (i, j).intensity = (*it).second;
            it = connected_labels.find (binary_cloud->at (i, j).intensity);
          }

          std::map<float, pcl::PointIndices>::iterator it_indices;
          it_indices = clusters_map.find (binary_cloud->at (i, j).intensity);
          if (it_indices == clusters_map.end ())
          {
            pcl::PointIndices indices;
            clusters_map[binary_cloud->at (i, j).intensity] = indices;
          }

          clusters_map[binary_cloud->at (i, j).intensity].indices.push_back (static_cast<int> (j * binary_cloud->width + i));
        }
      }
    }
  }

  clusters.resize (clusters_map.size ());

  std::map<float, pcl::PointIndices>::iterator it_indices;
  int k = 0;
  for (it_indices = clusters_map.begin (); it_indices != clusters_map.end (); it_indices++)
  {
    if (int ((*it_indices).second.indices.size ()) >= object_cluster_min_size_)
    {
      clusters[k] = CloudPtr (new Cloud ());
      pcl::copyPointCloud (*input_, (*it_indices).second.indices, *clusters[k]);
      k++;
      indices_clusters_.push_back ((*it_indices).second);
    }
  }

  clusters.resize (k);
}
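The labeling loop above is a two-pass connected-component algorithm on the organized grid: the first pass assigns provisional labels and records label equivalences in connected_labels, the second pass resolves each pixel to its root label. The minimal sketch below shows the same idea on a plain binary grid; the function name is hypothetical, it uses 4-connectivity only and omits the distance-tolerance check performed by check() above.

#include <algorithm>
#include <map>
#include <vector>

// Two-pass connected-component labeling on a width x height binary grid.
// grid[y * width + x] is 0 (background) or 1 (foreground); the result holds
// a per-pixel component label (0 = background).
std::vector<int>
labelComponents (const std::vector<int> &grid, int width, int height)
{
  std::vector<int> labels (grid.size (), 0);
  std::map<int, int> equivalences;          // bigger label -> smaller label
  int next_label = 0;

  // First pass: provisional labels plus equivalences
  for (int y = 0; y < height; ++y)
  {
    for (int x = 0; x < width; ++x)
    {
      if (!grid[y * width + x])
        continue;

      int left = (x > 0) ? labels[y * width + (x - 1)] : 0;
      int top  = (y > 0) ? labels[(y - 1) * width + x] : 0;

      if (!left && !top)
        labels[y * width + x] = ++next_label;             // start a new component
      else if (left && top && left != top)
      {
        int small = std::min (left, top), big = std::max (left, top);
        equivalences[big] = small;                        // remember the merge
        labels[y * width + x] = small;
      }
      else
        labels[y * width + x] = left ? left : top;        // inherit the neighbour label
    }
  }

  // Second pass: follow equivalences until a root label is reached
  for (size_t i = 0; i < labels.size (); ++i)
  {
    int l = labels[i];
    std::map<int, int>::const_iterator it = equivalences.find (l);
    while (it != equivalences.end ())
    {
      l = it->second;
      it = equivalences.find (l);
    }
    labels[i] = l;
  }
  return labels;
}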
template <typename PointInT, typename PointNT, typename PointOutT> void pcl::VFHEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output) { // ---[ Step 1a : compute the centroid in XYZ space Eigen::Vector4f xyz_centroid; if (use_given_centroid_) xyz_centroid = centroid_to_use_; else compute3DCentroid (*surface_, *indices_, xyz_centroid); // Estimate the XYZ centroid // ---[ Step 1b : compute the centroid in normal space Eigen::Vector4f normal_centroid = Eigen::Vector4f::Zero (); int cp = 0; // If the data is dense, we don't need to check for NaN if (use_given_normal_) normal_centroid = normal_to_use_; else { if (normals_->is_dense) { for (size_t i = 0; i < indices_->size (); ++i) { normal_centroid += normals_->points[(*indices_)[i]].getNormalVector4fMap (); cp++; } } // NaN or Inf values could exist => check for them else { for (size_t i = 0; i < indices_->size (); ++i) { if (!pcl_isfinite (normals_->points[(*indices_)[i]].normal[0]) || !pcl_isfinite (normals_->points[(*indices_)[i]].normal[1]) || !pcl_isfinite (normals_->points[(*indices_)[i]].normal[2])) continue; normal_centroid += normals_->points[(*indices_)[i]].getNormalVector4fMap (); cp++; } } normal_centroid /= cp; } // Compute the direction of view from the viewpoint to the centroid Eigen::Vector4f viewpoint (vpx_, vpy_, vpz_, 0); Eigen::Vector4f d_vp_p = viewpoint - xyz_centroid; d_vp_p.normalize (); // Estimate the SPFH at nn_indices[0] using the entire cloud computePointSPFHSignature (xyz_centroid, normal_centroid, *surface_, *normals_, *indices_); // We only output _1_ signature output.points.resize (1); output.width = 1; output.height = 1; // Estimate the FPFH at nn_indices[0] using the entire cloud and copy the resultant signature for (int d = 0; d < hist_f1_.size (); ++d) output.points[0].histogram[d + 0] = hist_f1_[d]; size_t data_size = hist_f1_.size (); for (int d = 0; d < hist_f2_.size (); ++d) output.points[0].histogram[d + data_size] = hist_f2_[d]; data_size += hist_f2_.size (); for (int d = 0; d < hist_f3_.size (); ++d) output.points[0].histogram[d + data_size] = hist_f3_[d]; data_size += hist_f3_.size (); for (int d = 0; d < hist_f4_.size (); ++d) output.points[0].histogram[d + data_size] = hist_f4_[d]; // ---[ Step 2 : obtain the viewpoint component hist_vp_.setZero (nr_bins_vp_); double hist_incr; if (normalize_bins_) hist_incr = 100.0 / (double)(indices_->size ()); else hist_incr = 1.0; for (size_t i = 0; i < indices_->size (); ++i) { Eigen::Vector4f normal (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0); // Normalize double alpha = (normal.dot (d_vp_p) + 1.0) * 0.5; int fi = floor (alpha * hist_vp_.size ()); if (fi < 0) fi = 0; if (fi > ((int)hist_vp_.size () - 1)) fi = hist_vp_.size () - 1; // Bin into the histogram hist_vp_ [fi] += hist_incr; } data_size += hist_f4_.size (); // Copy the resultant signature for (int d = 0; d < hist_vp_.size (); ++d) output.points[0].histogram[d + data_size] = hist_vp_[d]; }
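A usage sketch for the estimator above: the single output point carries the concatenated f1-f4 and viewpoint histograms filled in by computeFeature. The wrapper name is hypothetical and the assumed cloud/normals inputs must already exist.

#include <pcl/point_types.h>
#include <pcl/features/vfh.h>
#include <pcl/search/kdtree.h>

void
computeVFH (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
            const pcl::PointCloud<pcl::Normal>::ConstPtr &normals,
            pcl::PointCloud<pcl::VFHSignature308> &descriptor)
{
  pcl::VFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::VFHSignature308> vfh;
  vfh.setInputCloud (cloud);
  vfh.setInputNormals (normals);
  vfh.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  vfh.compute (descriptor);   // exactly one VFHSignature308 per cloud
}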
template<typename PointT, typename PointNT, typename PointLT> void pcl::OrganizedMultiPlaneSegmentation<PointT, PointNT, PointLT>::segment (std::vector<ModelCoefficients>& model_coefficients, std::vector<PointIndices>& inlier_indices, std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& centroids, std::vector <Eigen::Matrix3f, Eigen::aligned_allocator<Eigen::Matrix3f> >& covariances, pcl::PointCloud<PointLT>& labels, std::vector<pcl::PointIndices>& label_indices) { if (!initCompute ()) return; // Check that we got the same number of points and normals if (static_cast<int> (normals_->points.size ()) != static_cast<int> (input_->points.size ())) { PCL_ERROR ("[pcl::%s::segment] Number of points in input cloud (%zu) and normal cloud (%zu) do not match!\n", getClassName ().c_str (), input_->points.size (), normals_->points.size ()); return; } // Check that the cloud is organized if (!input_->isOrganized ()) { PCL_ERROR ("[pcl::%s::segment] Organized point cloud is required for this plane extraction method!\n", getClassName ().c_str ()); return; } // Calculate range part of planes' hessian normal form std::vector<float> plane_d (input_->points.size ()); for (unsigned int i = 0; i < input_->size (); ++i) plane_d[i] = input_->points[i].getVector3fMap ().dot (normals_->points[i].getNormalVector3fMap ()); // Make a comparator //PlaneCoefficientComparator<PointT,PointNT> plane_comparator (plane_d); compare_->setPlaneCoeffD (plane_d); compare_->setInputCloud (input_); compare_->setInputNormals (normals_); compare_->setAngularThreshold (static_cast<float> (angular_threshold_)); compare_->setDistanceThreshold (static_cast<float> (distance_threshold_), true); // Set up the output OrganizedConnectedComponentSegmentation<PointT,pcl::Label> connected_component (compare_); connected_component.setInputCloud (input_); connected_component.segment (labels, label_indices); Eigen::Vector4f clust_centroid = Eigen::Vector4f::Zero (); Eigen::Vector4f vp = Eigen::Vector4f::Zero (); Eigen::Matrix3f clust_cov; pcl::ModelCoefficients model; model.values.resize (4); // Fit Planes to each cluster for (size_t i = 0; i < label_indices.size (); i++) { if (static_cast<unsigned> (label_indices[i].indices.size ()) > min_inliers_) { pcl::computeMeanAndCovarianceMatrix (*input_, label_indices[i].indices, clust_cov, clust_centroid); Eigen::Vector4f plane_params; EIGEN_ALIGN16 Eigen::Vector3f::Scalar eigen_value; EIGEN_ALIGN16 Eigen::Vector3f eigen_vector; pcl::eigen33 (clust_cov, eigen_value, eigen_vector); plane_params[0] = eigen_vector[0]; plane_params[1] = eigen_vector[1]; plane_params[2] = eigen_vector[2]; plane_params[3] = 0; plane_params[3] = -1 * plane_params.dot (clust_centroid); vp -= clust_centroid; float cos_theta = vp.dot (plane_params); if (cos_theta < 0) { plane_params *= -1; plane_params[3] = 0; plane_params[3] = -1 * plane_params.dot (clust_centroid); } // Compute the curvature surface change float curvature; float eig_sum = clust_cov.coeff (0) + clust_cov.coeff (4) + clust_cov.coeff (8); if (eig_sum != 0) curvature = fabsf (eigen_value / eig_sum); else curvature = 0; if (curvature < maximum_curvature_) { model.values[0] = plane_params[0]; model.values[1] = plane_params[1]; model.values[2] = plane_params[2]; model.values[3] = plane_params[3]; model_coefficients.push_back (model); inlier_indices.push_back (label_indices[i]); centroids.push_back (clust_centroid); covariances.push_back (clust_cov); } } } deinitCompute (); }
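A usage sketch calling the overload defined above; it assumes an organized cloud with per-pixel normals (e.g. from IntegralImageNormalEstimation), the wrapper name is hypothetical and the thresholds are placeholders.

#include <pcl/point_types.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>

void
segmentPlanes (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
               const pcl::PointCloud<pcl::Normal>::ConstPtr &normals)
{
  pcl::OrganizedMultiPlaneSegmentation<pcl::PointXYZ, pcl::Normal, pcl::Label> mps;
  mps.setMinInliers (1000);            // placeholder
  mps.setAngularThreshold (0.0349);    // ~2 degrees, placeholder
  mps.setDistanceThreshold (0.02);     // placeholder [m]
  mps.setInputCloud (cloud);
  mps.setInputNormals (normals);

  std::vector<pcl::ModelCoefficients> coefficients;
  std::vector<pcl::PointIndices> inliers;
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > centroids;
  std::vector<Eigen::Matrix3f, Eigen::aligned_allocator<Eigen::Matrix3f> > covariances;
  pcl::PointCloud<pcl::Label> labels;
  std::vector<pcl::PointIndices> label_indices;
  mps.segment (coefficients, inliers, centroids, covariances, labels, label_indices);
}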
template <typename PointT, typename PointNT> void pcl::SampleConsensusModelCone<PointT, PointNT>::projectPoints ( const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) { // Needs a valid set of model coefficients if (model_coefficients.size () != 7) { PCL_ERROR ("[pcl::SampleConsensusModelCone::projectPoints] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ()); return; } projected_points.header = input_->header; projected_points.is_dense = input_->is_dense; Eigen::Vector4f apex (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0); Eigen::Vector4f axis_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0); float opening_angle = model_coefficients[6]; float apexdotdir = apex.dot (axis_dir); float dirdotdir = 1.0f / axis_dir.dot (axis_dir); // Copy all the data fields from the input cloud to the projected one? if (copy_data_fields) { // Allocate enough space and copy the basics projected_points.points.resize (input_->points.size ()); projected_points.width = input_->width; projected_points.height = input_->height; typedef typename pcl::traits::fieldList<PointT>::type FieldList; // Iterate over each point for (size_t i = 0; i < projected_points.points.size (); ++i) // Iterate over each dimension pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i])); // Iterate through the 3d points and calculate the distances from them to the cone for (size_t i = 0; i < inliers.size (); ++i) { Eigen::Vector4f pt (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z, 1); float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir; pcl::Vector4fMap pp = projected_points.points[inliers[i]].getVector4fMap (); pp.matrix () = apex + k * axis_dir; Eigen::Vector4f dir = pt - pp; dir.normalize (); // Calculate the actual radius of the cone at the level of the projected point Eigen::Vector4f height = apex - pp; float actual_cone_radius = tanf (opening_angle) * height.norm (); // Calculate the projection of the point onto the cone pp += dir * actual_cone_radius; } } else { // Allocate enough space and copy the basics projected_points.points.resize (inliers.size ()); projected_points.width = static_cast<uint32_t> (inliers.size ()); projected_points.height = 1; typedef typename pcl::traits::fieldList<PointT>::type FieldList; // Iterate over each point for (size_t i = 0; i < inliers.size (); ++i) // Iterate over each dimension pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i])); // Iterate through the 3d points and calculate the distances from them to the cone for (size_t i = 0; i < inliers.size (); ++i) { pcl::Vector4fMap pp = projected_points.points[i].getVector4fMap (); pcl::Vector4fMapConst pt = input_->points[inliers[i]].getVector4fMap (); float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir; // Calculate the projection of the point on the line pp.matrix () = apex + k * axis_dir; Eigen::Vector4f dir = pt - pp; dir.normalize (); // Calculate the actual radius of the cone at the level of the projected point Eigen::Vector4f height = apex - pp; float actual_cone_radius = tanf (opening_angle) * height.norm (); // Calculate the projection of the point onto the cone pp += dir * actual_cone_radius; } } }
template <typename PointInT, typename PointNT, typename PointOutT> bool pcl::FPFHEstimation<PointInT, PointNT, PointOutT>::computePairFeatures ( const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals, int p_idx, int q_idx, float &f1, float &f2, float &f3, float &f4) { // Compute the Cartesian difference between the two points Eigen::Vector4f delta = cloud.points[q_idx].getVector4fMap () - cloud.points[p_idx].getVector4fMap (); delta[3] = 0; // Compute the Euclidean norm = || p_idx - q_idx || float distance_sqr = delta.squaredNorm (); if (distance_sqr == 0) { ROS_ERROR ("Euclidean distance between points %d and %d is 0!", p_idx, q_idx); f1 = f2 = f3 = f4 = 0; return (false); } // Estimate f4 = || delta || f4 = sqrt (distance_sqr); // Create a Darboux frame coordinate system u-v-w // u = n1; v = (p_idx - q_idx) x u / || (p_idx - q_idx) x u ||; w = u x v pcl::Vector4fMapConst u = normals.points[p_idx].getNormalVector4fMap (); // Estimate f3 = u * delta / || delta || // delta[3] = 0 (line 59) f3 = u.dot (delta) / f4; // v = delta * u Eigen::Vector4f v = Eigen::Vector4f::Zero (); v = delta.cross3 (u); distance_sqr = v.squaredNorm (); if (distance_sqr == 0) { ROS_ERROR ("Norm of Delta x U is 0 for point %d and %d!", p_idx, q_idx); f1 = f2 = f3 = f4 = 0; return (false); } // Copy the q_idx normal Eigen::Vector4f nq (normals.points[q_idx].normal_x, normals.points[q_idx].normal_y, normals.points[q_idx].normal_z, 0); // Normalize the vector v /= sqrt (distance_sqr); // Compute delta (w) = u x v delta = u.cross3 (v); // Compute f2 = v * n2; // v[3] = 0 (line 82) f2 = v.dot (nq); // Compute f1 = arctan (w * n2, u * n2) i.e. angle of n2 in the x=u, y=w coordinate system // delta[3] = 0 (line 59), nq[3] = 0 (line 97) f1 = atan2f (delta.dot (nq), u.dot (nq)); // @todo: optimize this return (true); }
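computePairFeatures feeds the per-pair Darboux-frame angles into the SPFH/FPFH histograms; from user code the feature is normally driven through the public estimation interface. The wrapper name is hypothetical and the radius is a placeholder.

#include <pcl/point_types.h>
#include <pcl/features/fpfh.h>
#include <pcl/search/kdtree.h>

void
computeFPFH (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
             const pcl::PointCloud<pcl::Normal>::ConstPtr &normals,
             pcl::PointCloud<pcl::FPFHSignature33> &descriptors)
{
  pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::FPFHSignature33> fpfh;
  fpfh.setInputCloud (cloud);
  fpfh.setInputNormals (normals);
  fpfh.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  fpfh.setRadiusSearch (0.05);   // should exceed the radius used for normal estimation
  fpfh.compute (descriptors);    // one FPFHSignature33 per input point
}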
template <typename PointT> void pcl::ExtractPolygonalPrismData<PointT>::segment (pcl::PointIndices &output) { output.header = input_->header; if (!initCompute ()) { output.indices.clear (); return; } if (static_cast<int> (planar_hull_->points.size ()) < min_pts_hull_) { PCL_ERROR ("[pcl::%s::segment] Not enough points (%zu) in the hull!\n", getClassName ().c_str (), planar_hull_->points.size ()); output.indices.clear (); return; } // Compute the plane coefficients Eigen::Vector4f model_coefficients; EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix; Eigen::Vector4f xyz_centroid; computeMeanAndCovarianceMatrix (*planar_hull_, covariance_matrix, xyz_centroid); // Compute the model coefficients EIGEN_ALIGN16 Eigen::Vector3f::Scalar eigen_value; EIGEN_ALIGN16 Eigen::Vector3f eigen_vector; eigen33 (covariance_matrix, eigen_value, eigen_vector); model_coefficients[0] = eigen_vector [0]; model_coefficients[1] = eigen_vector [1]; model_coefficients[2] = eigen_vector [2]; model_coefficients[3] = 0; // Hessian form (D = nc . p_plane (centroid here) + p) model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid); // Need to flip the plane normal towards the viewpoint Eigen::Vector4f vp (vpx_, vpy_, vpz_, 0); // See if we need to flip any plane normals vp -= planar_hull_->points[0].getVector4fMap (); vp[3] = 0; // Dot product between the (viewpoint - point) and the plane normal float cos_theta = vp.dot (model_coefficients); // Flip the plane normal if (cos_theta < 0) { model_coefficients *= -1; model_coefficients[3] = 0; // Hessian form (D = nc . p_plane (centroid here) + p) model_coefficients[3] = -1 * (model_coefficients.dot (planar_hull_->points[0].getVector4fMap ())); } // Project all points PointCloud projected_points; SampleConsensusModelPlane<PointT> sacmodel (input_); sacmodel.projectPoints (*indices_, model_coefficients, projected_points, false); // Create a X-Y projected representation for within bounds polygonal checking int k0, k1, k2; // Determine the best plane to project points onto k0 = (fabs (model_coefficients[0] ) > fabs (model_coefficients[1])) ? 0 : 1; k0 = (fabs (model_coefficients[k0]) > fabs (model_coefficients[2])) ? k0 : 2; k1 = (k0 + 1) % 3; k2 = (k0 + 2) % 3; // Project the convex hull pcl::PointCloud<PointT> polygon; polygon.points.resize (planar_hull_->points.size ()); for (size_t i = 0; i < planar_hull_->points.size (); ++i) { Eigen::Vector4f pt (planar_hull_->points[i].x, planar_hull_->points[i].y, planar_hull_->points[i].z, 0); polygon.points[i].x = pt[k1]; polygon.points[i].y = pt[k2]; polygon.points[i].z = 0; } PointT pt_xy; pt_xy.z = 0; output.indices.resize (indices_->size ()); int l = 0; for (size_t i = 0; i < projected_points.points.size (); ++i) { // Check the distance to the user imposed limits from the table planar model double distance = pointToPlaneDistanceSigned (input_->points[(*indices_)[i]], model_coefficients); if (distance < height_limit_min_ || distance > height_limit_max_) continue; // Check what points are inside the hull Eigen::Vector4f pt (projected_points.points[i].x, projected_points.points[i].y, projected_points.points[i].z, 0); pt_xy.x = pt[k1]; pt_xy.y = pt[k2]; if (!pcl::isXYPointIn2DXYPolygon (pt_xy, polygon)) continue; output.indices[l++] = (*indices_)[i]; } output.indices.resize (l); deinitCompute (); }
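A usage sketch for the prism extraction above, assuming the planar hull was produced beforehand (e.g. by ConvexHull on the projected plane inliers). The wrapper name is hypothetical and the height limits are placeholders.

#include <pcl/point_types.h>
#include <pcl/segmentation/extract_polygonal_prism_data.h>
#include <pcl/filters/extract_indices.h>

void
extractObjectsAbovePlane (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
                          const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &plane_hull,
                          pcl::PointCloud<pcl::PointXYZ> &objects)
{
  // Keep the points lying between 1 cm and 50 cm above the hull's plane
  pcl::PointIndices::Ptr object_indices (new pcl::PointIndices);
  pcl::ExtractPolygonalPrismData<pcl::PointXYZ> prism;
  prism.setInputCloud (cloud);
  prism.setInputPlanarHull (plane_hull);
  prism.setHeightLimits (0.01, 0.50);   // placeholder limits [m]
  prism.segment (*object_indices);

  pcl::ExtractIndices<pcl::PointXYZ> extract;
  extract.setInputCloud (cloud);
  extract.setIndices (object_indices);
  extract.filter (objects);
}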
TEST (PCL, BoundaryEstimation) { Eigen::Vector4f u = Eigen::Vector4f::Zero (); Eigen::Vector4f v = Eigen::Vector4f::Zero (); // Estimate normals first NormalEstimation<PointXYZ, Normal> n; PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ()); // set parameters n.setInputCloud (cloud.makeShared ()); boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices)); n.setIndices (indicesptr); n.setSearchMethod (tree); n.setKSearch (static_cast<int> (indices.size ())); // estimate n.compute (*normals); BoundaryEstimation<PointXYZ, Normal, Boundary> b; b.setInputNormals (normals); EXPECT_EQ (b.getInputNormals (), normals); // getCoordinateSystemOnPlane for (size_t i = 0; i < normals->points.size (); ++i) { b.getCoordinateSystemOnPlane (normals->points[i], u, v); Vector4fMap n4uv = normals->points[i].getNormalVector4fMap (); EXPECT_NEAR (n4uv.dot(u), 0, 1e-4); EXPECT_NEAR (n4uv.dot(v), 0, 1e-4); EXPECT_NEAR (u.dot(v), 0, 1e-4); } // isBoundaryPoint (indices) bool pt = false; pt = b.isBoundaryPoint (cloud, 0, indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) / 3, indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) / 2, indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) - 1, indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, true); // isBoundaryPoint (points) pt = false; pt = b.isBoundaryPoint (cloud, cloud.points[0], indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () / 3], indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () / 2], indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, false); pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () - 1], indices, u, v, float (M_PI) / 2.0); EXPECT_EQ (pt, true); // Object PointCloud<Boundary>::Ptr bps (new PointCloud<Boundary> ()); // set parameters b.setInputCloud (cloud.makeShared ()); b.setIndices (indicesptr); b.setSearchMethod (tree); b.setKSearch (static_cast<int> (indices.size ())); // estimate b.compute (*bps); EXPECT_EQ (bps->points.size (), indices.size ()); pt = bps->points[0].boundary_point; EXPECT_EQ (pt, false); pt = bps->points[indices.size () / 3].boundary_point; EXPECT_EQ (pt, false); pt = bps->points[indices.size () / 2].boundary_point; EXPECT_EQ (pt, false); pt = bps->points[indices.size () - 1].boundary_point; EXPECT_EQ (pt, true); }
void pcl_ros::ConvexHull2D::input_indices_callback (const PointCloudConstPtr &cloud, const PointIndicesConstPtr &indices) { // No subscribers, no work if (pub_output_.getNumSubscribers () <= 0 && pub_plane_.getNumSubscribers () <= 0) return; PointCloud output; // If cloud is given, check if it's valid if (!isValid (cloud)) { NODELET_ERROR ("[%s::input_indices_callback] Invalid input!", getName ().c_str ()); // Publish an empty message output.header = cloud->header; pub_output_.publish (output.makeShared ()); return; } // If indices are given, check if they are valid if (indices && !isValid (indices, "indices")) { NODELET_ERROR ("[%s::input_indices_callback] Invalid indices!", getName ().c_str ()); // Publish an empty message output.header = cloud->header; pub_output_.publish (output.makeShared ()); return; } /// DEBUG if (indices) NODELET_DEBUG ("[%s::input_indices_model_callback]\n" " - PointCloud with %d data points (%s), stamp %f, and frame %s on topic %s received.\n" " - PointIndices with %zu values, stamp %f, and frame %s on topic %s received.", getName ().c_str (), cloud->width * cloud->height, pcl::getFieldsList (*cloud).c_str (), cloud->header.stamp.toSec (), cloud->header.frame_id.c_str (), getMTPrivateNodeHandle ().resolveName ("input").c_str (), indices->indices.size (), indices->header.stamp.toSec (), indices->header.frame_id.c_str (), getMTPrivateNodeHandle ().resolveName ("indices").c_str ()); else NODELET_DEBUG ("[%s::input_indices_callback] PointCloud with %d data points, stamp %f, and frame %s on topic %s received.", getName ().c_str (), cloud->width * cloud->height, cloud->header.stamp.toSec (), cloud->header.frame_id.c_str (), getMTPrivateNodeHandle ().resolveName ("input").c_str ()); // Reset the indices and surface pointers IndicesPtr indices_ptr; if (indices) indices_ptr.reset (new std::vector<int> (indices->indices)); impl_.setInputCloud (cloud); impl_.setIndices (indices_ptr); // Estimate the feature impl_.reconstruct (output); // If more than 3 points are present, send a PolygonStamped hull too if (output.points.size () >= 3) { geometry_msgs::PolygonStamped poly; poly.header = output.header; poly.polygon.points.resize (output.points.size ()); // Get three consecutive points (without copying) pcl::Vector4fMap O = output.points[1].getVector4fMap (); pcl::Vector4fMap B = output.points[0].getVector4fMap (); pcl::Vector4fMap A = output.points[2].getVector4fMap (); // Check the direction of points -- polygon must have CCW direction Eigen::Vector4f OA = A - O; Eigen::Vector4f OB = B - O; Eigen::Vector4f N = OA.cross3 (OB); double theta = N.dot (O); bool reversed = false; if (theta < (M_PI / 2.0)) reversed = true; for (size_t i = 0; i < output.points.size (); ++i) { if (reversed) { size_t j = output.points.size () - i - 1; poly.polygon.points[i].x = output.points[j].x; poly.polygon.points[i].y = output.points[j].y; poly.polygon.points[i].z = output.points[j].z; } else { poly.polygon.points[i].x = output.points[i].x; poly.polygon.points[i].y = output.points[i].y; poly.polygon.points[i].z = output.points[i].z; } } pub_plane_.publish (boost::make_shared<const geometry_msgs::PolygonStamped> (poly)); } // Publish a Boost shared ptr const data output.header = cloud->header; pub_output_.publish (output.makeShared ()); }
bool pcl::computeRGBPairFeatures (const Eigen::Vector4f &p1, const Eigen::Vector4f &n1, const Eigen::Vector4i &colors1, const Eigen::Vector4f &p2, const Eigen::Vector4f &n2, const Eigen::Vector4i &colors2, float &f1, float &f2, float &f3, float &f4, float &f5, float &f6, float &f7) { Eigen::Vector4f dp2p1 = p2 - p1; dp2p1[3] = 0.0f; f4 = dp2p1.norm (); if (f4 == 0.0f) { PCL_ERROR ("Euclidean distance between points is 0!\n"); f1 = f2 = f3 = f4 = 0.0f; return (false); } Eigen::Vector4f n1_copy = n1, n2_copy = n2; n1_copy[3] = n2_copy[3] = 0.0f; float angle1 = n1_copy.dot (dp2p1) / f4; f3 = angle1; // Create a Darboux frame coordinate system u-v-w // u = n1; v = (p_idx - q_idx) x u / || (p_idx - q_idx) x u ||; w = u x v Eigen::Vector4f v = dp2p1.cross3 (n1_copy); v[3] = 0.0f; float v_norm = v.norm (); if (v_norm == 0.0f) { PCL_ERROR ("Norm of Delta x U is 0!\n"); f1 = f2 = f3 = f4 = 0.0f; return (false); } // Normalize v v /= v_norm; Eigen::Vector4f w = n1_copy.cross3 (v); // Do not have to normalize w - it is a unit vector by construction v[3] = 0.0f; f2 = v.dot (n2_copy); w[3] = 0.0f; // Compute f1 = arctan (w * n2, u * n2) i.e. angle of n2 in the x=u, y=w coordinate system f1 = atan2f (w.dot (n2_copy), n1_copy.dot (n2_copy)); // @todo optimize this // everything before was standard 4D-Darboux frame feature pair // now, for the experimental color stuff f5 = ((float) colors1[0]) / colors2[0]; f6 = ((float) colors1[1]) / colors2[1]; f7 = ((float) colors1[2]) / colors2[2]; // make sure the ratios are in the [-1, 1] interval if (f5 > 1.0f) f5 = - 1.0f / f5; if (f6 > 1.0f) f6 = - 1.0f / f6; if (f7 > 1.0f) f7 = - 1.0f / f7; return (true); }
template <typename PointT> void pcl::SampleConsensusModelStick<PointT>::projectPoints ( const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const { // Needs a valid model coefficients if (!isModelValid (model_coefficients)) return; // Obtain the line point and direction Eigen::Vector4f line_pt (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0); Eigen::Vector4f line_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0); projected_points.header = input_->header; projected_points.is_dense = input_->is_dense; // Copy all the data fields from the input cloud to the projected one? if (copy_data_fields) { // Allocate enough space and copy the basics projected_points.points.resize (input_->points.size ()); projected_points.width = input_->width; projected_points.height = input_->height; typedef typename pcl::traits::fieldList<PointT>::type FieldList; // Iterate over each point for (size_t i = 0; i < projected_points.points.size (); ++i) // Iterate over each dimension pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i])); // Iterate through the 3d points and calculate the distances from them to the line for (size_t i = 0; i < inliers.size (); ++i) { Eigen::Vector4f pt (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z, 0); // double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B; float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir); Eigen::Vector4f pp = line_pt + k * line_dir; // Calculate the projection of the point on the line (pointProj = A + k * B) projected_points.points[inliers[i]].x = pp[0]; projected_points.points[inliers[i]].y = pp[1]; projected_points.points[inliers[i]].z = pp[2]; } } else { // Allocate enough space and copy the basics projected_points.points.resize (inliers.size ()); projected_points.width = static_cast<uint32_t> (inliers.size ()); projected_points.height = 1; typedef typename pcl::traits::fieldList<PointT>::type FieldList; // Iterate over each point for (size_t i = 0; i < inliers.size (); ++i) // Iterate over each dimension pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i])); // Iterate through the 3d points and calculate the distances from them to the line for (size_t i = 0; i < inliers.size (); ++i) { Eigen::Vector4f pt (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z, 0); // double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B; float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir); Eigen::Vector4f pp = line_pt + k * line_dir; // Calculate the projection of the point on the line (pointProj = A + k * B) projected_points.points[i].x = pp[0]; projected_points.points[i].y = pp[1]; projected_points.points[i].z = pp[2]; } } }
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCone<PointT, PointNT>::selectWithinDistance (
    const Eigen::VectorXf &model_coefficients, const double threshold, std::vector<int> &inliers)
{
  // Check if the model is valid given the user constraints
  if (!isModelValid (model_coefficients))
  {
    inliers.clear ();
    return;
  }

  int nr_p = 0;
  inliers.resize (indices_->size ());
  error_sqr_dists_.resize (indices_->size ());

  Eigen::Vector4f apex (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0);
  Eigen::Vector4f axis_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0);
  float opening_angle = model_coefficients[6];

  float apexdotdir = apex.dot (axis_dir);
  float dirdotdir = 1.0f / axis_dir.dot (axis_dir);
  // Iterate through the 3d points and calculate the distances from them to the cone
  for (size_t i = 0; i < indices_->size (); ++i)
  {
    Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0);
    Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0);

    // Calculate the point's projection on the cone axis
    float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
    Eigen::Vector4f pt_proj = apex + k * axis_dir;

    // Calculate the direction of the point from center
    Eigen::Vector4f pp_pt_dir = pt - pt_proj;
    pp_pt_dir.normalize ();

    // Calculate the actual radius of the cone at the level of the projected point
    Eigen::Vector4f height = apex - pt_proj;
    double actual_cone_radius = tan (opening_angle) * height.norm ();
    height.normalize ();

    // Calculate the cone's perfect normal
    Eigen::Vector4f cone_normal = sinf (opening_angle) * height + cosf (opening_angle) * pp_pt_dir;

    // Approximate the distance from the point to the cone as the difference between
    // dist(point,cone_axis) and actual cone radius
    double d_euclid = fabs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);

    // Calculate the angular distance between the point normal and the (dir=pt_proj->pt) vector
    double d_normal = fabs (getAngle3D (n, cone_normal));
    d_normal = (std::min) (d_normal, M_PI - d_normal);

    double distance = fabs (normal_distance_weight_ * d_normal + (1 - normal_distance_weight_) * d_euclid);

    if (distance < threshold)
    {
      // Returns the indices of the points whose distances are smaller than the threshold
      inliers[nr_p] = (*indices_)[i];
      error_sqr_dists_[nr_p] = distance;
      ++nr_p;
    }
  }
  inliers.resize (nr_p);
  error_sqr_dists_.resize (nr_p);
}
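selectWithinDistance is driven by the sample-consensus machinery; from user code a cone is typically fit through SACSegmentationFromNormals. The wrapper name, the assumed inputs and all thresholds/limits below are placeholders.

#include <pcl/point_types.h>
#include <pcl/segmentation/sac_segmentation.h>

void
segmentCone (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
             const pcl::PointCloud<pcl::Normal>::ConstPtr &normals)
{
  pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg;
  seg.setOptimizeCoefficients (true);
  seg.setModelType (pcl::SACMODEL_CONE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setNormalDistanceWeight (0.1);
  seg.setMaxIterations (10000);
  seg.setDistanceThreshold (0.05);   // compared against the weighted distance above
  seg.setRadiusLimits (0.0, 0.15);   // placeholder
  seg.setInputCloud (cloud);
  seg.setInputNormals (normals);

  pcl::PointIndices inliers;
  pcl::ModelCoefficients coefficients;   // [apex, axis_direction, opening_angle]
  seg.segment (inliers, coefficients);
}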
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTEstimationBase<PointInT, PointNT, PointOutT, PointRFT>::interpolateSingleChannel (
    const std::vector<int> &indices,
    const std::vector<float> &sqr_dists,
    const int index,
    std::vector<double> &binDistance,
    const int nr_bins,
    Eigen::VectorXf &shot)
{
  const Eigen::Vector4f& central_point = (*input_)[(*indices_)[index]].getVector4fMap ();
  const PointRFT& current_frame = (*frames_)[index];

  Eigen::Vector4f current_frame_x (current_frame.x_axis[0], current_frame.x_axis[1], current_frame.x_axis[2], 0);
  Eigen::Vector4f current_frame_y (current_frame.y_axis[0], current_frame.y_axis[1], current_frame.y_axis[2], 0);
  Eigen::Vector4f current_frame_z (current_frame.z_axis[0], current_frame.z_axis[1], current_frame.z_axis[2], 0);

  for (size_t i_idx = 0; i_idx < indices.size (); ++i_idx)
  {
    if (!pcl_isfinite (binDistance[i_idx]))
      continue;

    Eigen::Vector4f delta = surface_->points[indices[i_idx]].getVector4fMap () - central_point;
    delta[3] = 0;

    // Compute the Euclidean norm
    double distance = sqrt (sqr_dists[i_idx]);

    if (areEquals (distance, 0.0))
      continue;

    double xInFeatRef = delta.dot (current_frame_x);
    double yInFeatRef = delta.dot (current_frame_y);
    double zInFeatRef = delta.dot (current_frame_z);

    // To avoid numerical problems afterwards
    if (fabs (yInFeatRef) < 1E-30)
      yInFeatRef = 0;
    if (fabs (xInFeatRef) < 1E-30)
      xInFeatRef = 0;
    if (fabs (zInFeatRef) < 1E-30)
      zInFeatRef = 0;

    unsigned char bit4 = ((yInFeatRef > 0) || ((yInFeatRef == 0.0) && (xInFeatRef < 0))) ? 1 : 0;
    unsigned char bit3 = static_cast<unsigned char> (((xInFeatRef > 0) || ((xInFeatRef == 0.0) && (yInFeatRef > 0))) ? !bit4 : bit4);

    assert (bit3 == 0 || bit3 == 1);

    int desc_index = (bit4<<3) + (bit3<<2);

    desc_index = desc_index << 1;

    if ((xInFeatRef * yInFeatRef > 0) || (xInFeatRef == 0.0))
      desc_index += (fabs (xInFeatRef) >= fabs (yInFeatRef)) ? 0 : 4;
    else
      desc_index += (fabs (xInFeatRef) > fabs (yInFeatRef)) ? 4 : 0;

    desc_index += zInFeatRef > 0 ? 1 : 0;

    // 2 RADII
    desc_index += (distance > radius1_2_) ? 2 : 0;

    int step_index = static_cast<int>(floor (binDistance[i_idx] + 0.5));
    int volume_index = desc_index * (nr_bins+1);

    // Interpolation on the cosine (adjacent bins in the histogram)
    binDistance[i_idx] -= step_index;
    double intWeight = (1 - fabs (binDistance[i_idx]));

    if (binDistance[i_idx] > 0)
      shot[volume_index + ((step_index+1) % nr_bins)] += static_cast<float> (binDistance[i_idx]);
    else
      shot[volume_index + ((step_index - 1 + nr_bins) % nr_bins)] += - static_cast<float> (binDistance[i_idx]);

    // Interpolation on the distance (adjacent husks)
    if (distance > radius1_2_)   // external sphere
    {
      double radiusDistance = (distance - radius3_4_) / radius1_2_;

      if (distance > radius3_4_) // most external sector, votes only for itself
        intWeight += 1 - radiusDistance;   // weight = 1 - d
      else                       // 3/4 of radius, votes also for the internal sphere
      {
        intWeight += 1 + radiusDistance;
        shot[(desc_index - 2) * (nr_bins+1) + step_index] -= static_cast<float> (radiusDistance);
      }
    }
    else                         // internal sphere
    {
      double radiusDistance = (distance - radius1_4_) / radius1_2_;

      if (distance < radius1_4_) // most internal sector, votes only for itself
        intWeight += 1 + radiusDistance;   // weight = 1 - d
      else                       // 3/4 of radius, votes also for the external sphere
      {
        intWeight += 1 - radiusDistance;
        shot[(desc_index + 2) * (nr_bins+1) + step_index] += static_cast<float> (radiusDistance);
      }
    }

    // Interpolation on the inclination (adjacent vertical volumes)
    double inclinationCos = zInFeatRef / distance;
    if (inclinationCos < - 1.0)
      inclinationCos = - 1.0;
    if (inclinationCos > 1.0)
      inclinationCos = 1.0;

    double inclination = acos (inclinationCos);

    assert (inclination >= 0.0 && inclination <= PST_RAD_180);

    if (inclination > PST_RAD_90 || (fabs (inclination - PST_RAD_90) < 1e-30 && zInFeatRef <= 0))
    {
      double inclinationDistance = (inclination - PST_RAD_135) / PST_RAD_90;
      if (inclination > PST_RAD_135)
        intWeight += 1 - inclinationDistance;
      else
      {
        intWeight += 1 + inclinationDistance;
        assert ((desc_index + 1) * (nr_bins+1) + step_index >= 0 && (desc_index + 1) * (nr_bins+1) + step_index < descLength_);
        shot[(desc_index + 1) * (nr_bins+1) + step_index] -= static_cast<float> (inclinationDistance);
      }
    }
    else
    {
      double inclinationDistance = (inclination - PST_RAD_45) / PST_RAD_90;
      if (inclination < PST_RAD_45)
        intWeight += 1 + inclinationDistance;
      else
      {
        intWeight += 1 - inclinationDistance;
        assert ((desc_index - 1) * (nr_bins+1) + step_index >= 0 && (desc_index - 1) * (nr_bins+1) + step_index < descLength_);
        shot[(desc_index - 1) * (nr_bins+1) + step_index] += static_cast<float> (inclinationDistance);
      }
    }

    if (yInFeatRef != 0.0 || xInFeatRef != 0.0)
    {
      // Interpolation on the azimuth (adjacent horizontal volumes)
      double azimuth = atan2 (yInFeatRef, xInFeatRef);

      int sel = desc_index >> 2;
      double angularSectorSpan = PST_RAD_45;
      double angularSectorStart = - PST_RAD_PI_7_8;

      double azimuthDistance = (azimuth - (angularSectorStart + angularSectorSpan*sel)) / angularSectorSpan;

      assert ((azimuthDistance < 0.5 || areEquals (azimuthDistance, 0.5)) && (azimuthDistance > - 0.5 || areEquals (azimuthDistance, - 0.5)));

      azimuthDistance = (std::max)(- 0.5, std::min (azimuthDistance, 0.5));

      if (azimuthDistance > 0)
      {
        intWeight += 1 - azimuthDistance;
        int interp_index = (desc_index + 4) % maxAngularSectors_;
        assert (interp_index * (nr_bins+1) + step_index >= 0 && interp_index * (nr_bins+1) + step_index < descLength_);
        shot[interp_index * (nr_bins+1) + step_index] += static_cast<float> (azimuthDistance);
      }
      else
      {
        int interp_index = (desc_index - 4 + maxAngularSectors_) % maxAngularSectors_;
        assert (interp_index * (nr_bins+1) + step_index >= 0 && interp_index * (nr_bins+1) + step_index < descLength_);
        intWeight += 1 + azimuthDistance;
        shot[interp_index * (nr_bins+1) + step_index] -= static_cast<float> (azimuthDistance);
      }
    }

    assert (volume_index + step_index >= 0 && volume_index + step_index < descLength_);
    shot[volume_index + step_index] += static_cast<float> (intWeight);
  }
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Quadrilinear interpolation; used when color and shape descriptions are NOT activated simultaneously
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::SHOTEstimationBase<PointInT, PointNT, PointOutT>::interpolateSingleChannel (
    const pcl::PointCloud<PointInT> &cloud,
    const std::vector<int> &indices,
    const std::vector<float> &dists,
    const Eigen::Vector4f &central_point,
    const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &rf,
    std::vector<double> &binDistance,
    const int nr_bins,
    Eigen::VectorXf &shot)
{
  if (rf.size () != 3)
  {
    PCL_ERROR ("[pcl::%s::interpolateSingleChannel] RF size different than 9! Aborting...\n", getClassName ().c_str ());
    return;
  }

  for (size_t i_idx = 0; i_idx < indices.size (); ++i_idx)
  {
    Eigen::Vector4f delta = cloud.points[indices[i_idx]].getVector4fMap () - central_point;

    // Compute the Euclidean norm
    double distance_sqr = dists[i_idx]; //delta.squaredNorm ();

    if (areEquals (distance_sqr, 0.0))
      continue;

    double xInFeatRef = delta.dot (rf[0]); //(x * feat[i].rf[0] + y * feat[i].rf[1] + z * feat[i].rf[2]);
    double yInFeatRef = delta.dot (rf[1]); //(x * feat[i].rf[3] + y * feat[i].rf[4] + z * feat[i].rf[5]);
    double zInFeatRef = delta.dot (rf[2]); //(x * feat[i].rf[6] + y * feat[i].rf[7] + z * feat[i].rf[8]);

    // To avoid numerical problems afterwards
    if (fabs (yInFeatRef) < 1E-30)
      yInFeatRef = 0;
    if (fabs (xInFeatRef) < 1E-30)
      xInFeatRef = 0;
    if (fabs (zInFeatRef) < 1E-30)
      zInFeatRef = 0;

    unsigned char bit4 = ((yInFeatRef > 0) || ((yInFeatRef == 0.0) && (xInFeatRef < 0))) ? 1 : 0;
    unsigned char bit3 = ((xInFeatRef > 0) || ((xInFeatRef == 0.0) && (yInFeatRef > 0))) ? !bit4 : bit4;

    assert (bit3 == 0 || bit3 == 1);

    int desc_index = (bit4<<3) + (bit3<<2);

    desc_index = desc_index << 1;

    if ((xInFeatRef * yInFeatRef > 0) || (xInFeatRef == 0.0))
      desc_index += (fabs (xInFeatRef) >= fabs (yInFeatRef)) ? 0 : 4;
    else
      desc_index += (fabs (xInFeatRef) > fabs (yInFeatRef)) ? 4 : 0;

    desc_index += zInFeatRef > 0 ? 1 : 0;

    // 2 RADII
    desc_index += (distance_sqr > sqradius4_) ? 2 : 0;

    int step_index = static_cast<int>(floor (binDistance[i_idx] + 0.5));
    int volume_index = desc_index * (nr_bins+1);

    // Interpolation on the cosine (adjacent bins in the histogram)
    binDistance[i_idx] -= step_index;
    double intWeight = (1 - fabs (binDistance[i_idx]));

    if (binDistance[i_idx] > 0)
      shot[volume_index + ((step_index+1) % nr_bins)] += binDistance[i_idx];
    else
      shot[volume_index + ((step_index - 1 + nr_bins) % nr_bins)] += - binDistance[i_idx];

    // Interpolation on the distance (adjacent husks)
    double distance = sqrt (dists[i_idx]); //sqrt(distance_sqr);

    if (distance > radius1_2_)   // external sphere
    {
      double radiusDistance = (distance - radius3_4_) / radius1_2_;

      if (distance > radius3_4_) // most external sector, votes only for itself
        intWeight += 1 - radiusDistance;   // weight = 1 - d
      else                       // 3/4 of radius, votes also for the internal sphere
      {
        intWeight += 1 + radiusDistance;
        shot[(desc_index - 2) * (nr_bins+1) + step_index] -= radiusDistance;
      }
    }
    else                         // internal sphere
    {
      double radiusDistance = (distance - radius1_4_) / radius1_2_;

      if (distance < radius1_4_) // most internal sector, votes only for itself
        intWeight += 1 + radiusDistance;   // weight = 1 - d
      else                       // 3/4 of radius, votes also for the external sphere
      {
        intWeight += 1 - radiusDistance;
        shot[(desc_index + 2) * (nr_bins+1) + step_index] += radiusDistance;
      }
    }

    // Interpolation on the inclination (adjacent vertical volumes)
    double inclinationCos = zInFeatRef / distance;
    if (inclinationCos < - 1.0)
      inclinationCos = - 1.0;
    if (inclinationCos > 1.0)
      inclinationCos = 1.0;

    double inclination = acos (inclinationCos);

    assert (inclination >= 0.0 && inclination <= PST_RAD_180);

    if (inclination > PST_RAD_90 || (fabs (inclination - PST_RAD_90) < 1e-30 && zInFeatRef <= 0))
    {
      double inclinationDistance = (inclination - PST_RAD_135) / PST_RAD_90;
      if (inclination > PST_RAD_135)
        intWeight += 1 - inclinationDistance;
      else
      {
        intWeight += 1 + inclinationDistance;
        assert ((desc_index + 1) * (nr_bins+1) + step_index >= 0 && (desc_index + 1) * (nr_bins+1) + step_index < descLength_);
        shot[(desc_index + 1) * (nr_bins+1) + step_index] -= inclinationDistance;
      }
    }
    else
    {
      double inclinationDistance = (inclination - PST_RAD_45) / PST_RAD_90;
      if (inclination < PST_RAD_45)
        intWeight += 1 + inclinationDistance;
      else
      {
        intWeight += 1 - inclinationDistance;
        assert ((desc_index - 1) * (nr_bins+1) + step_index >= 0 && (desc_index - 1) * (nr_bins+1) + step_index < descLength_);
        shot[(desc_index - 1) * (nr_bins+1) + step_index] += inclinationDistance;
      }
    }

    if (yInFeatRef != 0.0 || xInFeatRef != 0.0)
    {
      // Interpolation on the azimuth (adjacent horizontal volumes)
      double azimuth = atan2 (yInFeatRef, xInFeatRef);

      int sel = desc_index >> 2;
      double angularSectorSpan = PST_RAD_45;
      double angularSectorStart = - PST_RAD_PI_7_8;

      double azimuthDistance = (azimuth - (angularSectorStart + angularSectorSpan*sel)) / angularSectorSpan;

      assert ((azimuthDistance < 0.5 || areEquals (azimuthDistance, 0.5)) && (azimuthDistance > - 0.5 || areEquals (azimuthDistance, - 0.5)));

      azimuthDistance = (std::max)(- 0.5, std::min (azimuthDistance, 0.5));

      if (azimuthDistance > 0)
      {
        intWeight += 1 - azimuthDistance;
        int interp_index = (desc_index + 4) % maxAngularSectors_;
        assert (interp_index * (nr_bins+1) + step_index >= 0 && interp_index * (nr_bins+1) + step_index < descLength_);
        shot[interp_index * (nr_bins+1) + step_index] += azimuthDistance;
      }
      else
      {
        int interp_index = (desc_index - 4 + maxAngularSectors_) % maxAngularSectors_;
        assert (interp_index * (nr_bins+1) + step_index >= 0 && interp_index * (nr_bins+1) + step_index < descLength_);
        intWeight += 1 + azimuthDistance;
        shot[interp_index * (nr_bins+1) + step_index] -= azimuthDistance;
      }
    }

    assert (volume_index + step_index >= 0 && volume_index + step_index < descLength_);
    shot[volume_index + step_index] += intWeight;
  }
}
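For completeness, a typical way to drive the SHOT estimator from user code. pcl::SHOT352 is the fixed-size output type of newer PCL releases; the rf-based overload above belongs to an older API whose output point type differed, so treat the output type, the wrapper name and the radius value as assumptions.

#include <pcl/point_types.h>
#include <pcl/features/shot.h>
#include <pcl/search/kdtree.h>

void
computeSHOT (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
             const pcl::PointCloud<pcl::Normal>::ConstPtr &normals,
             pcl::PointCloud<pcl::SHOT352> &descriptors)
{
  pcl::SHOTEstimation<pcl::PointXYZ, pcl::Normal, pcl::SHOT352> shot;
  shot.setInputCloud (cloud);
  shot.setInputNormals (normals);
  shot.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  shot.setRadiusSearch (0.05);   // placeholder support radius [m]
  shot.compute (descriptors);    // one descriptor per input point
}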