// Keep only the points whose value on axis `dim` lies below the `ratio`-quantile.
// Throws InvalidParameter if `dim` exceeds the feature dimensionality.
void DataPointsFiltersImpl<T>::MaxQuantileOnAxisDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (int(dim) >= cloud.features.rows())
		throw InvalidParameter((boost::format("MaxQuantileOnAxisDataPointsFilter: Error, filtering on dimension number %1%, larger than feature dimensionality %2%") % dim % cloud.features.rows()).str());

	const int nbPointsIn = cloud.features.cols();
	const int nbPointsOut = nbPointsIn * ratio;

	// Nothing to do on an empty cloud; also protects the quantile lookup below.
	if (nbPointsIn == 0)
		return;

	// build array of the values on the filtered axis
	vector<T> values;
	values.reserve(nbPointsIn);
	for (int x = 0; x < nbPointsIn; ++x)
		values.push_back(cloud.features(dim, x));

	// get quantile value.
	// FIX: clamp the index — with ratio == 1, nbPointsOut equals values.size()
	// and the previous values[nbPointsOut] read one past the end of the vector.
	const int quantileIndex = std::min(nbPointsOut, nbPointsIn - 1);
	nth_element(values.begin(), values.begin() + quantileIndex, values.end());
	const T limit = values[quantileIndex];

	// copy towards beginning the elements we keep (strictly below the quantile)
	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		if (cloud.features(dim, i) < limit)
		{
			assert(j <= i);
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}
	assert(j <= nbPointsOut);

	cloud.conservativeResize(j);
}
// Keep (or remove, when removeInside is set) the points falling strictly inside
// the axis-aligned box [xMin,xMax] x [yMin,yMax] x [zMin,zMax].
void DataPointsFiltersImpl<T>::BoundingBoxDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int totalPoints = cloud.features.cols();
	const int featRows = cloud.features.rows();

	int writeIdx = 0;
	for (int readIdx = 0; readIdx < totalPoints; readIdx++)
	{
		const Vector point = cloud.features.col(readIdx);

		// FIXME: improve performance by using Eigen array operations
		const bool insideX = (point(0) > xMin && point(0) < xMax);
		const bool insideY = (point(1) > yMin && point(1) < yMax);
		// A 2-D cloud (3 feature rows incl. padding) has no z coordinate to test,
		// so it always passes the z check.
		const bool insideZ = (point(2) > zMin && point(2) < zMax) || featRows == 3;
		const bool insideBox = insideX && insideY && insideZ;

		const bool keepPoint = removeInside ? !insideBox : insideBox;
		if (keepPoint)
		{
			cloud.setColFrom(writeIdx, cloud, readIdx);
			writeIdx++;
		}
	}
	cloud.conservativeResize(writeIdx);
}
// Remove shadow points: points whose surface normal is nearly orthogonal to the
// viewing direction (|normal . viewDir| <= eps). Requires the "normals" descriptor.
void DataPointsFiltersImpl<T>::ShadowDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	// Shadow detection needs surface normals
	if (!cloud.descriptorExists("normals"))
	{
		throw InvalidField("ShadowDataPointsFilter, Error: cannot find normals in descriptors");
	}

	const int featDim = cloud.features.rows();
	const BOOST_AUTO(normals, cloud.getDescriptorViewByName("normals"));

	int keptCount = 0;
	for (int readIdx = 0; readIdx < cloud.features.cols(); readIdx++)
	{
		const Vector unitNormal = normals.col(readIdx).normalized();
		// viewing ray from the sensor origin to the point (padding row excluded)
		const Vector viewDir = cloud.features.block(0, readIdx, featDim - 1, 1).normalized();

		// keep the point when the normal is not grazing w.r.t. the viewing ray
		if (anyabs(unitNormal.dot(viewDir)) > eps)
		{
			cloud.features.col(keptCount) = cloud.features.col(readIdx);
			cloud.descriptors.col(keptCount) = cloud.descriptors.col(readIdx);
			keptCount++;
		}
	}
	cloud.conservativeResize(keptCount);
}
// Randomly sub-sample points in regions denser than maxDensity so that the
// expected output density approaches maxDensity. Requires the "densities"
// descriptor (computed by an upstream filter).
void DataPointsFiltersImpl<T>::MaxDensityDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	typedef typename DataPoints::View View;

	// Force densities to be computed
	if (!cloud.descriptorExists("densities"))
	{
		throw InvalidField("MaxDensityDataPointsFilter: Error, no densities found in descriptors.");
	}

	const int nbPointsIn = cloud.features.cols();
	View densities = cloud.getDescriptorViewByName("densities");
	const T lastDensity = densities.maxCoeff();
	// points whose density saturates at the maximum value
	const int nbSaturatedPts = (densities.cwise() == lastDensity).count();

	// fill cloud values
	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		const T density(densities(0,i));
		if (density > maxDensity)
		{
			// accept dense points with probability maxDensity/density
			const float r = (float)std::rand()/(float)RAND_MAX;
			float acceptRatio = maxDensity/density;

			// Handle saturation value of density.
			// FIX: compute the correction in floating point — the previous
			// nbSaturatedPts/nbPointsIn was integer division and truncated
			// to 0 unless every point was saturated, so the correction
			// silently never applied.
			if (density == lastDensity)
			{
				acceptRatio = acceptRatio * (1.0f - (float)nbSaturatedPts / (float)nbPointsIn);
			}

			if (r < acceptRatio)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
		else // sparse points are always kept
		{
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}

	cloud.conservativeResize(j);
}
// Keep each point independently with probability `prob`, compacting the
// survivors to the front of the cloud.
void DataPointsFiltersImpl<T>::RandomSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int totalPoints = cloud.features.cols();

	int survivors = 0;
	for (int idx = 0; idx < totalPoints; idx++)
	{
		// uniform draw in [0, 1]
		const float draw = (float)std::rand()/(float)RAND_MAX;
		if (draw < prob)
		{
			cloud.setColFrom(survivors, cloud, idx);
			survivors++;
		}
	}
	cloud.conservativeResize(survivors);
}
// Drop every point whose feature column contains at least one NaN value,
// compacting the remaining points to the front of the cloud.
void RemoveNaNDataPointsFilter<T>::inPlaceFilter(
	DataPoints& cloud)
{
	const int totalPoints = cloud.features.cols();

	int kept = 0;
	for (int idx = 0; idx < totalPoints; ++idx)
	{
		// NaN is the only value for which x == x is false, so the column is
		// NaN-free exactly when the element-wise self-comparison is all-true.
		const BOOST_AUTO(featCol, cloud.features.col(idx).array());
		const BOOST_AUTO(isNaNFree, (featCol == featCol).all());
		if (isNaNFree)
		{
			cloud.setColFrom(kept, cloud, idx);
			++kept;
		}
	}
	cloud.conservativeResize(kept);
}
// Keep the points closer than maxDist, either in Euclidean distance
// (dim == -1) or along a single feature axis (signed comparison).
void DataPointsFiltersImpl<T>::MaxDistDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (dim >= cloud.features.rows() - 1)
	{
		throw InvalidParameter(
			(boost::format("MaxDistDataPointsFilter: Error, filtering on dimension number %1%, larger than authorized axis id %2%") % dim % (cloud.features.rows() - 2)).str());
	}

	const int nbPointsIn = cloud.features.cols();
	const int nbRows = cloud.features.rows();

	int j = 0;
	if(dim == -1) // Euclidean distance
	{
		// FIX: anyabs(maxDist) is loop-invariant; it was recomputed on every
		// iteration before.
		const T absMaxDist = anyabs(maxDist);
		for (int i = 0; i < nbPointsIn; i++)
		{
			// norm over the geometric rows only (last row is homogeneous padding)
			if (cloud.features.col(i).head(nbRows-1).norm() < absMaxDist)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
	}
	else // Single-axis distance (signed: a negative maxDist keeps points below it)
	{
		for (int i = 0; i < nbPointsIn; i++)
		{
			if ((cloud.features(dim, i)) < maxDist)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
	}

	cloud.conservativeResize(j);
}
// Keep every `step`-th point starting from a random phase, then geometrically
// adapt `step` (multiplied by stepMult, clamped at endStep) for the next call.
void DataPointsFiltersImpl<T>::FixStepSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int intStep(step);
	const int totalPoints = cloud.features.cols();
	// random phase so successive calls do not always pick the same columns
	const int phase(rand() % intStep);

	int kept = 0;
	for (int idx = phase; idx < totalPoints; idx += intStep)
	{
		cloud.setColFrom(kept, cloud, idx);
		kept++;
	}
	cloud.conservativeResize(kept);

	// The sign of deltaStep tells whether the step sequence grows or shrinks;
	// clamp it at endStep in the corresponding direction.
	const double deltaStep(startStep * stepMult - startStep);
	step *= stepMult;
	if ((deltaStep < 0 && step < endStep) || (deltaStep > 0 && step > endStep))
		step = endStep;
}
// Covariance (geometrically stable) sampling: select nbSample points that
// equally constrain the six eigen-directions of the 6x6 point-to-plane
// covariance matrix. Requires 3-D points (4 feature rows) with the "normals"
// descriptor.
void CovarianceSamplingDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
	const std::size_t featDim(cloud.features.rows());
	assert(featDim == 4); //3D pts only

	//Check number of points
	const std::size_t nbPoints = cloud.getNbPoints();
	if(nbSample >= nbPoints)
		return;

	//Check if there is normals info
	// FIX: the error message previously named OrientNormalsDataPointsFilter
	// (copy-paste), which misleads users about the failing filter.
	if (!cloud.descriptorExists("normals"))
		throw InvalidField("CovarianceSamplingDataPointsFilter: Error, cannot find normals in descriptors.");

	const auto& normals = cloud.getDescriptorViewByName("normals");

	std::vector<std::size_t> keepIndexes;
	keepIndexes.resize(nbSample);

	///---- Part A, as we compare the cloud with himself, the overlap is 100%, so we keep all points
	//A.1 and A.2 - Compute candidates
	std::vector<std::size_t> candidates;
	candidates.resize(nbPoints);

	for (std::size_t i = 0; i < nbPoints; ++i)
		candidates[i] = i;

	const std::size_t nbCandidates = candidates.size();

	//Compute centroid of the 3-D coordinates (the 4th feature row is the
	//homogeneous padding and is excluded).
	//FIX: the accumulation and normalization loops previously ran `<= 3`,
	//indexing one past the end of the 3-element Vector3 (Eigen assertion in
	//debug builds, undefined behavior in release builds).
	Vector3 center;
	for(std::size_t i = 0; i < 3; ++i)
		center(i) = T(0.);

	for (std::size_t i = 0; i < nbCandidates; ++i)
		for (std::size_t f = 0; f < 3; ++f)
			center(f) += cloud.features(f,candidates[i]);

	for(std::size_t i = 0; i < 3; ++i)
		center(i) /= T(nbCandidates);

	//Compute torque normalization
	T Lnorm = 1.0;

	if(normalizationMethod == TorqueNormMethod::L1)
	{
		Lnorm = 1.0;
	}
	else if(normalizationMethod == TorqueNormMethod::Lavg)
	{
		// average distance of candidates to the centroid
		Lnorm = 0.0;
		for (std::size_t i = 0; i < nbCandidates; ++i)
			Lnorm += (cloud.features.col(candidates[i]).head(3) - center).norm();
		Lnorm /= nbCandidates;
	}
	else if(normalizationMethod == TorqueNormMethod::Lmax)
	{
		// half of the largest extent of the bounding box
		const Vector minValues = cloud.features.rowwise().minCoeff();
		const Vector maxValues = cloud.features.rowwise().maxCoeff();
		const Vector3 radii = maxValues.head(3) - minValues.head(3);

		Lnorm = radii.maxCoeff() / 2.; //radii.mean() / 2.;
	}

	//A.3 - Compute 6x6 covariance matrix + EigenVectors
	// FIX: the capture list was garbled by a bad encoding (`¢er`); it must
	// capture `center` by reference.
	auto computeCovariance = [Lnorm, nbCandidates, &cloud, &center, &normals, &candidates](Matrix66 & cov) -> void {
			//Compute F matrix, see Eq. (4)
			Eigen::Matrix<T, 6, Eigen::Dynamic> F(6, nbCandidates);

			for(std::size_t i = 0; i < nbCandidates; ++i)
			{
				const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
				const Vector3 ni = normals.col(candidates[i]).head(3);

				//compute (1 / L) * (pi - c) x ni
				F.template block<3, 1>(0, i) = (1. / Lnorm) * p.cross(ni);
				//set ni part
				F.template block<3, 1>(3, i) = ni;
			}

			// Compute the covariance matrix Cov = FF'
			cov = F * F.transpose();
		};

	Matrix66 covariance;
	computeCovariance(covariance);

	Eigen::EigenSolver<Matrix66> solver(covariance);
	const Matrix66 eigenVe = solver.eigenvectors().real();
	const Vector6 eigenVa = solver.eigenvalues().real();

	///---- Part B
	//B.1 - Compute the v-6 for each candidate
	std::vector<Vector6, Eigen::aligned_allocator<Vector6>> v; // v[i] = [(pi-c) x ni ; ni ]'
	v.resize(nbCandidates);

	for(std::size_t i = 0; i < nbCandidates; ++i)
	{
		const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
		const Vector3 ni = normals.col(candidates[i]).head(3);

		v[i].template block<3, 1>(0, 0) = (1. / Lnorm) * p.cross(ni);
		v[i].template block<3, 1>(3, 0) = ni;
	}

	//B.2 - Compute the 6 sorted lists based on dot product (vi . Xk) = magnitude, with Xk the kth-EigenVector
	std::vector<std::list<std::pair<int, T>>> L; // contains a list of pairs (index, magnitude): contribution to the eigen vectors
	L.resize(6);

	//sort by decreasing magnitude
	auto comp = [](const std::pair<int, T>& p1, const std::pair<int, T>& p2) -> bool {
			return p1.second > p2.second;
		};

	for(std::size_t k = 0; k < 6; ++k)
	{
		for(std::size_t i = 0; i < nbCandidates; ++i )
		{
			L[k].push_back(std::make_pair(i, std::fabs( v[i].dot(eigenVe.template block<6,1>(0, k)) )));
		}

		L[k].sort(comp);
	}

	std::vector<T> t(6, T(0.)); //contains the sums of squared magnitudes
	std::vector<bool> sampledPoints(nbCandidates, false); //maintain flag to avoid resampling the same point in an other list

	///Add point iteratively till we got the desired number of point
	for(std::size_t i = 0; i < nbSample; ++i)
	{
		//B.3 - Equally constrain all eigen vectors:
		// the magnitude contributes to t_k where k is the index of the least
		// constrained eigen vector.
		// FIX: inner loop indices renamed (`j`, `d`) — they previously
		// shadowed the outer `i` and `k`, which was legal but error-prone.

		//Find least constrained vector
		std::size_t k = 0;
		for (std::size_t j = 0; j < 6; ++j)
		{
			if (t[k] > t[j])
				k = j;
		}
		// Add the point from the top of the list corresponding to the dimension to the set of samples
		while(sampledPoints[L[k].front().first])
			L[k].pop_front(); //remove already sampled point

		//Get index to keep
		const std::size_t idToKeep = static_cast<std::size_t>(L[k].front().first);
		L[k].pop_front();

		sampledPoints[idToKeep] = true; //set flag to avoid resampling

		//B.4 - Update the running totals over all six dimensions
		for (std::size_t d = 0; d < 6; ++d)
		{
			const T magnitude = v[idToKeep].dot(eigenVe.template block<6, 1>(0, d));
			t[d] += (magnitude * magnitude);
		}

		keepIndexes[i] = candidates[idToKeep];
	}

	//TODO: evaluate performances between this solution and sorting the indexes
	// We build a map of (old index to new index), in case we sample pts at the begining of the pointcloud
	std::unordered_map<std::size_t, std::size_t> mapidx;
	std::size_t idx = 0;

	///(4) Sample the point cloud
	for(std::size_t id : keepIndexes)
	{
		//retrieve index from lookup table if sampling in already switched element
		if(id<idx)
			id = mapidx[id];

		//Switch columns id and idx
		cloud.swapCols(idx, id);

		//Maintain new index position
		mapidx[idx] = id;

		//Update index
		++idx;
	}

	cloud.conservativeResize(nbSample);
}