int searchModelWithMEstimator(const Kernel &kernel,
                              int maxNbIterations,
                              typename Kernel::Model* bestModel,
                              double *RMS = 0,
                              double *sigmaMAD_p = 0)
{
    assert(bestModel);

    const int N = (int)kernel.NumSamples();
    const int m = (int)Kernel::MinimumSamples();

    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return 0;
    } else if (N == m) {
        bool ok = searchModel_minimalSamples(kernel, bestModel, 0, RMS);
        return ok ? 1 : 0;
    }

    // Compute a first model on all samples with least squares.
    int hasModel = kernel.ComputeModelFromAllSamples(bestModel);
    if (!hasModel) {
        return 0;
    }

    InliersVec isInlier(N, true);
    int nbSuccessfulIterations = kernel.MEstimator(*bestModel, isInlier, maxNbIterations,
                                                   bestModel, RMS, sigmaMAD_p);
    if (RMS) {
        *RMS = kernel.ScalarUnormalize(*RMS);
    }
    kernel.Unnormalize(bestModel);

    return nbSuccessfulIterations;
} // searchModelWithMEstimator
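// kernel.MEstimator() is opaque at this level. For intuition, below is a minimal
// sketch of the usual technique behind such a call: IRLS (iteratively reweighted
// least squares) with a Huber weight. Everything in the sketch (the function name,
// the scalar y = a*x model, the fixed tuning constant k) is illustrative and is
// NOT the Kernel API used above; in the real code the residual scale is presumably
// estimated from the MAD of the residuals (cf. the sigmaMAD_p output).
#include <cmath>
#include <cstddef>
#include <vector>

// Robustly estimate the slope of y = a*x: residuals larger than k are
// down-weighted instead of contributing quadratically, so outliers have a
// bounded influence on the fit.
inline double irlsHuberSlope(const std::vector<double> &x, const std::vector<double> &y,
                             int maxNbIterations, double k = 1.345)
{
    double a = 0.;
    for (int it = 0; it < maxNbIterations; ++it) {
        double num = 0., den = 0.;
        for (std::size_t i = 0; i < x.size(); ++i) {
            const double r = std::abs(y[i] - a * x[i]); // residual of sample i
            const double w = (r <= k) ? 1. : k / r;     // Huber weight
            num += w * x[i] * y[i];
            den += w * x[i] * x[i];
        }
        if (den == 0.) {
            break;
        }
        a = num / den; // weighted least-squares update
    }
    return a;
}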
bool searchModelLS(const Kernel &kernel,
                   typename Kernel::Model* bestModel,
                   double *RMS = 0)
{
    assert(bestModel);

    const int N = (int)kernel.NumSamples();
    const int m = (int)Kernel::MinimumSamples();

    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return false;
    } else if (N == m) {
        return searchModel_minimalSamples(kernel, bestModel, 0, RMS);
    }

    bool ok = kernel.ComputeModelFromAllSamples(bestModel);

    if (RMS) {
        // Compute the residual RMS of the least-squares model over all samples.
        InliersVec isInlier(N);
        int nInliers = kernel.ComputeInliersForModel(*bestModel, &isInlier, RMS);
        (void)nInliers;
        *RMS = kernel.ScalarUnormalize(*RMS);
    }
    kernel.Unnormalize(bestModel);

    return ok;
} // searchModelLS
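// ComputeModelFromAllSamples() is kernel-specific. As a point of reference, a
// minimal least-squares fit of y = a*x + b over all samples using Eigen (an
// illustrative sketch with hypothetical names, not the actual kernel code):
#include <Eigen/Dense>
#include <cstddef>
#include <vector>

struct LineModelSketch { double a, b; };

inline LineModelSketch fitLineLS(const std::vector<double> &xs, const std::vector<double> &ys)
{
    Eigen::MatrixXd A(xs.size(), 2);
    Eigen::VectorXd rhs(ys.size());
    for (std::size_t i = 0; i < xs.size(); ++i) {
        A(i, 0) = xs[i]; // slope column
        A(i, 1) = 1.;    // intercept column
        rhs(i) = ys[i];
    }
    // Solve min ||A*sol - rhs||^2 with a rank-revealing QR decomposition.
    Eigen::Vector2d sol = A.colPivHouseholderQr().solve(rhs);
    return LineModelSketch{sol(0), sol(1)};
}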
/**
 * @brief Segments a cloud using the planes and boundaries computed previously.
 * A point is considered part of a valid object if it is above a plane, inside
 * the limits of that plane's boundary, and not part of any of the planes.
 *
 * @param cloud Point cloud to segment.
 * @param [out] clusterIndices Valid indices after the segmentation.
 */
void MultiplePlaneSegmentation::segment(const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &cloud,
                                        std::vector<pcl::PointIndices> &clusterIndices)
{
    std::vector<pcl::ModelCoefficients> coefficients;
    getCoefficients(coefficients);

    std::vector<std::vector<pcl::PointXYZRGBA>> boundaries;
    getBoundaries(boundaries);

    // -1 -> part of a plane, 0 -> not part of an object, 1 -> part of an object.
    std::vector<char> mask(cloud->points.size(), 0);

    assert(coefficients.size() == boundaries.size());

    for (size_t i = 0; i < coefficients.size(); i++) {
        Eigen::Vector4f planeCoef = Eigen::Vector4f(coefficients[i].values.data());
        std::vector<pcl::PointXYZRGBA> planeBoundary = boundaries[i];

        #pragma omp parallel for firstprivate(planeCoef, planeBoundary) shared(cloud, mask) num_threads(4)
        for (size_t j = 0; j < cloud->points.size(); j++) {
            // Skip invalid points (NaN coordinates) and points already marked
            // as part of a plane; no further calculations are needed for them.
            if (std::isnan(cloud->points[j].x) || mask[j] == -1) continue;

            // Signed distance from the point to the plane, computed as the dot
            // product D = (P - A) . N / |N|. The coefficients are normalized, so
            // a plain dot product with the homogeneous point is sufficient.
            Eigen::Vector4f pt(cloud->points[j].x, cloud->points[j].y, cloud->points[j].z, 1);
            float distance = planeCoef.dot(pt);

            if (distance >= -0.02 && isInlier(cloud, j, planeBoundary, planeCoef)) {
                if (distance <= 0.02) {
                    // Within 2 cm of the plane: mark the point as part of the plane.
                    mask[j] = -1;
                } else {
                    // Above the plane and inside its boundary: mark it as object.
                    mask[j] = 1;
                }
            }
        }
    }

    // Collect the indices of the object points.
    pcl::PointIndices::Ptr inliers(new pcl::PointIndices());
    inliers->indices.resize(cloud->points.size());
    int nr_p = 0;
    for (size_t i = 0; i < mask.size(); i++) {
        if (mask[i] == 1) inliers->indices[nr_p++] = i;
    }
    inliers->indices.resize(nr_p);

    // Clustering.
    clusterIndices = std::vector<pcl::PointIndices>();
    clustering(cloud, inliers, 0.03, 200, clusterIndices);
}
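// Standalone illustration of the distance test above. With plane coefficients
// (a, b, c, d) normalized so that ||(a, b, c)|| = 1 (as PCL's SAC-based plane
// segmentation typically provides), the dot product with the homogeneous point
// (x, y, z, 1) is the signed point-to-plane distance in meters:
//
//   signedPlaneDistance({0, 0, 1, -1}, {0, 0, 1.5})  ==  0.5  (above the plane)
//   signedPlaneDistance({0, 0, 1, -1}, {2, 3, 1.0})  ==  0.0  (within the 2 cm plane band)
//
// signedPlaneDistance is a hypothetical helper, not part of this class.
#include <Eigen/Dense>

inline float signedPlaneDistance(const Eigen::Vector4f &planeCoef, const Eigen::Vector3f &p)
{
    return planeCoef.dot(Eigen::Vector4f(p.x(), p.y(), p.z(), 1.f));
}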
bool searchModel_minimalSamples(const Kernel &kernel,
                                typename Kernel::Model* bestModel,
                                InliersVec *bestInliers = 0,
                                double *bestRMS = 0)
{
    assert(kernel.NumSamples() == Kernel::MinimumSamples());

    InliersVec isInlier(kernel.NumSamples());
    int best_score = 0;
    bool bestModelFound = false;

    // A minimal-sample solver may return several candidate models; keep the one
    // with the largest inlier support.
    std::vector<typename Kernel::Model> possibleModels;
    kernel.ComputeModelFromMinimumSamples(&possibleModels);
    for (std::size_t i = 0; i < possibleModels.size(); ++i) {
        double rms;
        int model_score = kernel.ComputeInliersForModel(possibleModels[i], &isInlier, bestRMS ? &rms : 0);
        if (model_score > best_score) {
            if (bestRMS) {
                *bestRMS = rms;
            }
            best_score = model_score;
            *bestModel = possibleModels[i];
            if (bestInliers) {
                // Store the inliers of the current best model now: isInlier is
                // overwritten when the next candidate is tested.
                *bestInliers = isInlier;
            }
            bestModelFound = true;
        }
    }
    if (!bestModelFound) {
        return false;
    }
    if (bestRMS) {
        *bestRMS = kernel.ScalarUnormalize(*bestRMS);
    }
    kernel.Unnormalize(bestModel);

    return true;
}
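// Why ComputeModelFromMinimumSamples() fills a *vector* of candidate models:
// minimal-sample solvers are typically polynomial and can admit several roots
// (e.g. the 7-point fundamental-matrix solver yields up to 3 real solutions),
// and only the inlier count can arbitrate between them. A self-contained toy
// analogue (hypothetical, unrelated to the Kernel types): the centers of a
// circle of known radius passing through two points; there are generally two.
#include <cmath>
#include <vector>

struct Point2D { double x, y; };

inline std::vector<Point2D> circleCentersFromTwoPoints(Point2D a, Point2D b, double r)
{
    const double dx = b.x - a.x, dy = b.y - a.y;
    const double d2 = dx * dx + dy * dy; // squared chord length
    if (d2 == 0. || r * r < d2 / 4.) {
        return std::vector<Point2D>(); // coincident points, or chord longer than 2r
    }
    const double mx = (a.x + b.x) / 2., my = (a.y + b.y) / 2.; // chord midpoint
    const double h = std::sqrt(r * r / d2 - 0.25); // bisector offset, normalized by |d|
    // The two mirrored centers; a caller would score each by its inlier count.
    std::vector<Point2D> centers;
    centers.push_back(Point2D{mx - dy * h, my + dx * h});
    centers.push_back(Point2D{mx + dy * h, my - dx * h});
    return centers;
}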
ProsacReturnCodeEnum prosac(const Kernel &kernel,
                            typename Kernel::Model* bestModel,
                            InliersVec *bestInliers = 0,
                            double *bestRMS = 0)
{
    assert(bestModel);

    const int N = (int)std::min(kernel.NumSamples(), (std::size_t)RAND_MAX);
    // For us, the draw set is the same as the verification set.
    const int N_draw = N;
    const int m = (int)Kernel::MinimumSamples();

    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return eProsacReturnCodeNotEnoughPoints;
    } else if (N == m) {
        bool ok = searchModel_minimalSamples(kernel, bestModel, bestInliers, bestRMS);
        return ok ? eProsacReturnCodeFoundModel : eProsacReturnCodeNoModelFound;
    }

    InliersVec isInlier(N);
#ifndef PROSAC_DISABLE_LO_RANSAC
    InliersVec isInlierLO(N);
#endif

    /* NOTE: the PROSAC article sets T_N (the number of iterations before PROSAC
       becomes equivalent to RANSAC) to 200000, but that means:
       - only 535 correspondences out of 1000 will be used after 2808 iterations (60% outliers)
       - 395 out of 1000 after 588 iterations (50% outliers)
       - 170 out of 1000 after 163 iterations (40% outliers)
       (the number of iterations is the number of RANSAC draws needed for a 0.99
       confidence of finding the right model, given the percentage of outliers)

       QUESTION: Is it more reasonable to set it to the maximum number of
       iterations we plan to do, given the percentage of outliers?

       MY ANSWER: If you know that you won't draw more than XX samples (say 2808,
       because you only accept 60% outliers), then it is more reasonable to set
       T_N to that value, in order to give all correspondences a chance to be
       drawn (even if that chance is very small for the last ones). Anyway,
       PROSAC should find many inliers in the first rounds and stop right away.

       T_N = 2808 gives:
       - 961 correspondences out of 1000 will be used after 2808 iterations (60% outliers)
       - 595 out of 1000 after 588 iterations (50% outliers)
       - 177 out of 1000 after 163 iterations (40% outliers)
    */
    const int T_N = kernel.maxOutliersProportion >= 1. ?
                    std::numeric_limits<int>::max() :
                    niter_RANSAC(kernel.probability, kernel.maxOutliersProportion, m, kernel.iMaxIter);
    const int t_max = kernel.iMaxIter > 0 ? kernel.iMaxIter : T_N;
    const double beta = kernel.getProsacBetaParam();
    assert(beta > 0. && beta < 1.);
    int n_star = N;   // termination length (see sec. 2.2 Stopping criterion)
    int I_n_star = 0; // number of inliers found within the first n_star data points
    int I_N_best = 0; // best number of inliers found so far (store the model that goes with it)
    const int I_N_min = (int)((1. - kernel.maxOutliersProportion) * N); // the minimum number of total inliers
    int t = 0;        // iteration number
    int n = m;        // we draw samples from the set U_n of the top n data points
    double T_n = T_N; // average number of samples {M_i}_{i=1}^{T_N} that contain samples from U_n only
    int T_n_prime = 1; // integer version of T_n, see eq. (4)
    for (int i = 0; i < m; ++i) {
        T_n *= (double)(n - i) / (N - i);
    }
    int k_n_star = T_N; // number of samples to draw to reach the maximality constraint
    bool bestModelFound = false;

    std::vector<std::size_t> sample(m);

    // Note: the condition (I_N_best < I_N_min) was not in the original paper, but
    // it is reasonable: we shouldn't stop if we haven't found the expected number
    // of inliers.
    while (((I_N_best < I_N_min) || t <= k_n_star) && t < T_N && t <= t_max) {
        int I_N; // total number of inliers for that sample

        // Choice of the hypothesis generation set.
        t = t + 1;

        // From the paper, eq. (5) (not Algorithm 1):
        // "The growth function is then defined as
        //  g(t) = min {n : T'_n >= t}"
        // Thus n should be incremented if t > T'_n, not if t == T'_n as written
        // in Algorithm 1.
        if ((t > T_n_prime) && (n < n_star)) {
            double T_nplus1 = (T_n * (n + 1)) / (n + 1 - m);
            n = n + 1;
            T_n_prime = T_n_prime + (int)std::ceil(T_nplus1 - T_n);
            T_n = T_nplus1;
        }

        // Draw a semi-random sample (note that the test condition from
        // Algorithm 1 in the paper is reversed):
        if (t > T_n_prime) {
            // During the finishing stage (n == n_star && t > T_n_prime), draw a
            // standard RANSAC sample: m points selected from U_n at random.
            deal(n, m, sample);
        } else {
            // The sample contains m-1 points selected from U_{n-1} at random, plus u_n.
            deal(n - 1, m - 1, sample);
            sample[m - 1] = n - 1;
        }

        // INSERT: compute model parameters p_t from the sample M_t.
        std::vector<typename Kernel::Model> possibleModels;
        kernel.ComputeModelFromMinimumSamples(sample, &possibleModels);
        for (std::size_t modelNb = 0; modelNb < possibleModels.size(); ++modelNb) {
            // Find the support of the model with parameters p_t.
            // From the first paragraph of section 2: "The hypotheses are
            // verified against all data".
            double RMS;
            I_N = kernel.ComputeInliersForModel(possibleModels[modelNb], &isInlier, bestRMS ? &RMS : 0);

            if (I_N > I_N_best) {
                int n_best;   // best value found so far in terms of inliers ratio
                int I_n_best; // number of inliers for n_best
                int I_N_draw; // number of inliers within the first N_draw data points

                // INSERT (OPTIONAL): test for degenerate model configuration (DEGENSAC)
                // (i.e. discard the sample if more than 1 model is consistent with the sample)
                // ftp://cmp.felk.cvut.cz/pub/cmp/articles/matas/chum-degen-cvpr05.pdf

                // Do local optimization, and recompute the support (LO-RANSAC)
                // http://cmp.felk.cvut.cz/~matas/papers/chum-dagm03.pdf
                // For the fundamental matrix, the normalized 8-point algorithm performs very well:
                // http://axiom.anu.edu.au/~hartley/Papers/fundamental/ICCV-final/fundamental.pdf

                // Store the best model.
                *bestModel = possibleModels[modelNb];
                bestModelFound = true;
                if (bestRMS) {
                    *bestRMS = RMS;
                }

#ifndef PROSAC_DISABLE_LO_RANSAC
                int loransac_iter = 0;
                while (I_N > I_N_best) {
                    I_N_best = I_N;

                    if (kernel.iMaxLOIter < 0 || loransac_iter < kernel.iMaxLOIter) {
                        // Continue while LO-RANSAC finds a better support.
                        typename Kernel::Model modelLO;
                        double RMS_LO;
                        bool modelOptimized = kernel.OptimizeModel(*bestModel, isInlier, &modelLO);

                        if (modelOptimized) {
                            // I_N = findSupport(/* model, sample, */ N, isInlier);
                            int I_N_LO = kernel.ComputeInliersForModel(modelLO, &isInlierLO, bestRMS ? &RMS_LO : 0);
                            if (I_N_LO > I_N_best) {
                                isInlier = isInlierLO;
                                *bestModel = modelLO;
                                if (bestRMS) {
                                    *bestRMS = RMS_LO;
                                }
                                I_N = I_N_LO;
                            }
                        }
                        ++loransac_iter;
                    } // LO-RANSAC
                }
#else
                if (I_N > I_N_best) {
                    I_N_best = I_N;
                }
#endif

                if (bestInliers) {
                    *bestInliers = isInlier;
                }

                // Select a new termination length n_star if possible, according to Sec. 2.2.
                // Note: the original paper seems to do it each time a new sample is drawn,
                // but this really makes sense only if the new sample is better than the
                // previous ones.
                n_best = N;
                I_n_best = I_N_best;
                I_N_draw = std::accumulate(isInlier.begin(), isInlier.begin() + N_draw, 0);
#ifndef PROSAC_DISABLE_N_STAR_OPTIMIZATION
                int n_test;   // test value for the termination length
                int I_n_test; // number of inliers for that test value
                double epsilon_n_best = (double)I_n_best / n_best;

                for (n_test = N, I_n_test = I_N_draw; n_test > m; n_test--) {
                    // Loop invariants:
                    // - I_n_test is the number of inliers for the n_test first correspondences
                    // - n_best is the value between n_test+1 and N that maximizes the ratio I_n_best/n_best
                    assert(n_test >= I_n_test);

                    // * Non-randomness: I_n >= I_min(n*) (eq. (9))
                    // * Maximality: the number of samples that were drawn so far must be
                    //   enough so that the probability of having missed a set of inliers
                    //   is below eta0=0.01. This is the classical RANSAC termination
                    //   criterion (HZ 4.7.1.2, eq. (4.18)), except that it takes into
                    //   account only the n first samples (not the total number of samples):
                    //     k_n_star = log(eta0) / log(1 - (I_n_star/n_star)^m)   (eq. (12))
                    //   We have to minimize k_n_star, i.e. maximize I_n_star/n_star.
                    //printf("n_best=%d, I_n_best=%d, n_test=%d, I_n_test=%d\n",
                    //       n_best, I_n_best, n_test, I_n_test);

                    // A straightforward implementation would use the following test:
                    //   if (I_n_test > epsilon_n_best * n_test) {
                    // However, since I_n is binomial, and in the case of evenly distributed
                    // inliers, a better test is to reduce n_star only if there is a
                    // significant improvement in epsilon. Thus we use a Chi-squared test
                    // (P=0.10), together with the normal approximation to the binomial
                    // (mu = epsilon_n_star*n_test, sigma = sqrt(n_test*epsilon_n_star*(1-epsilon_n_star))).
                    // There is a significant difference between the two tests (e.g. with
                    // the findSupport functions provided above).
                    // We do the cheap test first, and the expensive test only if the cheap
                    // one passes.
                    if ((I_n_test * n_best > I_n_best * n_test) &&
                        (I_n_test > epsilon_n_best * n_test +
                                    std::sqrt(n_test * epsilon_n_best * (1. - epsilon_n_best) * 2.706))) {
                        if (I_n_test < Prosac_Imin(m, n_test, beta)) {
                            // Equation (9) not satisfied: no need to test for smaller
                            // n_test values anyway.
                            break; // jump out of the for(n_test) loop
                        }
                        n_best = n_test;
                        I_n_best = I_n_test;
                        epsilon_n_best = (double)I_n_best / n_best;
                    }

                    // Prepare for the next loop iteration.
                    I_n_test -= isInlier[n_test - 1];
                } // for (n_test ...
#endif // #ifndef PROSAC_DISABLE_N_STAR_OPTIMIZATION

                // Is the best one we found even better than n_star?
                if (I_n_best * n_star > I_n_star * n_best) {
                    assert(n_best >= I_n_best);
                    // Update all values.
                    n_star = n_best;
                    I_n_star = I_n_best;
                    k_n_star = niter_RANSAC(1. - kernel.eta0, 1. - I_n_star / (double)n_star, m, T_N);
                }
            } // if (I_N > I_N_best)
        } // for (modelNb ...
    } // while (t <= k_n_star ...

    if (!bestModelFound) {
        return eProsacReturnCodeNoModelFound;
    }

    if (bestRMS) {
        *bestRMS = kernel.ScalarUnormalize(*bestRMS);
    }
    kernel.Unnormalize(bestModel);

    if (t >= t_max) {
        return eProsacReturnCodeMaxIterationsParamReached;
    }
    if (t >= T_N) {
        return eProsacReturnCodeMaxIterationsFromProportionParamReached;
    }
    if (I_N_best == m) {
        return eProsacReturnCodeInliersIsMinSamples;
    }
    return eProsacReturnCodeFoundModel;
} // prosac
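// niter_RANSAC() and Prosac_Imin() are used above but defined elsewhere in this
// file. For reference, sketches of the standard formulas they presumably
// implement (the _sketch names are hypothetical):
#include <cmath>
#include <limits>

// Classical RANSAC iteration count (HZ 4.7.1.2, eq. (4.18)): the number of
// draws k such that the probability of never having drawn an all-inlier
// minimal sample falls below 1 - p, i.e.
//   k = log(1 - p) / log(1 - (1 - epsilon)^m),
// optionally capped at Nmax.
inline int niter_RANSAC_sketch(double p, double epsilon, int m, int Nmax)
{
    const int cap = (Nmax > 0) ? Nmax : std::numeric_limits<int>::max();
    if (epsilon >= 1.) {
        return cap; // all-outlier data: no finite bound
    }
    const double q = std::pow(1. - epsilon, (double)m); // P(one sample is all-inlier)
    if (q >= 1.) {
        return 1; // no outliers: a single draw suffices
    }
    const double k = std::log(1. - p) / std::log(1. - q);
    return (k >= (double)cap) ? cap : (int)std::ceil(k);
}

// Non-randomness bound of eq. (9): the minimum inlier count for a model on the
// first n correspondences to be considered better than chance, using the normal
// approximation to the binomial B(n, beta) and the same Chi-squared quantile
// (2.706, P = 0.10) as the n_star test above.
inline int Prosac_Imin_sketch(int m, int n, double beta)
{
    const double mu = n * beta;
    const double sigma = std::sqrt(n * beta * (1. - beta));
    return (int)std::ceil(m + mu + sigma * std::sqrt(2.706));
}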