// Common headers needed by the snippets in this section.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <numeric>
#include <random>
#include <utility>
#include <vector>

// Find the model that maximizes the number of inliers over a fixed number
// of random minimal samples (MAX-CONSENSUS).
template<typename Kernel, typename Scorer>
typename Kernel::Model MaxConsensus
(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<uint32_t> *best_inliers = nullptr,
  uint32_t max_iteration = 1024
)
{
  const uint32_t min_samples = Kernel::MINIMUM_SAMPLES;
  const uint32_t total_samples = kernel.NumSamples();

  size_t best_num_inliers = 0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time.
  std::vector<uint32_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  // Random number generator configuration.
  std::mt19937 random_generator(std::mt19937::default_seed);

  std::vector<uint32_t> sample;
  for (uint32_t iteration = 0; iteration < max_iteration; ++iteration) {
    UniformSample(min_samples, random_generator, &all_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Score each fit on the full data set; keep the one with most inliers.
    for (const auto &model_it : models) {
      std::vector<uint32_t> inliers;
      scorer.Score(kernel, model_it, all_samples, &inliers);

      if (best_num_inliers < inliers.size()) {
        best_num_inliers = inliers.size();
        best_model = model_it;
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
    }
  }
  return best_model;
}
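The snippets in this section call several UniformSample overloads that are never defined here. As an illustration only, here is a minimal sketch of the generator-based overload used by MaxConsensus: it draws num_samples distinct indices via a partial Fisher-Yates shuffle. The library's actual implementation may differ.

// Illustrative sketch (assumption, not the original implementation):
// draw num_samples distinct indices from vec_index by partially shuffling
// it in place and copying out the prefix.
static void UniformSample(uint32_t num_samples,
                          std::mt19937 &random_generator,
                          std::vector<uint32_t> *vec_index, // candidate indices
                          std::vector<uint32_t> *sample)    // output sample
{
  sample->resize(num_samples);
  for (uint32_t i = 0; i < num_samples; ++i) {
    // Pick one of the not-yet-chosen positions uniformly at random.
    std::uniform_int_distribution<uint32_t> distribution(
      i, static_cast<uint32_t>(vec_index->size()) - 1);
    const uint32_t j = distribution(random_generator);
    std::swap((*vec_index)[i], (*vec_index)[j]);
    (*sample)[i] = (*vec_index)[i];
  }
}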
// RANSAC with an adaptive iteration count: the scorer returns the inlier
// list, and the model with the most inliers wins.
template<typename Kernel, typename Scorer>
typename Kernel::Model RANSAC
(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<size_t> *best_inliers = nullptr,
  size_t *best_score = nullptr, // Returns the found number of inliers
  double outliers_probability = 1e-2
)
{
  assert(outliers_probability < 1.0);
  assert(outliers_probability > 0.0);
  size_t iteration = 0;
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  size_t max_iterations = 100;
  const size_t really_max_iterations = 4096;

  size_t best_num_inliers = 0;
  double best_inlier_ratio = 0.0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list [0, .., total_samples-1] ahead of time.
  std::vector<size_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  std::vector<size_t> sample;
  for (iteration = 0;
       iteration < max_iterations && iteration < really_max_iterations;
       ++iteration) {
    UniformSample(min_samples, &all_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Compute the inlier list for each fit.
    for (size_t i = 0; i < models.size(); ++i) {
      std::vector<size_t> inliers;
      scorer.Score(kernel, models[i], all_samples, &inliers);

      if (best_num_inliers < inliers.size()) {
        best_num_inliers = inliers.size();
        best_inlier_ratio = inliers.size() / double(total_samples);
        best_model = models[i];
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
      if (best_inlier_ratio) {
        max_iterations = IterationsRequired(min_samples,
                                            outliers_probability,
                                            best_inlier_ratio);
      }
    }
  }
  if (best_score)
    *best_score = best_num_inliers;
  return best_model;
}
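IterationsRequired is not defined in the snippet. The standard adaptive-termination argument goes as follows: if the inlier ratio is w and a minimal sample has s points, a random sample is all-inlier with probability w^s, so the chance that N independent draws all fail is (1 - w^s)^N; bounding that by outliers_probability and solving for N gives the update below. A minimal sketch under that assumption:

// Sketch of the iteration-count update assumed by the RANSAC loop above:
// solve (1 - inlier_ratio^min_samples)^N <= outliers_probability for N.
// Real implementations typically also guard against inlier_ratio == 1.
static size_t IterationsRequired(size_t min_samples,
                                 double outliers_probability,
                                 double inlier_ratio)
{
  return static_cast<size_t>(
    std::log(outliers_probability) /
    std::log(1.0 - std::pow(inlier_ratio,
                            static_cast<double>(min_samples))));
}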
// Least Median of Squares (LMedS): minimize the median of the residuals
// over randomly drawn minimal samples.
template<typename Kernel>
double LeastMedianOfSquares
(
  const Kernel &kernel,
  typename Kernel::Model *model = nullptr,
  double *outlierThreshold = nullptr,
  double outlierRatio = 0.5,
  double minProba = 0.99
)
{
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  std::vector<double> residuals(total_samples); // Array for storing residuals
  std::vector<size_t> vec_sample(min_samples);

  double dBestMedian = std::numeric_limits<double>::max();

  // The required number of iterations is evaluated from the outlier ratio.
  const size_t N = (min_samples < total_samples) ?
    getNumSamples(minProba, outlierRatio, min_samples) : 0;

  for (size_t i = 0; i < N; i++) {
    // Get sample indices
    UniformSample(min_samples, total_samples, &vec_sample);

    // Estimate parameters: the solutions are stored in a vector
    std::vector<typename Kernel::Model> models;
    kernel.Fit(vec_sample, &models);

    // Now test the solutions on the whole data
    for (size_t k = 0; k < models.size(); ++k) {
      // Compute residuals:
      for (size_t l = 0; l < total_samples; ++l) {
        residuals[l] = kernel.Error(l, models[k]);
      }

      // Compute the median (more precisely, the (1-outlierRatio)-quantile
      // of the residuals; it is the median for outlierRatio = 0.5)
      std::vector<double>::iterator itMedian = residuals.begin() +
        std::size_t(total_samples * (1. - outlierRatio));
      std::nth_element(residuals.begin(), itMedian, residuals.end());
      double median = *itMedian;

      // Store the best solution
      if (median < dBestMedian) {
        dBestMedian = median;
        if (model) (*model) = models[k];
      }
    }
  }

  // This array of precomputed values corresponds to the inverse
  // cumulative function for a normal distribution. For more information
  // consult the literature (Robust Regression for Outlier Detection,
  // Rousseeuw-Leroy). The values are tabulated in 5% steps.
  static const double ICDF[21] = {
    1.4e16, 15.94723940, 7.957896558, 5.287692054, 3.947153876,
    3.138344200, 2.595242369, 2.203797543, 1.906939402, 1.672911853,
    1.482602218, 1.323775627, 1.188182950, 1.069988721, 0.9648473415,
    0.8693011162, 0.7803041458, 0.6946704675, 0.6079568319, 0.5102134568,
    0.3236002672
  };

  // Evaluate the outlier threshold
  if (outlierThreshold) {
    double sigma = ICDF[int((1. - outlierRatio) * 20.)] *
      (1. + 5. / double(total_samples - min_samples));
    *outlierThreshold = (double)(sigma * sigma * dBestMedian * 4.);
    if (N == 0)
      *outlierThreshold = std::numeric_limits<double>::max();
  }
  return dBestMedian;
}
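getNumSamples plays the same role for LMedS as IterationsRequired does for RANSAC, parameterized by the assumed outlier ratio epsilon and the target confidence minProba: a minimal sample of size s is outlier-free with probability (1 - epsilon)^s, so solving (1 - (1 - epsilon)^s)^N <= 1 - minProba for N gives the loop count. A sketch under that assumption:

// Sketch of the LMedS iteration count assumed above: the number of random
// minimal samples needed to draw at least one outlier-free sample with
// probability minProba, given the assumed outlier ratio.
static size_t getNumSamples(double minProba,
                            double outlierRatio,
                            size_t SampleSize)
{
  return static_cast<size_t>(
    std::log(1.0 - minProba) /
    std::log(1.0 - std::pow(1.0 - outlierRatio,
                            static_cast<double>(SampleSize))));
}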
// A-Contrario RANSAC (AC-RANSAC): estimates both the model and its
// inlier/outlier threshold by minimizing the Number of False Alarms (NFA).
// Returns the pair (errorMax, minNFA).
template<typename Kernel>
std::pair<double, double> ACRANSAC
(
  const Kernel &kernel,
  std::vector<size_t> &vec_inliers,
  size_t nIter = 1024,
  typename Kernel::Model *model = nullptr,
  double precision = std::numeric_limits<double>::infinity(),
  bool bVerbose = false
)
{
  vec_inliers.clear();

  const size_t sizeSample = Kernel::MINIMUM_SAMPLES;
  const size_t nData = kernel.NumSamples();
  if (nData <= (size_t)sizeSample)
    return std::make_pair(0.0, 0.0);

  const double maxThreshold =
    (precision == std::numeric_limits<double>::infinity()) ?
      std::numeric_limits<double>::infinity() :
      precision * kernel.normalizer2()(0,0) * kernel.normalizer2()(0,0);

  std::vector<ErrorIndex> vec_residuals(nData); // [residual, index]
  std::vector<double> vec_residuals_(nData);
  std::vector<size_t> vec_sample(sizeSample); // Sample indices

  // Possible sampling indices (could change in the optimization phase)
  std::vector<size_t> vec_index(nData);
  std::iota(vec_index.begin(), vec_index.end(), 0);

  // Precompute the log combinatorial terms of the NFA
  const double loge0 =
    log10((double)Kernel::MAX_MODELS * (nData - sizeSample));
  std::vector<float> vec_logc_n, vec_logc_k;
  makelogcombi_n(nData, vec_logc_n);
  makelogcombi_k(sizeSample, nData, vec_logc_k);

  // Output parameters
  double minNFA = std::numeric_limits<double>::infinity();
  double errorMax = std::numeric_limits<double>::infinity();

  // Reserve 10% of the iterations for focused sampling
  size_t nIterReserve = nIter / 10;
  nIter -= nIterReserve;

  // Main estimation loop.
  for (size_t iter = 0; iter < nIter; ++iter) {
    UniformSample(sizeSample, vec_index, &vec_sample); // Get random sample

    std::vector<typename Kernel::Model> vec_models; // Up to MAX_MODELS solutions
    kernel.Fit(vec_sample, &vec_models);

    // Evaluate models
    bool better = false;
    for (size_t k = 0; k < vec_models.size(); ++k) {
      // Residual computation and ordering
      kernel.Errors(vec_models[k], vec_residuals_);
      for (size_t i = 0; i < nData; ++i) {
        vec_residuals[i] = ErrorIndex(vec_residuals_[i], i);
      }
      std::sort(vec_residuals.begin(), vec_residuals.end());

      // Most meaningful discrimination inliers/outliers
      const ErrorIndex best = bestNFA(sizeSample,
                                      kernel.logalpha0(),
                                      vec_residuals,
                                      loge0,
                                      maxThreshold,
                                      vec_logc_n,
                                      vec_logc_k,
                                      kernel.multError());

      if (best.first < minNFA
          /*&& vec_residuals[best.second-1].first < errorMax*/) {
        // A better model was found
        better = true;
        minNFA = best.first;
        vec_inliers.resize(best.second);
        for (size_t i = 0; i < best.second; ++i)
          vec_inliers[i] = vec_residuals[i].second;
        errorMax = vec_residuals[best.second - 1].first; // Error threshold
        if (model) *model = vec_models[k];

        if (bVerbose) {
          std::cout << " nfa=" << minNFA
                    << " inliers=" << best.second
                    << " precisionNormalized=" << errorMax
                    << " precision=" << kernel.unormalizeError(errorMax)
                    << " (iter=" << iter
                    << ",sample=";
          std::copy(vec_sample.begin(), vec_sample.end(),
                    std::ostream_iterator<size_t>(std::cout, ","));
          std::cout << ")" << std::endl;
        }
      }
    }

    // ACRANSAC optimization: draw samples among the best set of inliers so far
    if ((better && minNFA < 0) || (iter + 1 == nIter && nIterReserve)) {
      if (vec_inliers.empty()) {
        // No model found at all so far:
        // continue to look for any model, even a not meaningful one.
        nIter++;
        nIterReserve--;
      } else {
        vec_index = vec_inliers;
        if (nIterReserve) {
          nIter = iter + 1 + nIterReserve;
          nIterReserve = 0;
        }
      }
    }
  }

  if (minNFA >= 0)
    vec_inliers.clear();

  if (!vec_inliers.empty()) {
    if (model)
      kernel.Unnormalize(model);
    errorMax = kernel.unormalizeError(errorMax);
  }
  return std::make_pair(errorMax, minNFA);
}
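ErrorIndex, bestNFA, makelogcombi_n, and makelogcombi_k are assumed above but never defined. ErrorIndex pairs a residual with its data index; bestNFA scans the sorted residuals and, for each candidate inlier count k, evaluates log10 NFA(k) = loge0 + (k - s)(logalpha0 + multError * log10 e_k) + log10 C(n,k) + log10 C(k,s), returning the k that minimizes it. The following is a sketch consistent with that a-contrario criterion, not necessarily the library's exact code; the combinatorial tables are assumed precomputed with size n+1.

// Illustrative sketch of the a-contrario model selection assumed above.
// e must be sorted by increasing residual; logc_n[k] = log10 C(n,k) and
// logc_k[k] = log10 C(k, startIndex) come from makelogcombi_n/_k.
typedef std::pair<double, size_t> ErrorIndex; // [residual, index]

static ErrorIndex bestNFA(size_t startIndex, // = minimal sample size
                          double logalpha0,
                          const std::vector<ErrorIndex> &e,
                          double loge0,
                          double maxThreshold,
                          const std::vector<float> &logc_n,
                          const std::vector<float> &logc_k,
                          double multError = 1.0)
{
  ErrorIndex bestIndex(std::numeric_limits<double>::infinity(), startIndex);
  const size_t n = e.size();
  for (size_t k = startIndex + 1;
       k <= n && e[k-1].first <= maxThreshold; ++k) {
    // log10 NFA(k) = loge0 + (k - startIndex)*(logalpha0 + mult*log10(e_k))
    //              + log10 C(n,k) + log10 C(k, startIndex)
    const double logalpha = logalpha0 +
      multError * log10(e[k-1].first +
                        std::numeric_limits<float>::epsilon());
    const ErrorIndex index(
      loge0 + logalpha * double(k - startIndex) + logc_n[k] + logc_k[k], k);
    if (index.first < bestIndex.first)
      bestIndex = index;
  }
  return bestIndex;
}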
// RANSAC variant driven by a cost-based scorer: the model with the lowest
// cost wins, instead of the raw inlier count used by the earlier variant.
template<typename Kernel, typename Scorer>
typename Kernel::Model RANSAC
(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<size_t> *best_inliers = nullptr,
  double *best_score = nullptr,
  double outliers_probability = 1e-2
)
{
  assert(outliers_probability < 1.0);
  assert(outliers_probability > 0.0);
  size_t iteration = 0;
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  size_t max_iterations = 100;
  const size_t really_max_iterations = 4096;

  size_t best_num_inliers = 0;
  double best_cost = std::numeric_limits<double>::infinity();
  double best_inlier_ratio = 0.0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time.
  std::vector<size_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  std::vector<size_t> sample;
  for (iteration = 0;
       iteration < max_iterations && iteration < really_max_iterations;
       ++iteration) {
    UniformSample(min_samples, total_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Compute the cost for each fit.
    for (size_t i = 0; i < models.size(); ++i) {
      std::vector<size_t> inliers;
      double cost = scorer.Score(kernel, models[i], all_samples, &inliers);

      if (cost < best_cost) {
        best_cost = cost;
        best_inlier_ratio = inliers.size() / double(total_samples);
        best_num_inliers = inliers.size();
        best_model = models[i];
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
      if (best_inlier_ratio) {
        max_iterations = IterationsRequired(min_samples,
                                            outliers_probability,
                                            best_inlier_ratio);
      }
    }
  }
  if (best_score)
    *best_score = best_cost;
  return best_model;
}
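The Scorer in this variant returns a scalar cost and fills the inlier list. A minimal, hypothetical MSAC-style scorer compatible with that interface (the threshold parameter and the class name are assumptions, not part of the original code):

// Hypothetical sketch of a cost-based Scorer for the RANSAC variant above:
// each residual contributes its value up to a fixed truncation threshold;
// samples below the threshold are reported as inliers.
template<typename Kernel>
class ThresholdScorer {
public:
  explicit ThresholdScorer(double threshold) : threshold_(threshold) {}

  double Score(const Kernel &kernel,
               const typename Kernel::Model &model,
               const std::vector<size_t> &samples,
               std::vector<size_t> *inliers) const
  {
    double cost = 0.0;
    inliers->clear();
    for (size_t i = 0; i < samples.size(); ++i) {
      const double error = kernel.Error(samples[i], model);
      if (error < threshold_) {
        cost += error;       // Inlier: pay its residual
        inliers->push_back(samples[i]);
      } else {
        cost += threshold_;  // Outlier: pay the truncated penalty
      }
    }
    return cost;
  }

private:
  double threshold_;
};

With such a scorer, RANSAC minimizes a truncated total cost rather than maximizing the inlier count, which is the classic MSAC refinement of the consensus criterion.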