template <typename Kernel>
void EvalInlier(const Kernel &kernel, const typename Kernel::Model &model,
	double threshold, std::vector<size_t> * vec_inliers)
{
	ScorerEvaluator<Kernel> scorer(threshold);
	std::vector<size_t> vec_index(kernel.NumSamples());
	std::iota(vec_index.begin(), vec_index.end(), 0);

	scorer.Score(kernel, model, vec_index, vec_inliers);
}
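// Example call pattern for EvalInlier (a sketch: `LineKernel`, its
// construction, and the 0.3 threshold are placeholders, not part of this
// file):
//
//   LineKernel kernel(points);
//   LineKernel::Model model; // e.g. obtained from RANSAC below
//   std::vector<size_t> inliers;
//   EvalInlier(kernel, model, /*threshold=*/0.3, &inliers);
//   // `inliers` now holds the indices whose residual passes the threshold.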
template <typename Kernel>
int searchModelWithMEstimator(const Kernel &kernel,
                              int maxNbIterations,
                              typename Kernel::Model* bestModel,
                              double *RMS = 0,
                              double *sigmaMAD_p = 0)
{
    assert(bestModel);
    const int N = (int)kernel.NumSamples();
    const int m = (int)Kernel::MinimumSamples();
    
    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return 0;
    } else if (N == m) {
        bool ok = searchModel_minimalSamples(kernel, bestModel, 0, RMS);
        return ok ? 1 : 0;
    }

    // Compute a first model on all samples with least squares
    int hasModel = kernel.ComputeModelFromAllSamples(bestModel);
    if (!hasModel) {
        return 0;
    }

    // Start with all samples flagged as inliers; the M-estimator
    // iteratively reweights them from there.
    InliersVec isInlier(N, true);
    
    int nbSuccessfulIterations = kernel.MEstimator(*bestModel, isInlier, maxNbIterations, bestModel, RMS, sigmaMAD_p);
    if (RMS) {
        *RMS = kernel.ScalarUnormalize(*RMS);
    }
    kernel.Unnormalize(bestModel);
    return nbSuccessfulIterations;

} // searchModelWithMEstimator
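// Example call pattern (a sketch: the kernel type, its inputs, and the
// iteration budget are assumptions):
//
//   SomeHomographyKernel kernel(x1, x2); // normalizes its input internally
//   SomeHomographyKernel::Model H;
//   double rms = 0., sigmaMAD = 0.;
//   int nIter = searchModelWithMEstimator(kernel, /*maxNbIterations=*/100,
//                                         &H, &rms, &sigmaMAD);
//   if (nIter == 0) { /* estimation failed */ }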
template <typename Kernel>
bool searchModelLS(const Kernel &kernel,
                   typename Kernel::Model* bestModel,
                   double *RMS = 0)
{
    assert(bestModel);
    const int N = (int)kernel.NumSamples();
    const int m = (int)Kernel::MinimumSamples();
    
    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return false;
    } else if (N == m) {
        return searchModel_minimalSamples(kernel, bestModel, 0, RMS);
    }
    
    bool ok = kernel.ComputeModelFromAllSamples(bestModel);
    if (ok && RMS) {
        InliersVec isInlier(N);
        kernel.ComputeInliersForModel(*bestModel, &isInlier, RMS);
        *RMS = kernel.ScalarUnormalize(*RMS);
    }
    
    kernel.Unnormalize(bestModel);
    return ok;
    
} // searchModelLS
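// Example call pattern (a sketch; the kernel type is a placeholder):
//
//   SomeLineKernel kernel(points);
//   SomeLineKernel::Model line;
//   double rms;
//   if (searchModelLS(kernel, &line, &rms)) {
//       // `line` and `rms` have already been denormalized here.
//   }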
template <typename Kernel, typename Scorer>
typename Kernel::Model MaxConsensus
(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<uint32_t> *best_inliers = nullptr,
  uint32_t max_iteration = 1024
)
{
  const uint32_t min_samples = Kernel::MINIMUM_SAMPLES;
  const uint32_t total_samples = kernel.NumSamples();

  size_t best_num_inliers = 0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time.
  std::vector<uint32_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  // Random number generator configuration
  std::mt19937 random_generator(std::mt19937::default_seed);

  std::vector<uint32_t> sample;
  for (uint32_t iteration = 0; iteration < max_iteration; ++iteration) {
    UniformSample(min_samples, random_generator, &all_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Score each fit by its inlier count.
    for (const auto& model_it : models) {
      std::vector<uint32_t> inliers;
      scorer.Score(kernel, model_it, all_samples, &inliers);

      if (best_num_inliers < inliers.size()) {
        best_num_inliers = inliers.size();
        best_model = model_it;
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
    }
  }
  return best_model;
}
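// Example call pattern (a sketch: `LineKernel` and the ScorerEvaluator
// threshold are placeholders):
//
//   LineKernel kernel(xy);
//   ScorerEvaluator<LineKernel> scorer(0.3);
//   std::vector<uint32_t> inliers;
//   LineKernel::Model model = MaxConsensus(kernel, scorer, &inliers, 512);
//   // `model` is the hypothesis with the largest support over 512 draws.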
template <typename Kernel>
bool searchModel_minimalSamples(const Kernel &kernel,
                                typename Kernel::Model* bestModel,
                                InliersVec *bestInliers = 0,
                                double *bestRMS = 0)
{
    assert(kernel.NumSamples() == Kernel::MinimumSamples());
    
    InliersVec isInlier(kernel.NumSamples());
    int best_score = 0;
    bool bestModelFound = false;
    std::vector<typename Kernel::Model> possibleModels;
    kernel.ComputeModelFromMinimumSamples(&possibleModels);
    for (std::size_t i = 0; i < possibleModels.size(); ++i) {
        
        double rms;
        int model_score = kernel.ComputeInliersForModel(possibleModels[i], &isInlier, bestRMS ? &rms : 0);
        if (model_score > best_score) {
            if (bestRMS) {
                *bestRMS = rms;
            }
            best_score = model_score;
            *bestModel = possibleModels[i];
            bestModelFound = true;
            // Copy the inlier flags now: isInlier is overwritten on every
            // iteration, so copying after the loop could return the flags
            // of a worse model.
            if (bestInliers) {
                *bestInliers = isInlier;
            }
        }
    }
    if (!bestModelFound) {
        return false;
    }
    if (bestRMS) {
        *bestRMS = kernel.ScalarUnormalize(*bestRMS);
    }
    kernel.Unnormalize(bestModel);
    return true;
}
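// Example: with a hypothetical 7-point fundamental-matrix kernel,
// ComputeModelFromMinimumSamples() can return up to three real solutions;
// this routine keeps the one with the largest inlier support:
//
//   SevenPointKernel kernel(x1, x2); // exactly 7 correspondences
//   SevenPointKernel::Model F;
//   double rms;
//   bool ok = searchModel_minimalSamples(kernel, &F, /*bestInliers=*/0, &rms);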
template <typename Kernel, typename Scorer>
typename Kernel::Model RANSAC(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<size_t> *best_inliers = nullptr,
  size_t *best_score = nullptr, // Found number of inliers
  double outliers_probability = 1e-2)
{
  assert(outliers_probability < 1.0);
  assert(outliers_probability > 0.0);
  size_t iteration = 0;
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  size_t max_iterations = 100;
  const size_t really_max_iterations = 4096;

  size_t best_num_inliers = 0;
  double best_inlier_ratio = 0.0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the index list [0, total_samples) ahead of time.
  std::vector<size_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  std::vector<size_t> sample;
  for (iteration = 0;
       iteration < max_iterations &&
       iteration < really_max_iterations; ++iteration) {
    UniformSample(min_samples, &all_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Compute the inlier list for each fit.
    for (size_t i = 0; i < models.size(); ++i) {
      std::vector<size_t> inliers;
      scorer.Score(kernel, models[i], all_samples, &inliers);

      if (best_num_inliers < inliers.size()) {
        best_num_inliers = inliers.size();
        best_inlier_ratio = inliers.size() / double(total_samples);
        best_model = models[i];
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
      // Shrink the iteration budget once an inlier ratio is known.
      if (best_inlier_ratio > 0.0) {
        max_iterations = IterationsRequired(min_samples,
                                            outliers_probability,
                                            best_inlier_ratio);
      }
    }
  }
  if (best_score)
    *best_score = best_num_inliers;
  return best_model;
}
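// The adaptive `max_iterations` update above relies on the classic RANSAC
// draw count: with inlier ratio w and minimal sample size m, one draw is
// all-inlier with probability w^m, so k = log(p) / log(1 - w^m) draws
// leave only probability p of never sampling an outlier-free subset.
// A minimal sketch of IterationsRequired consistent with that formula
// (the real helper may differ; requires <cmath>):
static size_t IterationsRequired(size_t min_samples,
                                 double outliers_probability,
                                 double inlier_ratio)
{
  // Probability that a single minimal sample contains only inliers.
  const double p_good = std::pow(inlier_ratio, (double)min_samples);
  if (p_good >= 1.0)
    return 1; // every sample is all-inlier: one draw suffices
  return (size_t)std::ceil(std::log(outliers_probability) /
                           std::log(1.0 - p_good));
}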
template <typename Kernel>
double LeastMedianOfSquares(const Kernel &kernel,
                            typename Kernel::Model * model = NULL,
                            double* outlierThreshold = NULL,
                            double outlierRatio = 0.5,
                            double minProba = 0.99)
{
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  std::vector<double> residuals(total_samples); // Array for storing residuals
  std::vector<size_t> vec_sample(min_samples);

  double dBestMedian = std::numeric_limits<double>::max();

  // The required number of iterations is derived from the outlier ratio.
  const size_t N = (min_samples < total_samples) ?
    getNumSamples(minProba, outlierRatio, min_samples) : 0;

  for (size_t i = 0; i < N; i++) {

    // Get sample indices
    UniformSample(min_samples, total_samples, &vec_sample);

    // Estimate parameters: the solutions are stored in a vector
    std::vector<typename Kernel::Model> models;
    kernel.Fit(vec_sample, &models);

    // Now test the solutions on the whole data
    for (size_t k = 0; k < models.size(); ++k) {
      // Compute residuals:
      for (size_t l = 0; l < total_samples; ++l) {
        residuals[l] = kernel.Error(l, models[k]);
      }

      // Compute the quantile at rank (1 - outlierRatio); this is the
      // median when outlierRatio = 0.5
      std::vector<double>::iterator itMedian = residuals.begin() +
        std::size_t(total_samples * (1. - outlierRatio));
      std::nth_element(residuals.begin(), itMedian, residuals.end());
      double median = *itMedian;

      // Store the best solution
      if (median < dBestMedian) {
        dBestMedian = median;
        if (model) (*model) = models[k];
      }
    }
  }

  // This array of precomputed values corresponds to the inverse
  // cumulative function for a normal distribution. For more information
  // consult the literature (Robust Regression and Outlier Detection,
  // Rousseeuw & Leroy). The values are computed for each 5%.
  static const double ICDF[21] = {
    1.4e16, 15.94723940, 7.957896558, 5.287692054,
    3.947153876, 3.138344200, 2.595242369, 2.203797543,
    1.906939402, 1.672911853, 1.482602218, 1.323775627,
    1.188182950, 1.069988721, 0.9648473415, 0.8693011162,
    0.7803041458, 0.6946704675, 0.6079568319, 0.5102134568,
    0.3236002672
  };

  // Evaluate the outlier threshold
  if (outlierThreshold) {
    double sigma = ICDF[int((1. - outlierRatio) * 20.)] *
      (1. + 5. / double(total_samples - min_samples));
    *outlierThreshold = (double)(sigma * sigma * dBestMedian * 4.);
    // No iteration was run: the threshold is meaningless
    if (N == 0) *outlierThreshold = std::numeric_limits<double>::max();
  }

  return dBestMedian;
}
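// getNumSamples() follows the same reasoning as the RANSAC draw count: a
// minimal sample of size m is outlier-free with probability
// (1 - outlierRatio)^m, and we want at least one such sample with
// probability minProba. A sketch under that assumption (the real helper
// may differ; requires <cmath>):
static size_t getNumSamples(double minProba, double outlierRatio,
                            std::size_t sampleSize)
{
  return static_cast<size_t>(
      std::log(1. - minProba) /
      std::log(1. - std::pow(1. - outlierRatio, (double)sampleSize)));
}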
template <typename Kernel>
ProsacReturnCodeEnum prosac(const Kernel &kernel,
                            typename Kernel::Model* bestModel,
                            InliersVec *bestInliers = 0,
                            double *bestRMS = 0)
{
    assert(bestModel);
    
    const int N = (int)std::min(kernel.NumSamples(), (std::size_t)RAND_MAX);
    
    // For us, the draw set is the same as the verification set
    const int N_draw = N;
    const int m = (int)Kernel::MinimumSamples();
    
    
    // Test if we have sufficient points for the kernel.
    if (N < m) {
        return eProsacReturnCodeNotEnoughPoints;
    } else if (N == m) {
        bool ok = searchModel_minimalSamples(kernel, bestModel, bestInliers, bestRMS);
        return ok ? eProsacReturnCodeFoundModel : eProsacReturnCodeNoModelFound;
    }
    
    InliersVec isInlier(N);
#ifndef PROSAC_DISABLE_LO_RANSAC
    InliersVec isInlierLO(N);
#endif
    
    /* NOTE: the PROSAC article sets T_N (the number of iterations before PROSAC becomes RANSAC) to 200000,
     but that means:
     - only 535 correspondences out of 1000 will be used after 2808 iterations (60% outliers)
     -      395                                                 588            (50%)
     -      170                                                 163            (40%)
     (the # of iterations is the # of RANSAC draws for a 0.99 confidence
     of finding the right model given the percentage of outliers)
     
     QUESTION: Is it more reasonable to set it to the maximum number of iterations we plan to
     do given the percentage of outliers?
     
     MY ANSWER: If you know that you won't draw more than XX samples (say 2808, because you only
     accept 60% outliers), then it's more reasonable to set T_N to that value, in order to give
     all correspondences a chance to be drawn (even if that chance is very small for the last ones).
     Anyway, PROSAC should find many inliers in the first rounds and stop right away.
     
     T_N=2808 gives:
     - only 961 correspondences out of 1000 will be used after 2808 iterations (60% outliers)
     -      595                                                 588            (50%)
     -      177                                                 163            (40%)
     
     */

    const int T_N = kernel.maxOutliersProportion >= 1. ?
        std::numeric_limits<int>::max() :
        niter_RANSAC(kernel.probability, kernel.maxOutliersProportion, m, kernel.iMaxIter);
    const int t_max = kernel.iMaxIter > 0 ? kernel.iMaxIter : T_N;
    
    const double beta = kernel.getProsacBetaParam();
    assert(beta > 0. && beta < 1.);
    int n_star = N; // termination length (see sec. 2.2 Stopping criterion)
    int I_n_star = 0; // number of inliers found within the first n_star data points
    int I_N_best = 0; // best number of inliers found so far (store the model that goes with it)
    const int I_N_min = (int)((1. - kernel.maxOutliersProportion) * N); // the minimum number of total inliers
    int t = 0; // iteration number
    int n = m; // we draw samples from the set U_n of the top n data points
    double T_n = T_N; // average number of samples {M_i}_{i=1}^{T_N} that contain samples from U_n only
    int T_n_prime = 1; // integer version of T_n, see eq. (4)
    
    for(int i = 0; i < m; ++i) {
        T_n *= (double)(n - i) / (N - i);
    }
    int k_n_star = T_N; // number of samples to draw to reach the maximality constraint

    bool bestModelFound = false;
    
    std::vector<std::size_t> sample(m);

    // Note: the condition (I_N_best < I_N_min) was not in the original paper, but it is reasonable:
    // we shouldn't stop if we haven't found the expected number of inliers
    while (((I_N_best < I_N_min) || t <= k_n_star) && t < T_N && t <= t_max) {
        int I_N; // total number of inliers for that sample
        
        // Choice of the hypothesis generation set
        t = t + 1;
        
        // from the paper, eq. (5) (not Algorithm1):
        // "The growth function is then defined as
        //  g(t) = min {n : T′n ≥ t}"
        // Thus n should be incremented if t > T'n, not if t = T'n as written in Algorithm 1
        if ((t > T_n_prime) && (n < n_star)) {
            double T_nplus1 = (T_n * (n+1)) / (n+1-m);
            n = n+1;
            T_n_prime = T_n_prime + std::ceil(T_nplus1 - T_n);
            T_n = T_nplus1;
        }
        
        // Draw semi-random sample (note that the test condition from Algorithm1 in the paper is reversed):
        if (t > T_n_prime) {
            // during the finishing stage (n== n_star && t > T_n_prime), draw a standard RANSAC sample
            // The sample contains m points selected from U_n at random
            deal(n, m, sample);
        } else {
            // The sample contains m-1 points selected from U_{n−1} at random and u_n
            deal(n - 1, m - 1, sample);
            sample[m - 1] = n - 1;
        }
        
        // INSERT Compute model parameters p_t from the sample M_t
        std::vector<typename Kernel::Model> possibleModels;
        kernel.ComputeModelFromMinimumSamples(sample, &possibleModels);
        
        for (std::size_t modelNb = 0; modelNb < possibleModels.size(); ++modelNb) {
            
            
            // Find support of the model with parameters p_t
            // From first paragraph of section 2: "The hypotheses are verified against all data"
            
            double RMS;
            I_N = kernel.ComputeInliersForModel(possibleModels[modelNb], &isInlier, bestRMS ? &RMS : 0);
            
            
            if (I_N > I_N_best) {
                int n_best; // best value found so far in terms of inliers ratio
                int I_n_best; // number of inliers for n_best
                int I_N_draw; // number of inliers within the N_draw first data points
                
                
                // INSERT (OPTIONAL): Test for degenerate model configuration (DEGENSAC)
                //                    (i.e. discard the sample if more than 1 model is consistent with the sample)
                // ftp://cmp.felk.cvut.cz/pub/cmp/articles/matas/chum-degen-cvpr05.pdf
                
                // Do local optimization, and recompute the support (LO-RANSAC)
                // http://cmp.felk.cvut.cz/~matas/papers/chum-dagm03.pdf
                // for the fundamental matrix, the normalized 8-points algorithm performs very well:
                // http://axiom.anu.edu.au/~hartley/Papers/fundamental/ICCV-final/fundamental.pdf
                
                
                // Store the best model
                *bestModel = possibleModels[modelNb];
                bestModelFound = true;
                if (bestRMS) {
                    *bestRMS = RMS;
                }
                
#ifndef PROSAC_DISABLE_LO_RANSAC
                int loransac_iter = 0;
                while (I_N > I_N_best) {
                    I_N_best = I_N;
                    
                    if (kernel.iMaxLOIter < 0 || loransac_iter < kernel.iMaxLOIter) {
                        
                        // Continue while LO-RANSAC finds a better support
                        typename Kernel::Model modelLO;
                        double RMS_LO;
                        bool modelOptimized = kernel.OptimizeModel(*bestModel, isInlier, &modelLO);
                        
                        if (modelOptimized) {
                            // I_N = findSupport(/* model, sample, */ N, isInlier);
                            int I_N_LO = kernel.ComputeInliersForModel(modelLO, &isInlierLO, bestRMS ? &RMS_LO : 0);
                            if (I_N_LO > I_N_best) {
                                isInlier = isInlierLO;
                                *bestModel = modelLO;
                                if (bestRMS) {
                                    *bestRMS = RMS_LO;
                                }
                                I_N = I_N_LO;
                            }
                        }
                        ++loransac_iter;
                    } // LO-RANSAC
                }
#else
                if (I_N > I_N_best) {
                    I_N_best = I_N;
                }
#endif
                
                if (bestInliers) {
                    *bestInliers = isInlier;
                }
                
                // Select new termination length n_star if possible, according to Sec. 2.2.
                // Note: the original paper seems to do it each time a new sample is drawn,
                // but this really makes sense only if the new sample is better than the previous ones.
                n_best = N;
                I_n_best = I_N_best;
                I_N_draw = std::accumulate(isInlier.begin(), isInlier.begin() + N_draw, 0);
#ifndef PROSAC_DISABLE_N_STAR_OPTIMIZATION
                int n_test; // test value for the termination length
                int I_n_test; // number of inliers for that test value
                double epsilon_n_best = (double)I_n_best/n_best;
                
                for (n_test = N, I_n_test = I_N_draw; n_test > m; n_test--) {
                    // Loop invariants:
                    // - I_n_test is the number of inliers for the n_test first correspondences
                    // - n_best is the value between n_test+1 and N that maximizes the ratio I_n_best/n_best
                    assert(n_test >= I_n_test);
                    
                    // * Non-randomness : In >= Imin(n*) (eq. (9))
                    // * Maximality: the number of samples that were drawn so far must be enough
                    // so that the probability of having missed a set of inliers is below eta=0.01.
                    // This is the classical RANSAC termination criterion (HZ 4.7.1.2, eq. (4.18)),
                    // except that it takes into account only the n first samples (not the total number of samples).
                    // kn_star = log(eta0)/log(1-(In_star/n_star)^m) (eq. (12))
                    // We have to minimize kn_star, e.g. maximize I_n_star/n_star
                    //printf("n_best=%d, I_n_best=%d, n_test=%d, I_n_test=%d\n",
                    //        n_best,    I_n_best,    n_test,    I_n_test);
                    // a straightforward implementation would use the following test:
                    //if (I_n_test > epsilon_n_best*n_test) {
                    // However, since In is binomial, and in the case of evenly distributed inliers,
                    // a better test would be to reduce n_star only if there's a significant improvement in
                    // epsilon. Thus we use a Chi-squared test (P=0.10), together with the normal approximation
                    // to the binomial (mu = epsilon_n_star*n_test, sigma=sqrt(n_test*epsilon_n_star*(1-epsilon_n_star))).
                    // There is a significant difference between the two tests (e.g. with the findSupport
                    // functions provided above).
                    // We do the cheap test first, and the expensive test only if the cheap one passes.
                    if (( I_n_test * n_best > I_n_best * n_test ) &&
                        ( I_n_test > epsilon_n_best * n_test + std::sqrt(n_test * epsilon_n_best * (1. - epsilon_n_best) * 2.706) )) {
                        if (I_n_test < Prosac_Imin(m,n_test,beta)) {
                            // equation 9 not satisfied: no need to test for smaller n_test values anyway
                            break; // jump out of the for(n_test) loop
                        }
                        n_best = n_test;
                        I_n_best = I_n_test;
                        epsilon_n_best = (double)I_n_best / n_best;
                    }
                    
                    // prepare for next loop iteration
                    I_n_test -= isInlier[n_test - 1];
                } // for(n_test ...
#endif // #ifndef PROSAC_DISABLE_N_STAR_OPTIMIZATION
                
                // is the best one we found even better than n_star?
                if ( I_n_best * n_star > I_n_star * n_best ) {
                    assert(n_best >= I_n_best);
                    // update all values
                    n_star = n_best;
                    I_n_star = I_n_best;
                    k_n_star = niter_RANSAC(1. - kernel.eta0, 1. - I_n_star / (double)n_star, m, T_N);
                }
            } // if (I_N > I_N_best)
        } //for (modelNb ...
    } // while(t <= k_n_star ...
    
    if (!bestModelFound) {
        return eProsacReturnCodeNoModelFound;
    }
    
    if (bestRMS) {
        *bestRMS = kernel.ScalarUnormalize(*bestRMS);
    }
    
    kernel.Unnormalize(bestModel);

    
    if (t == t_max) {
        return eProsacReturnCodeMaxIterationsParamReached;
    }
    
    if (t == T_N) {
        return eProsacReturnCodeMaxIterationsFromProportionParamReached;
    }
    
    if (I_N_best == m) {
        return eProsacReturnCodeInliersIsMinSamples;
    }
    
    return eProsacReturnCodeFoundModel;
} // prosac
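// prosac() drives both T_N and k_n_star through niter_RANSAC(). A sketch
// of that helper, assuming it returns the classic RANSAC draw count
// ceil(log(1 - p) / log(1 - (1 - epsilon)^s)) clamped to Nmax (the real
// implementation may differ; requires <cmath> and <limits>):
static int niter_RANSAC(double p,       // target confidence
                        double epsilon, // proportion of outliers
                        int s,          // minimal sample size
                        int Nmax)       // iteration cap, <= 0 means none
{
  if (Nmax <= 0)
    Nmax = std::numeric_limits<int>::max();
  if (epsilon <= 0.)
    return 1; // no outliers: a single draw suffices
  // log(1 - (1 - epsilon)^s), via log1p for numerical stability
  const double logval = std::log1p(-std::pow(1. - epsilon, (double)s));
  const double N = std::log(1. - p) / logval;
  if (logval < 0. && 0. < N && N < Nmax)
    return (int)std::ceil(N);
  return Nmax;
}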
template <typename Kernel>
std::pair<double, double> ACRANSAC(const Kernel &kernel,
  std::vector<size_t> & vec_inliers,
  size_t nIter = 1024,
  typename Kernel::Model * model = NULL,
  double precision = std::numeric_limits<double>::infinity(),
  bool bVerbose = false)
{
  vec_inliers.clear();

  const size_t sizeSample = Kernel::MINIMUM_SAMPLES;
  const size_t nData = kernel.NumSamples();
  if(nData <= sizeSample)
    return std::make_pair(0.0,0.0);

  const double maxThreshold = (precision==std::numeric_limits<double>::infinity()) ?
    std::numeric_limits<double>::infinity() :
    precision * kernel.normalizer2()(0,0) * kernel.normalizer2()(0,0);

  std::vector<ErrorIndex> vec_residuals(nData); // [residual,index]
  std::vector<double> vec_residuals_(nData);
  std::vector<size_t> vec_sample(sizeSample); // Sample indices

  // Possible sampling indices (could change in the optimization phase)
  std::vector<size_t> vec_index(nData);
  std::iota(vec_index.begin(), vec_index.end(), 0);

  // Precompute log combi
  double loge0 = log10((double)Kernel::MAX_MODELS * (nData-sizeSample));
  std::vector<float> vec_logc_n, vec_logc_k;
  makelogcombi_n(nData, vec_logc_n);
  makelogcombi_k(sizeSample, nData, vec_logc_k);

  // Output parameters
  double minNFA = std::numeric_limits<double>::infinity();
  double errorMax = std::numeric_limits<double>::infinity();

  // Reserve 10% of iterations for focused sampling
  size_t nIterReserve = nIter/10;
  nIter -= nIterReserve;

  // Main estimation loop.
  for (size_t iter=0; iter < nIter; ++iter) {
    UniformSample(sizeSample, vec_index, &vec_sample); // Get random sample

    std::vector<typename Kernel::Model> vec_models; // Up to max_models solutions
    kernel.Fit(vec_sample, &vec_models);

    // Evaluate models
    bool better = false;
    for (size_t k = 0; k < vec_models.size(); ++k)  {
      // Residuals computation and ordering
      kernel.Errors(vec_models[k], vec_residuals_);
      for (size_t i = 0; i < nData; ++i)  {
        const double error = vec_residuals_[i];
        vec_residuals[i] = ErrorIndex(error, i);
      }
      std::sort(vec_residuals.begin(), vec_residuals.end());

      // Most meaningful discrimination inliers/outliers
      const ErrorIndex best = bestNFA(
        sizeSample,
        kernel.logalpha0(),
        vec_residuals,
        loge0,
        maxThreshold,
        vec_logc_n,
        vec_logc_k,
        kernel.multError());

      if (best.first < minNFA /*&& vec_residuals[best.second-1].first < errorMax*/)  {
        // A better model was found
        better = true;
        minNFA = best.first;
        vec_inliers.resize(best.second);
        for (size_t i=0; i<best.second; ++i)
          vec_inliers[i] = vec_residuals[i].second;
        errorMax = vec_residuals[best.second-1].first; // Error threshold
        if(model) *model = vec_models[k];

        if(bVerbose)  {
          std::cout << "  nfa=" << minNFA
            << " inliers=" << best.second
            << " precisionNormalized=" << errorMax
            << " precision=" << kernel.unormalizeError(errorMax)
            << " (iter=" << iter;
          std::cout << ",sample=";
          std::copy(vec_sample.begin(), vec_sample.end(),
            std::ostream_iterator<size_t>(std::cout, ","));
          std::cout << ")" <<std::endl;
        }
      }
    }

    // ACRANSAC optimization: draw samples among best set of inliers so far
    if((better && minNFA<0) || (iter+1==nIter && nIterReserve)) {
      if(vec_inliers.empty()) { // No model found at all so far
        nIter++; // Continue to look for any model, even not meaningful
        nIterReserve--;
      } else {
        vec_index = vec_inliers;
        if(nIterReserve) {
            nIter = iter+1+nIterReserve;
            nIterReserve=0;
        }
      }
    }
  }

  if(minNFA >= 0)
    vec_inliers.clear();

  if (!vec_inliers.empty())
  {
    if (model)
      kernel.Unnormalize(model);
    errorMax = kernel.unormalizeError(errorMax);
  }

  return std::make_pair(errorMax, minNFA);
}
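// bestNFA() ranks models by the log10 of an a contrario Number of False
// Alarms, which needs log10 binomial coefficients. A sketch of the value
// the two tables are assumed to hold (vec_logc_n[k] = log10(C(nData, k))
// and vec_logc_k[n] = log10(C(n, sizeSample)); the real helpers may
// differ; requires <cmath>):
static double logcombi(size_t k, size_t n)
{
  if (k == 0 || k >= n)
    return 0.0;
  if (n - k < k)
    k = n - k; // exploit the symmetry C(n,k) = C(n,n-k)
  double r = 0.0;
  for (size_t i = 1; i <= k; ++i)
    r += std::log10((double)(n - i + 1)) - std::log10((double)i);
  return r;
}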
template <typename Kernel, typename Scorer>
typename Kernel::Model RANSAC(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<size_t> *best_inliers = NULL,
  double *best_score = NULL,
  double outliers_probability = 1e-2)
{
  assert(outliers_probability < 1.0);
  assert(outliers_probability > 0.0);
  size_t iteration = 0;
  const size_t min_samples = Kernel::MINIMUM_SAMPLES;
  const size_t total_samples = kernel.NumSamples();

  size_t max_iterations = 100;
  const size_t really_max_iterations = 4096;

  size_t best_num_inliers = 0;
  double best_cost = std::numeric_limits<double>::infinity();
  double best_inlier_ratio = 0.0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time.
  std::vector<size_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  std::vector<size_t> sample;
  for (iteration = 0;
       iteration < max_iterations &&
       iteration < really_max_iterations; ++iteration) {
    UniformSample(min_samples, total_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Compute costs for each fit.
    for (size_t i = 0; i < models.size(); ++i) {
      std::vector<size_t> inliers;
      double cost = scorer.Score(kernel, models[i], all_samples, &inliers);

      if (cost < best_cost) {
        best_cost = cost;
        best_inlier_ratio = inliers.size() / double(total_samples);
        best_num_inliers = inliers.size();
        best_model = models[i];
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
      // Shrink the iteration budget once an inlier ratio is known.
      if (best_inlier_ratio > 0.0) {
        max_iterations = IterationsRequired(min_samples,
                                            outliers_probability,
                                            best_inlier_ratio);
      }
    }
  }
  if (best_score)
    *best_score = best_cost;
  return best_model;
}
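// Example call pattern using the ScorerEvaluator from EvalInlier() above
// (a sketch: `LineKernel` and the 0.3 threshold are placeholders):
//
//   LineKernel kernel(xy);
//   ScorerEvaluator<LineKernel> scorer(0.3);
//   std::vector<size_t> inliers;
//   double cost;
//   LineKernel::Model model = RANSAC(kernel, scorer, &inliers, &cost);
//   // `model` minimizes the scorer cost; `inliers` holds its support.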