Example #1
void compute_moisan_planar_homography_n_points(float *x0, float *y0, float *x1, float *y1, int n, int niter, float &epsilon, float **H, int &counter, int recursivity)
{
	// Initialize seed
	srand( (long int) time (NULL) );

	float **Haux = allocate_float_matrix(3,3);

	float *dist = new float[n];
	float *dindexos = new float[n];

	int nselected = 0;
	float *i0selected = new float[n];
	float *j0selected = new float[n];

	float *i1selected = new float[n];
	float *j1selected = new float[n];


	/* Tabulate the log-combinatorial terms of the NFA:
	   log10(NFA) = log10(n-4) + (k-4)*log10(alpha) + log10 C(n,k) + log10 C(k,4) */
	float loge0 = (float)log10((double)(n-4));
	float *logcn = makelogcombi_n(n);
	float *logc4 = makelogcombi_k(4,n);


	// Compute minimum and maximum coordinates of the points in the second image
	float xmin, xmax, ymin, ymax;
	xmin = xmax = x1[0];
	ymin = ymax = y1[0];

	for(int i=0; i < n; i++)
	{
		if (x1[i] < xmin) xmin = x1[i];
		if (y1[i] < ymin) ymin = y1[i];

		if (x1[i] > xmax) xmax = x1[i];
		if (y1[i] > ymax) ymax = y1[i];
	}

	float mepsilon = 10000000.0f;	// best (lowest) log10(NFA) found so far
	for(int iter = 0; iter < niter; iter++)
	{

		// Initialize distances and (float-encoded) indices for all matches
		for(int i=0; i < n ; i++)
		{
			dist[i] = 0.0f;
			dindexos[i] = (float) i;
		}

		// Choose 4 distinct indices in 0..n-1
		int indexos[4];
		int acceptable = 0;
		while (!acceptable)
		{
			acceptable = 1;
			// Divide by RAND_MAX+1 so the out-of-range index n cannot be drawn
			for(int i=0; i < 4; i++) indexos[i] = (int) floor(rand()/((double)RAND_MAX + 1.0) * (double) n);

			// Reject the draw if any index is repeated
			for(int i=0; i < 4; i++)
				for(int j=i+1; j < 4; j++)
					if (indexos[i] == indexos[j]) acceptable = 0;
		}

		// Store the 4 selected matches
		float px0[4] , py0[4], px1[4] , py1[4];
		for(int i=0; i < 4; i++)
		{
			px0[i] = x0[indexos[i]];
			py0[i] = y0[indexos[i]];

			px1[i] = x1[indexos[i]];
			py1[i] = y1[indexos[i]];
		}


		// Compute the planar homography from the 4 sampled matches
		compute_planar_homography_n_points(px0, py0, px1, py1, 4, Haux);

		// Squared transfer error of each match under Haux
		for(int i=0; i < n; i++)
		{

			float vec[3];
			vec[0] = x0[i];
			vec[1] = y0[i];
			vec[2] = 1.0f;
		
			float res[3];
			float_vector_matrix_product(Haux, vec ,res , 3);

			if (res[2] != 0.0f) {

				res[0] /= res[2]; res[1] /= res[2];

				float dif = (res[0] - x1[i]) * (res[0] - x1[i]) + (res[1] - y1[i]) * (res[1] - y1[i]);
			
				dist[i] = dif;

				//if (dif < tolerance2) {  paccorded[pnaccorded] = i; pnaccorded++; }
			
			} else dist[i] = 100000000.0f;	// degenerate projection: huge residual
		}


		// Sort residuals in increasing order; dindexos follows the permutation
		quick_sort(dist,dindexos,n);
	

		// Look for the most meaningful subset of matches
		for(int i=4 ; i < n; i++)
		{
			float rigidity = dist[i];
			float logalpha = 0.5f*(float)log10((double) rigidity);

			// log10(NFA) of the k = i+1 best matches
			float nfa = loge0 + (float)(i-3) * logalpha + logcn[i+1] + logc4[i+1];

			if (nfa < mepsilon) 
			{

				mepsilon = nfa;
				for(int ki=0; ki < 3; ki++) for(int kj=0; kj < 3; kj++) H[ki][kj] = Haux[ki][kj];

				nselected = i;
	
				for(int ki=0; ki < nselected; ki++) {
						i0selected[ki] = x0[(int) dindexos[ki]];
						j0selected[ki] = y0[(int) dindexos[ki]];
						i1selected[ki] = x1[(int) dindexos[ki]];
						j1selected[ki] = y1[(int) dindexos[ki]];
				}
			
							
			}
		}
	}


	epsilon = mepsilon;
	counter++;

	// Refine recursively on the selected matches only
	if (counter < recursivity)
		compute_moisan_planar_homography_n_points(i0selected,j0selected,i1selected,j1selected,nselected,niter,epsilon,H,counter,recursivity);



	desallocate_float_matrix(Haux,3,3);
	delete[] dist;
	delete[] dindexos;

	delete[] i0selected;
	delete[] j0selected;
	delete[] i1selected;
	delete[] j1selected;

}
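
A minimal driver sketch for the function above. It reuses the allocate_float_matrix/desallocate_float_matrix helpers that appear in the code; the synthetic translation-only matches and the values niter=1000 and recursivity=3 are illustrative choices of this sketch, not values mandated by the source.

#include <cstdlib>

int main()
{
	const int n = 100;
	float *x0 = new float[n], *y0 = new float[n];
	float *x1 = new float[n], *y1 = new float[n];

	// Synthetic matches related by the translation (+5, -3)
	for (int i = 0; i < n; i++) {
		x0[i] = (float)(rand() % 512);
		y0[i] = (float)(rand() % 512);
		x1[i] = x0[i] + 5.0f;
		y1[i] = y0[i] - 3.0f;
	}

	float **H = allocate_float_matrix(3, 3);	// best homography (output)
	float epsilon = 0.0f;				// log10(NFA) of the best subset (output)
	int counter = 0;				// recursion counter, must start at 0

	compute_moisan_planar_homography_n_points(x0, y0, x1, y1, n,
						  1000, epsilon, H, counter, 3);

	// epsilon < 0 (NFA < 1) indicates a statistically meaningful inlier set
	desallocate_float_matrix(H, 3, 3);
	delete[] x0; delete[] y0;
	delete[] x1; delete[] y1;
	return 0;
}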
Example #2
/// Generic implementation of 'ORSA':
/// A Probabilistic Criterion to Detect Rigid Point Matches
///    Between Two Images and Estimate the Fundamental Matrix.
/// BibTeX:
/// @article{DBLP:journals/ijcv/MoisanS04,
///  author    = {Lionel Moisan and B{\'e}renger Stival},
///  title     = {A Probabilistic Criterion to Detect Rigid Point Matches
///    Between Two Images and Estimate the Fundamental Matrix},
///  journal   = {International Journal of Computer Vision},
///  volume    = {57},
///  number    = {3},
///  year      = {2004},
///  pages     = {201-218},
///  ee        = {http://dx.doi.org/10.1023/B:VISI.0000013094.38752.54},
///  bibsource = {DBLP, http://dblp.uni-trier.de}
///}
/// 
/// ORSA is based on an a contrario criterion for inlier/outlier
/// discrimination; it is parameter-free and relies on an optimized
/// random sampling procedure. It returns the log10 of the NFA and,
/// optionally, the best estimated model.
///
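/// As a sketch of the quantity minimized below (assuming the bestNFA helper
/// implements the Moisan-Stival criterion), for k inliers among n matches:
///   log10(NFA) = log10(NbModels*(n-sizeSample)) + (k-sizeSample)*log10(alpha_k)
///                + log10 C(n,k) + log10 C(k,sizeSample),
/// where alpha_k is the relative error area associated with the k-th smallest
/// residual. A model is meaningful when log10(NFA) < 0, i.e. NFA < 1.
///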
/// \param vec_inliers Output vector of inlier indices.
/// \param nIter The maximum number of iterations.
/// \param precision (input/output) Threshold for inlier/outlier discrimination.
/// \param model The best computed model.
/// \param bVerbose Display optimization statistics.
double OrsaModel::orsa(std::vector<int> & vec_inliers,
                       size_t nIter,
                       double *precision,
                       Model *model,
                       bool bVerbose) const {
  vec_inliers.clear();

  const int sizeSample = SizeSample();
  const int nData = x1_.ncol();
  if(nData <= sizeSample)
    return std::numeric_limits<double>::infinity();

  const double maxThreshold = (precision && *precision>0)?
    *precision * *precision *N2_(0,0)*N2_(0,0): // Square max error
    std::numeric_limits<double>::infinity();

  std::vector<ErrorIndex> vec_residuals(nData); // [residual,index]
  std::vector<int> vec_sample(sizeSample); // Sample indices

  // Possible sampling indices (could change in the optimization phase)
  std::vector<int> vec_index(nData);
  for (int i = 0; i < nData; ++i)
    vec_index[i] = i;

  // Precompute log combi
  double loge0 = log10((double)NbModels() * (nData-sizeSample));
  std::vector<float> vec_logc_n, vec_logc_k;
  makelogcombi_n(nData, vec_logc_n);
  makelogcombi_k(sizeSample,nData, vec_logc_k);

  // Reserve 10% of iterations for focused sampling
  size_t nIterReserve=nIter/10;
  nIter -= nIterReserve;

  // Output parameters
  double minNFA = std::numeric_limits<double>::infinity();
  double errorMax = 0;
  int side=0;

  // Main estimation loop.
  for (size_t iter=0; iter < nIter; iter++) {
    UniformSample(sizeSample, vec_index, &vec_sample); // Get random sample

    std::vector<Model> vec_models; // Up to max_models solutions
    Fit(vec_sample, &vec_models);

    // Evaluate models
    bool better=false;
    for (size_t k = 0; k < vec_models.size(); ++k)
    {
      // Residuals computation and ordering
      for (int i = 0; i < nData; ++i)
      {
        int s;
        double error = Error(vec_models[k], i, &s);
        vec_residuals[i] = ErrorIndex(error, i, s);
      }
      std::sort(vec_residuals.begin(), vec_residuals.end());

      // Most meaningful discrimination inliers/outliers
      ErrorIndex best = bestNFA(vec_residuals, loge0, maxThreshold,
                                vec_logc_n, vec_logc_k);
      if(best.error < minNFA) // A better model was found
      {
        better = true;
        minNFA = best.error;
        side = best.side;
        vec_inliers.resize(best.index);
        for (int i=0; i<best.index; ++i)
          vec_inliers[i] = vec_residuals[i].index;
        errorMax = vec_residuals[best.index-1].error; // Error threshold
        if(best.error<0 && model) *model = vec_models[k];
        if(bVerbose)
        {
          std::cout << "  nfa=" << minNFA
                    << " inliers=" << vec_inliers.size()
                    << " precision=" << denormalizeError(errorMax, side)
                    << " im" << side+1
                    << " (iter=" << iter;
          if(best.error<0) {
            std::cout << ",sample=" << vec_sample.front();
            std::vector<int>::const_iterator it=vec_sample.begin();
            for(++it; it != vec_sample.end(); ++it)
              std::cout << ',' << *it;
          }
          std::cout << ")" <<std::endl;
        }
      }
    }
    // ORSA optimization: draw samples among best set of inliers so far
    if((better && minNFA<0) || (iter+1==nIter && nIterReserve)) {
        if(vec_inliers.empty()) { // No model found at all so far
            nIter++; // Continue to look for any model, even not meaningful
            nIterReserve--;
        } else {
            vec_index = vec_inliers;
            if(nIterReserve) {
                nIter = iter+1+nIterReserve;
                nIterReserve=0;
            }
        }
    }
  }

  if(minNFA >= 0)
    vec_inliers.clear();

  if(bConvergence)
    refineUntilConvergence(vec_logc_n, vec_logc_k, loge0,
                           maxThreshold, minNFA, model, bVerbose, vec_inliers,
                           errorMax, side);

  if(precision)
    *precision = denormalizeError(errorMax, side);
  if(model && !vec_inliers.empty())
    Unnormalize(model);
  return minNFA;
}
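
For context, a sketch of a call site for orsa(). OrsaModel is abstract (SizeSample, Fit, Error, etc. are supplied by subclasses), so the HomographyModel subclass, its constructor arguments xA/xB, and the Model typedef below are assumptions of this sketch, not the library's confirmed API.

// Hypothetical driver; assumes <iostream>, <vector> and a concrete
// OrsaModel subclass named HomographyModel built from two matched point sets.
HomographyModel estimator(xA, xB);       // xA, xB: matched point sets (assumed)
std::vector<int> vec_inliers;
OrsaModel::Model H;                      // model type of the subclass (assumed)
double precision = 0;                    // <= 0: no a priori error threshold
double logNFA = estimator.orsa(vec_inliers, 10000, &precision, &H, true);
if(logNFA < 0)                           // NFA < 1: meaningful inlier set found
  std::cout << vec_inliers.size() << " inliers, precision=" << precision
            << ", log10(NFA)=" << logNFA << std::endl;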
template <typename Kernel>
std::pair<double, double> ACRANSAC(const Kernel &kernel,
  std::vector<size_t> & vec_inliers,
  size_t nIter = 1024,
  typename Kernel::Model * model = NULL,
  double precision = std::numeric_limits<double>::infinity(),
  bool bVerbose = false)
{
  vec_inliers.clear();

  const size_t sizeSample = Kernel::MINIMUM_SAMPLES;
  const size_t nData = kernel.NumSamples();
  if(nData <= sizeSample)
    return std::make_pair(0.0,0.0);

  const double maxThreshold = (precision==std::numeric_limits<double>::infinity()) ?
    std::numeric_limits<double>::infinity() :
    precision * kernel.normalizer2()(0,0) * kernel.normalizer2()(0,0);

  std::vector<ErrorIndex> vec_residuals(nData); // [residual,index]
  std::vector<double> vec_residuals_(nData);
  std::vector<size_t> vec_sample(sizeSample); // Sample indices

  // Possible sampling indices (could change in the optimization phase)
  std::vector<size_t> vec_index(nData);
  for (size_t i = 0; i < nData; ++i)
    vec_index[i] = i;

  // Precompute log combi
  double loge0 = log10((double)Kernel::MAX_MODELS * (nData-sizeSample));
  std::vector<float> vec_logc_n, vec_logc_k;
  makelogcombi_n(nData, vec_logc_n);
  makelogcombi_k(sizeSample, nData, vec_logc_k);

  // Output parameters
  double minNFA = std::numeric_limits<double>::infinity();
  double errorMax = std::numeric_limits<double>::infinity();

  // Reserve 10% of iterations for focused sampling
  size_t nIterReserve = nIter/10;
  nIter -= nIterReserve;

  // Main estimation loop.
  for (size_t iter=0; iter < nIter; ++iter) {
    UniformSample(sizeSample, vec_index, &vec_sample); // Get random sample

    std::vector<typename Kernel::Model> vec_models; // Up to max_models solutions
    kernel.Fit(vec_sample, &vec_models);

    // Evaluate models
    bool better = false;
    for (size_t k = 0; k < vec_models.size(); ++k)  {
      // Residuals computation and ordering
      kernel.Errors(vec_models[k], vec_residuals_);
      for (size_t i = 0; i < nData; ++i)  {
        const double error = vec_residuals_[i];
        vec_residuals[i] = ErrorIndex(error, i);
      }
      std::sort(vec_residuals.begin(), vec_residuals.end());

      // Most meaningful discrimination inliers/outliers
      const ErrorIndex best = bestNFA(
        sizeSample,
        kernel.logalpha0(),
        vec_residuals,
        loge0,
        maxThreshold,
        vec_logc_n,
        vec_logc_k,
        kernel.multError());

      if (best.first < minNFA /*&& vec_residuals[best.second-1].first < errorMax*/)  {
        // A better model was found
        better = true;
        minNFA = best.first;
        vec_inliers.resize(best.second);
        for (size_t i=0; i<best.second; ++i)
          vec_inliers[i] = vec_residuals[i].second;
        errorMax = vec_residuals[best.second-1].first; // Error threshold
        if(model) *model = vec_models[k];

        if(bVerbose)  {
          std::cout << "  nfa=" << minNFA
            << " inliers=" << best.second
            << " precisionNormalized=" << errorMax
            << " precision=" << kernel.unormalizeError(errorMax)
            << " (iter=" << iter;
          std::cout << ",sample=";
          std::copy(vec_sample.begin(), vec_sample.end(),
            std::ostream_iterator<size_t>(std::cout, ","));
          std::cout << ")" <<std::endl;
        }
      }
    }

    // ACRANSAC optimization: draw samples among best set of inliers so far
    if((better && minNFA<0) || (iter+1==nIter && nIterReserve)) {
      if(vec_inliers.empty()) { // No model found at all so far
        nIter++; // Continue to look for any model, even not meaningful
        nIterReserve--;
      } else {
        vec_index = vec_inliers;
        if(nIterReserve) {
            nIter = iter+1+nIterReserve;
            nIterReserve=0;
        }
      }
    }
  }

  if(minNFA >= 0)
    vec_inliers.clear();

  if (!vec_inliers.empty())
  {
    if (model)
      kernel.Unnormalize(model);
    errorMax = kernel.unormalizeError(errorMax);
  }

  return std::make_pair(errorMax, minNFA);
}
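
The Kernel requirements are implicit in the template body above; collecting them in one place makes the contract explicit. The interface list below is derived directly from the calls ACRANSAC makes, while the concrete HomographyKernel in the usage lines is a hypothetical type of this sketch.

// Kernel concept required by ACRANSAC (names taken from the calls above):
//   static const size_t MINIMUM_SAMPLES;       // minimal sample size
//   static const size_t MAX_MODELS;            // max models per sample
//   typedef ... Model;                         // estimated model type
//   size_t NumSamples() const;
//   void   Fit(const std::vector<size_t> &sample,
//              std::vector<Model> *models) const;
//   void   Errors(const Model &m, std::vector<double> &residuals) const;
//   double logalpha0() const;                  // log10 of alpha_0 for the NFA
//   double multError() const;                  // error scaling used by bestNFA
//   /* matrix */ normalizer2() const;          // normalization; (0,0) entry used
//   double unormalizeError(double e) const;
//   void   Unnormalize(Model *m) const;

// Hypothetical call site (HomographyKernel is an assumed concrete kernel):
HomographyKernel kernel(xA, xB);
std::vector<size_t> vec_inliers;
HomographyKernel::Model H;
const std::pair<double,double> ACRansacOut =
  ACRANSAC(kernel, vec_inliers, 1024, &H);
// ACRansacOut.first  : estimated error threshold (denormalized)
// ACRansacOut.second : log10(NFA) of the inlier set (< 0 => meaningful)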