ACKernelAdaptor(const Mat &x1, int w1, int h1,
                const Mat &x2, int w2, int h2,
                bool bPointToLine = true)
  : x1_(x1.rows(), x1.cols()), x2_(x2.rows(), x2.cols()),
    N1_(3,3), N2_(3,3), logalpha0_(0.0), bPointToLine_(bPointToLine)
{
  assert(2 == x1_.rows());
  assert(x1_.rows() == x2_.rows());
  assert(x1_.cols() == x2_.cols());

  NormalizePoints(x1, &x1_, &N1_, w1, h1);
  NormalizePoints(x2, &x2_, &N2_, w2, h2);

  // logalpha0_ makes the error measure scale invariant.
  if(bPointToLine) {
    // Ratio: diagonal of the containing image rectangle over the image area.
    double D = sqrt(w2*(double)w2 + h2*(double)h2); // diameter
    double A = w2*(double)h2;                       // area
    logalpha0_ = log10(2.0*D/A / N2_(0,0));
  }
  else {
    // Ratio: area of the unit disc over the image area.
    logalpha0_ = log10(M_PI/(w2*(double)h2) / (N2_(0,0)*N2_(0,0)));
  }
}
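// A minimal sketch of the point normalization assumed by the constructor
// above: center the points and scale isotropically by 1/sqrt(w*h), storing
// the 3x3 normalizing transform in *T so that T(0,0) carries the scale
// factor later read back as N1_(0,0)/N2_(0,0). This is an assumption about
// NormalizePoints, not necessarily the library's exact implementation, and
// Mat::eye is a hypothetical identity helper of the Mat type.
static void NormalizePoints(const Mat &points, Mat *normalizedPoints,
                            Mat *T, int width, int height)
{
  const double scale = 1.0 / sqrt((double)width * (double)height);
  *T = Mat::eye(3); // hypothetical identity helper
  (*T)(0,0) = (*T)(1,1) = scale;
  (*T)(0,2) = -0.5 * width  * scale;
  (*T)(1,2) = -0.5 * height * scale;
  *normalizedPoints = points;
  for (int j = 0; j < points.ncol(); ++j) { // apply T to each (x, y) column
    (*normalizedPoints)(0,j) = scale * points(0,j) + (*T)(0,2);
    (*normalizedPoints)(1,j) = scale * points(1,j) + (*T)(1,2);
  }
}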
/// Constructor, initializing \c logalpha0_
HomographyModel::HomographyModel(const Mat &x1, int w1, int h1,
                                 const Mat &x2, int w2, int h2,
                                 bool symError)
  : OrsaModel(x1, w1, h1, x2, w2, h2), symError_(symError)
{
  logalpha0_[0] = log10(M_PI/(w1*(double)h1) / (N1_(0,0)*N1_(0,0)));
  logalpha0_[1] = log10(M_PI/(w2*(double)h2) / (N2_(0,0)*N2_(0,0)));
}
/// Constructor, computing \c logalpha0_
FundamentalModel::FundamentalModel(const Mat &x1, int w1, int h1,
                                   const Mat &x2, int w2, int h2,
                                   bool symError)
  : OrsaModel(x1, w1, h1, x2, w2, h2), symError_(symError)
{
  double D, A; // Diameter and area of the image
  D = sqrt(w1*(double)w1 + h1*(double)h1);
  A = w1*(double)h1;
  logalpha0_[0] = log10(2.0*D/A / N1_(0,0));

  D = sqrt(w2*(double)w2 + h2*(double)h2);
  A = w2*(double)h2;
  logalpha0_[1] = log10(2.0*D/A / N2_(0,0));
}
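// Why these two flavors of logalpha0_ (our paraphrase of the a contrario
// reasoning of Moisan-Stival, not a comment from the library): alpha0 is
// the probability that a random point falls within unit distance of the
// model, in normalized coordinates.
// - Point-to-point models (homography): the error region is a disc of
//   area pi*eps^2, so alpha0 = pi/(w*h) in pixel units. Areas scale by
//   N(0,0)^2 under the normalizing transform, hence the division by
//   N(0,0)^2 in the constructor above.
// - Point-to-line models (fundamental matrix): the error region is a band
//   of width 2*eps around the epipolar line, whose length inside the image
//   is at most the diagonal D, so alpha0 = 2*D/(w*h) per unit distance.
//   Distances scale by N(0,0), hence the single division by N(0,0).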
/// Compute the element vector c by Gauss-Legendre quadrature on the
/// reference interval [-1, 1].
std::vector<double> SOElement::getc(std::size_t ielem) const
{
    // Element length: distance between the two end nodes.
    auto const xl = coords_[lnods_[ielem][1]] - coords_[lnods_[ielem][0]];

    std::vector<double> c(ntnoel_);
    c[0] = gl_.qgauss(
        myfunctional::make_functional([this, ielem](double r) {
            return - N1_(r) * func_(N1_(r) * coords_[lnods_[ielem][0]] +
                                    N2_(r) * coords_[lnods_[ielem][1]] +
                                    N3_(r) * coords_[lnods_[ielem][2]]);
        }),
        -1.0, 1.0) * xl * 0.5;

    c[1] = gl_.qgauss(
        myfunctional::make_functional([this, ielem](double r) {
            return - N2_(r) * func_(N1_(r) * coords_[lnods_[ielem][0]] +
                                    N2_(r) * coords_[lnods_[ielem][1]] +
                                    N3_(r) * coords_[lnods_[ielem][2]]);
        }),
        -1.0, 1.0) * xl * 0.5;

    c[2] = gl_.qgauss(
        myfunctional::make_functional([this, ielem](double r) {
            return - N3_(r) * func_(N1_(r) * coords_[lnods_[ielem][0]] +
                                    N2_(r) * coords_[lnods_[ielem][1]] +
                                    N3_(r) * coords_[lnods_[ielem][2]]);
        }),
        -1.0, 1.0) * xl * 0.5;

    return c;
}
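// A minimal sketch of the shape functions used above, assuming N1_, N2_
// and N3_ are the standard quadratic Lagrange shape functions on the
// reference element r in [-1, 1], with nodes ordered (-1, +1, 0) to match
// lnods_[ielem][0..2]. The actual member definitions may differ.
double N1(double r) { return 0.5 * r * (r - 1.0); }   // equals 1 at r = -1
double N2(double r) { return 0.5 * r * (r + 1.0); }   // equals 1 at r = +1
double N3(double r) { return (1.0 - r) * (1.0 + r); } // equals 1 at r =  0
// With this choice N1 + N2 + N3 == 1 for every r, and the factor xl * 0.5
// in getc() is the Jacobian of mapping [-1, 1] onto an element of length xl.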
/// Generic implementation of "ORSA":
/// A Probabilistic Criterion to Detect Rigid Point Matches
/// Between Two Images and Estimate the Fundamental Matrix.
/// BibTeX:
/// @article{DBLP:journals/ijcv/MoisanS04,
///   author    = {Lionel Moisan and B{\'e}renger Stival},
///   title     = {A Probabilistic Criterion to Detect Rigid Point Matches
///                Between Two Images and Estimate the Fundamental Matrix},
///   journal   = {International Journal of Computer Vision},
///   volume    = {57},
///   number    = {3},
///   year      = {2004},
///   pages     = {201-218},
///   ee        = {http://dx.doi.org/10.1023/B:VISI.0000013094.38752.54},
///   bibsource = {DBLP, http://dblp.uni-trier.de}
/// }
///
/// ORSA is based on an a contrario criterion for inlier/outlier
/// discrimination; it is parameter-free and relies on an optimized random
/// sampling procedure. It returns the log10 of the NFA and, optionally,
/// the best estimated model.
///
/// \param vec_inliers Output vector of inlier indices.
/// \param nIter The number of iterations.
/// \param precision (input/output) Threshold for inlier discrimination.
/// \param model The best computed model.
/// \param bVerbose Display optimization statistics.
double OrsaModel::orsa(std::vector<int> & vec_inliers,
                       size_t nIter,
                       double *precision,
                       Model *model,
                       bool bVerbose) const
{
  vec_inliers.clear();

  const int sizeSample = SizeSample();
  const int nData = x1_.ncol();
  if(nData <= sizeSample)
    return std::numeric_limits<double>::infinity();

  const double maxThreshold = (precision && *precision > 0) ?
    *precision * *precision * N2_(0,0)*N2_(0,0) : // Squared max error
    std::numeric_limits<double>::infinity();

  std::vector<ErrorIndex> vec_residuals(nData); // [residual, index]
  std::vector<int> vec_sample(sizeSample);      // Sample indices

  // Possible sampling indices (may change in the optimization phase)
  std::vector<int> vec_index(nData);
  for (int i = 0; i < nData; ++i)
    vec_index[i] = i;

  // Precompute log combinatorial terms
  double loge0 = log10((double)NbModels() * (nData - sizeSample));
  std::vector<float> vec_logc_n, vec_logc_k;
  makelogcombi_n(nData, vec_logc_n);
  makelogcombi_k(sizeSample, nData, vec_logc_k);

  // Reserve 10% of the iterations for focused sampling
  size_t nIterReserve = nIter/10;
  nIter -= nIterReserve;

  // Output parameters
  double minNFA = std::numeric_limits<double>::infinity();
  double errorMax = 0;
  int side = 0;

  // Main estimation loop.
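  // For reference, the criterion minimized by bestNFA() inside the loop is,
  // in log10 form (our paraphrase of the Moisan-Stival NFA; see the paper
  // cited above, not a formula quoted from this file):
  //   log10 NFA(k) = loge0 + logc_n[k] + logc_k[k]
  //                + (k - sizeSample) * (logalpha0 + m * log10(e_k))
  // where e_k is the k-th smallest squared residual and m converts the
  // squared residual into the measure of the error region (m = 0.5 for
  // point-to-line, 1 for point-to-point). A model is kept as "meaningful"
  // only when its log10(NFA) is negative.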
  for (size_t iter = 0; iter < nIter; iter++) {
    UniformSample(sizeSample, vec_index, &vec_sample); // Get random sample

    std::vector<Model> vec_models; // Up to max_models solutions
    Fit(vec_sample, &vec_models);

    // Evaluate models
    bool better = false;
    for (size_t k = 0; k < vec_models.size(); ++k) {
      // Residuals computation and ordering
      for (int i = 0; i < nData; ++i) {
        int s;
        double error = Error(vec_models[k], i, &s);
        vec_residuals[i] = ErrorIndex(error, i, s);
      }
      std::sort(vec_residuals.begin(), vec_residuals.end());

      // Most meaningful discrimination between inliers and outliers
      ErrorIndex best = bestNFA(vec_residuals, loge0, maxThreshold,
                                vec_logc_n, vec_logc_k);
      if(best.error < minNFA) // A better model was found
      {
        better = true;
        minNFA = best.error;
        side = best.side;
        vec_inliers.resize(best.index);
        for (int i = 0; i < best.index; ++i)
          vec_inliers[i] = vec_residuals[i].index;
        errorMax = vec_residuals[best.index-1].error; // Error threshold
        if(best.error < 0 && model)
          *model = vec_models[k];
        if(bVerbose)
        {
          std::cout << " nfa=" << minNFA
                    << " inliers=" << vec_inliers.size()
                    << " precision=" << denormalizeError(errorMax, side)
                    << " im" << side+1
                    << " (iter=" << iter;
          if(best.error < 0)
          {
            std::cout << ",sample=" << vec_sample.front();
            std::vector<int>::const_iterator it = vec_sample.begin();
            for(++it; it != vec_sample.end(); ++it)
              std::cout << ',' << *it;
          }
          std::cout << ")" << std::endl;
        }
      }
    }

    // ORSA optimization: draw samples among the best set of inliers so far
    if((better && minNFA < 0) || (iter+1 == nIter && nIterReserve)) {
      if(vec_inliers.empty()) { // No model found at all so far
        nIter++; // Continue looking for any model, even a non-meaningful one
        nIterReserve--;
      } else {
        vec_index = vec_inliers;
        if(nIterReserve) {
          nIter = iter + 1 + nIterReserve;
          nIterReserve = 0;
        }
      }
    }
  }

  if(minNFA >= 0)
    vec_inliers.clear();

  if(bConvergence)
    refineUntilConvergence(vec_logc_n, vec_logc_k, loge0, maxThreshold,
                           minNFA, model, bVerbose, vec_inliers,
                           errorMax, side);

  if(precision)
    *precision = denormalizeError(errorMax, side);
  if(model && !vec_inliers.empty())
    Unnormalize(model);
  return minNFA;
}
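// A minimal sketch of the log-combinatorial tables consumed by orsa()
// above, under the assumption that vec_logc_n[k] caches log10 C(n,k) and
// vec_logc_k[i] caches log10 C(i,k); the real helpers may differ in
// details such as the storage type.
static float logcombi(int k, int n)
{
  if(k >= n || k <= 0) return 0.0f;
  if(n - k < k) k = n - k; // exploit the symmetry C(n,k) = C(n,n-k)
  double r = 0.0;
  for(int i = 1; i <= k; ++i) // C(n,k) = prod_{i=1..k} (n-i+1)/i
    r += log10((double)(n - i + 1)) - log10((double)i);
  return (float)r;
}

/// Table of log10(C(n,k)) for all k in [0, n].
static void makelogcombi_n(int n, std::vector<float> &l)
{
  l.resize(n + 1);
  for(int k = 0; k <= n; ++k)
    l[k] = logcombi(k, n);
}

/// Table of log10(C(i,k)) for all i in [0, nmax], at fixed k.
static void makelogcombi_k(int k, int nmax, std::vector<float> &l)
{
  l.resize(nmax + 1);
  for(int i = 0; i <= nmax; ++i)
    l[i] = logcombi(k, i);
}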
/// Denormalize the error: recover the real error in pixels.
double OrsaModel::denormalizeError(double squareError, int side) const {
  return sqrt(squareError) / (side==0 ? N1_(0,0) : N2_(0,0));
}
/// Convert a squared normalized error into a pixel error in the second image.
double unormalizeError(double val) const { return sqrt(val) / N2_(0,0); }
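// A hypothetical end-to-end usage sketch of the classes above. The 2 x n
// column packing of the matches, the 10000-iteration budget, and the
// assumption that OrsaModel::Model is a 3x3 matrix typedef constructible
// as Model(3,3) are ours, not prescriptions from the library.
void exampleOrsaFundamental(const Mat &x1, int w1, int h1,
                            const Mat &x2, int w2, int h2)
{
  FundamentalModel kernel(x1, w1, h1, x2, w2, h2);
  std::vector<int> vec_inliers;
  double precision = 0;       // 0: let ORSA estimate the threshold itself
  OrsaModel::Model F(3, 3);   // assumed 3x3 matrix typedef
  double logNFA = kernel.orsa(vec_inliers, 10000, &precision, &F, true);
  if(logNFA < 0) {
    // A meaningful model was found: F has been denormalized, vec_inliers
    // holds the matches within `precision` pixels of the model.
  }
}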