Example #1
	virtual VectorXf gradient( const MatrixXf & a, const MatrixXf & b ) const {
		if (ktype_ == CONST_KERNEL)
			return VectorXf();
		MatrixXf fg = featureGradient( a, b );
		if (ktype_ == DIAG_KERNEL)
			return (f_.array()*fg.array()).rowwise().sum();
		else {
			MatrixXf p = fg*f_.transpose();
			// Flatten to a column vector: the total coefficient count is
			// unchanged, so the column-major buffer is kept intact.
			p.resize( p.cols()*p.rows(), 1 );
			return p;
		}
	}
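For reference, a minimal sketch of the flatten step at the end of gradient(): resizing a dynamic Eigen matrix to the same total number of coefficients does not reallocate, so the column-major buffer survives. An explicitly safe equivalent (the helper name is illustrative) uses Map:

#include <Eigen/Dense>
using Eigen::Map;
using Eigen::MatrixXf;
using Eigen::VectorXf;

// Same result as the in-place p.resize(p.cols()*p.rows(), 1) above,
// without relying on resize() keeping the data.
VectorXf flatten(const MatrixXf & p) {
	return Map<const VectorXf>(p.data(), p.size());
}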
Example #2
	// Gradient of the (possibly normalized) filter response; each
	// normalization branch applies the product rule through its
	// normalizer (see the note after this function).
	MatrixXf featureGradient( const MatrixXf & a, const MatrixXf & b ) const {
		if (ntype_ == NO_NORMALIZATION )
			return kernelGradient( a, b );
		else if (ntype_ == NORMALIZE_SYMMETRIC ) {
			MatrixXf fa = lattice_.compute( a*norm_.asDiagonal(), true );
			MatrixXf fb = lattice_.compute( b*norm_.asDiagonal() );
			MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
			VectorXf norm3 = norm_.array()*norm_.array()*norm_.array();
			MatrixXf r = kernelGradient( 0.5*( a.array()*fb.array() + fa.array()*b.array() ).matrix()*norm3.asDiagonal(), ones );
			return - r + kernelGradient( a*norm_.asDiagonal(), b*norm_.asDiagonal() );
		}
		else if (ntype_ == NORMALIZE_AFTER ) {
			MatrixXf fb = lattice_.compute( b );
			
			MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
			VectorXf norm2 = norm_.array()*norm_.array();
			MatrixXf r = kernelGradient( ( a.array()*fb.array() ).matrix()*norm2.asDiagonal(), ones );
			return - r + kernelGradient( a*norm_.asDiagonal(), b );
		}
		else /*if (ntype_ == NORMALIZE_BEFORE )*/ {
			MatrixXf fa = lattice_.compute( a, true );
			
			MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
			VectorXf norm2 = norm_.array()*norm_.array();
			MatrixXf r = kernelGradient( ( fa.array()*b.array() ).matrix()*norm2.asDiagonal(), ones );
			return -r+kernelGradient( a, b*norm_.asDiagonal() );
		}
	}
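A hedged reading of the NORMALIZE_AFTER branch, written for a single row of a and b and assuming norm_ holds the elementwise reciprocal $n = (K_\theta\mathbf{1})^{\odot-1}$ (which is consistent with how it is used here): the normalized response is $n \odot K_\theta b$, and differentiating the contraction $a^\top(n \odot K_\theta b)$ with the product rule gives

$$\frac{\partial}{\partial\theta}\,a^{\top}\!\big(n \odot K_{\theta} b\big)
  = (a \odot n)^{\top}\frac{\partial K_{\theta}}{\partial\theta}\,b
  \;-\;\big(a \odot Kb \odot n^{2}\big)^{\top}\frac{\partial K_{\theta}}{\partial\theta}\,\mathbf{1},
\qquad \frac{\partial n}{\partial\theta} = -\,n^{2}\odot\frac{\partial (K_{\theta}\mathbf{1})}{\partial\theta}.$$

The second term is the matrix r above (with fb = K b and norm2 = n²), and the first is kernelGradient(a*norm_.asDiagonal(), b); the other two branches follow the same pattern with their respective normalizers.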
Example #3
void normalizePts(MatrixXf &mat, Matrix3f &T) {
  // Hartley normalization: translate the centroid of the points to the
  // origin, then scale so the mean distance from the origin is sqrt(2).
  float cx = mat.col(0).mean();
  float cy = mat.col(1).mean();
  mat.array().col(0) -= cx;
  mat.array().col(1) -= cy;

  const float sqrt_2 = std::sqrt(2.0f);
  float meandist = (mat.array().col(0)*mat.array().col(0) + mat.array().col(1)*mat.array().col(1)).sqrt().mean();
  float scale = sqrt_2/meandist;
  mat.leftCols<2>().array() *= scale;

  // T maps the original points to the normalized ones.
  T << scale,     0, -scale*cx,
           0, scale, -scale*cy,
           0,     0,         1;
}
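A hypothetical usage sketch (the driver below is illustrative, not part of the original code): run normalizePts on random homogeneous points and check the two Hartley conditions, zero centroid and mean distance √2 ≈ 1.414.

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  MatrixXf pts = MatrixXf::Random(100, 3) * 50.0f;  // spread the points out
  pts.col(2).setOnes();                             // homogeneous coordinate
  Matrix3f T;
  normalizePts(pts, T);
  std::cout << "centroid:  " << pts.leftCols<2>().colwise().mean() << std::endl;
  std::cout << "mean dist: " << pts.leftCols<2>().rowwise().norm().mean() << std::endl;
  return 0;
}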
Example #4
VectorXf EMclustering::logsumexp(MatrixXf x, int dim)
{
	// Numerically stable log(sum(exp(x))) along each row: shift by the
	// row-wise maximum so the exponentials cannot overflow, then add the
	// shift back after the log. (The dim argument is currently unused;
	// the reduction is always row-wise.)
	int r = x.rows();
	int c = x.cols();

	VectorXf y(r);
	MatrixXf tmp1(r,c);
	VectorXf tmp2(r);
	VectorXf s(r);

	y = x.rowwise().maxCoeff();
	x = x.colwise() - y;
	tmp1 = x.array().exp();
	tmp2 = tmp1.rowwise().sum();
	s = y.array() + tmp2.array().log();

	// If a whole row is -inf, the shifted sum is NaN; fall back to the
	// row maximum.
	for(int i=0;i<s.size();i++)
	{
		if(!std::isfinite(s(i)))
		{
			s(i) = y(i);
		}
	}

	return s;
}
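Why the max-shift matters, as a fragment (em below stands for any EMclustering instance; the name is illustrative): in single precision, exp(x) overflows to inf for x above roughly 89, so the naive form fails where the shifted form is exact.

MatrixXf big(1, 2);
big << 1000.0f, 1000.0f;
// naive:   log(exp(1000) + exp(1000))  -> inf (exp overflows in float)
// shifted: 1000 + log(exp(0) + exp(0)) =  1000 + log(2) ≈ 1000.69
VectorXf s = em.logsumexp(big, 2);  // the dim argument is unused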
Example #5
void MapOptimizer::precondition_direction (MatrixXf & dH) const
{
  if (logging) LOG("  preconditioning direction");

  // Transform descent direction from H(y,x) coords to log(H(y,x)) coords,
  // which are equivalent to log(J(y,x)) coords (so hereafter dH = dJ).

  const MatrixXf & H = m_conditional;

  dH.array() *= H.array();
}
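A one-line justification, assuming dH arrives holding a gradient (or descent direction) in H(y,x) coordinates: since ∂H/∂log H = H elementwise, the chain rule gives

$$\frac{\partial E}{\partial \log H(y,x)} \;=\; H(y,x)\,\frac{\partial E}{\partial H(y,x)},$$

which is exactly the elementwise product dH.array() *= H.array() performed above.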
Example #6
// Vanilla RANSAC for a single homography: draw M minimal 4-point samples,
// keep the model with the most inliers under THRESHOLD, and copy the
// inlier rows of data (columns 0-2 are x1, columns 3-5 are x2) to inlier.
bool singleModelRANSAC(const MatrixXf &data, int M, MatrixXf &inlier) {
  int maxdegen = 10;
  int dataSize = data.rows();
  int psize = 4;
  MatrixXf x1 = data.block(0, 0, data.rows(), 3);
  MatrixXf x2 = data.block(0, 3, data.rows(), 3);
  vector<int> sample;
  MatrixXf pts1(4, 3);
  MatrixXf pts2(4, 3);
  int maxInlier = -1;
  MatrixXf bestResidue;
  for (int m = 0; m < M; m++) {
    int degencount = 0;
    int isdegen = 1;
    while (isdegen==1 && degencount < maxdegen) {
      degencount ++;
      RandomSampling(psize, dataSize, sample);
      for (int i = 0; i < psize; i++) {
        pts1.row(i) = x1.row(sample[i]);
        pts2.row(i) = x2.row(sample[i]);
      }
      if (sampleValidTest(pts1, pts2))
          isdegen = 0;
    }
    if (isdegen) {
      cout << "Cannot find valid p-subset" << endl;
      return false;
    }
    Matrix3f local_H;
    MatrixXf local_A;
    fitHomography(pts1, pts2, local_H, local_A);

    MatrixXf residue;
    computeHomographyResidue(x1, x2, local_H, residue);
    int inlierCount = (residue.array() < THRESHOLD).count();
    if (inlierCount > maxInlier) {
      maxInlier = inlierCount;
      bestResidue = residue;
    }
  }
  inlier.resize(maxInlier, data.cols());
  int transferCounter = 0;
  for (int i = 0; i < dataSize; i++) {
    if (bestResidue(i) < THRESHOLD) {
      inlier.row(transferCounter) = data.row(i);
      transferCounter++;
    }
  }
  if (transferCounter != maxInlier) {
    cout << "RANSAC result size does not match!!!!" << endl;
    return false;
  }
  return true;
}
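A common way to choose the trial count M (the helper below is hypothetical, not part of the original code): pick enough samples that, with confidence p, at least one 4-point sample is all inliers, given an assumed inlier ratio w.

#include <cmath>

int ransacTrials(float w, float p = 0.99f, int psize = 4) {
  // M = log(1 - p) / log(1 - w^psize)
  return (int)std::ceil(std::log(1.0f - p) /
                        std::log(1.0f - std::pow(w, (float)psize)));
}
// e.g. w = 0.5 gives M = 72 for p = 0.99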
Example #7
// Compute the KL-divergence of a set of marginals
double DenseCRF::klDivergence( const MatrixXf & Q ) const {
	double kl = 0;
	// Add the negative entropy term
	for( int i=0; i<Q.cols(); i++ )
		for( int l=0; l<Q.rows(); l++ )
			kl += Q(l,i)*log(std::max( Q(l,i), 1e-20f) );
	// Add the unary term
	if( unary_ ) {
		MatrixXf unary = unary_->get();
		for( int i=0; i<Q.cols(); i++ )
			for( int l=0; l<Q.rows(); l++ )
				kl += unary(l,i)*Q(l,i);
	}
	
	// Add all pairwise terms
	MatrixXf tmp;
	for( unsigned int k=0; k<pairwise_.size(); k++ ) {
		pairwise_[k]->apply( tmp, Q );
		kl += (Q.array()*tmp.array()).sum();
	}
	return kl;
}
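A note on what the returned value is, under the usual mean-field setup with $P(x) = \exp(-E(x))/Z$:

$$\mathrm{KL}(Q\,\|\,P) \;=\; \underbrace{\sum_{i,l} Q_l(i)\log Q_l(i)}_{\text{negative entropy}} \;+\; \underbrace{\mathbb{E}_{x\sim Q}\big[E(x)\big]}_{\text{unary + pairwise terms}} \;+\; \log Z.$$

Since $\log Z$ does not depend on $Q$, the code can drop it: the returned value differs from the true KL divergence by a constant, which is all that is needed to monitor convergence across iterations.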
Example #8
void MapOptimizer::constrain_direction (MatrixXf & dJ, float tol) const
{
  // Enforce the simultaneous constraints
  //
  //   /\x. sum y. dJ(y,x) = 0
  //   /\y. sum x. dJ(y,x) = 0
  //
  // We combine the two constraints by iteratively weakly enforcing both:
  // Let Px,Py project to the feasible subspaces for constraints 1,2, resp.
  // Each projection has eigenvalues in {0,1}.
  // We approximate the desired projection Pxy as a linear combination of Px,Py
  //   Pxy' = 1 - alpha ((1-Px) + (1-Py))
  // which has eigenvalues in {1} u [1 - alpha, 1 - 2 alpha].
  // Hence Pxy = lim n->infty Pxy'^n, where convergence rate depends on alpha.
  // The optimal alpha is 2/3, yielding Pxy' eigenvalues in {1} u [-1/3,1/3],
  // and resulting in project_scale = -alpha below.

  if (logging) LOG("  constraining direction");

  const size_t X = dom.size;
  const size_t Y = cod.size;

  const MatrixXf & J = m_joint;
  const VectorXf & sum_y_J = m_dom_prior;
  const VectorXf & sum_x_J = m_cod_prior;
  const float sum_xy_J = m_cod_prior.sum();

  VectorXf sum_y_dJ(J.cols());
  VectorXf sum_x_dJ(J.rows());

  // this is iterative, so we hand-optimize by merging loops

  const float * restrict J_ = J.data();
  const float * restrict sum_y_J_ = sum_y_J.data();
  const float * restrict sum_x_J_ = sum_x_J.data();

  float * restrict dJ_ = dJ.data();
  float * restrict project_y_ = sum_y_dJ.data();
  float * restrict project_x_ = sum_x_dJ.data();
  const float project_scale = -2/3.0;

  Vector<float> accum_x_dJ(Y);
  float * restrict accum_x_dJ_ = accum_x_dJ;

  // accumulate first projection
  accum_x_dJ.zero();

  for (size_t x = 0; x < X; ++x) {

    const float * restrict dJ_x_ = dJ_ + Y * x;

    float accum_y_dJ = 0;

    for (size_t y = 0; y < Y; ++y) {

      float dJ_xy = dJ_x_[y];

      accum_y_dJ += dJ_xy;
      accum_x_dJ_[y] += dJ_xy;
    }

    project_y_[x] = project_scale * accum_y_dJ / sum_y_J_[x];
  }

  for (size_t y = 0; y < Y; ++y) {
    project_x_[y] = project_scale * accum_x_dJ_[y] / sum_x_J_[y];
    accum_x_dJ_[y] = 0;
  }

  // apply previous projection and accumulate next projection
  for (size_t iter = 0; iter < 100; ++iter) {

    float error = 0;

    for (size_t x = 0; x < X; ++x) {

      const float * restrict J_x_ = J_ + Y * x;
      float * restrict dJ_x_ = dJ_ + Y * x;

      float accum_y_dJ = 0;

      for (size_t y = 0; y < Y; ++y) {

        float dJ_xy = dJ_x_[y] += J_x_[y] * (project_x_[y] + project_y_[x]);

        accum_y_dJ += dJ_xy;
        accum_x_dJ_[y] += dJ_xy;
      }

      project_y_[x] = project_scale * accum_y_dJ / sum_y_J_[x];
      imax(error, max(-accum_y_dJ, accum_y_dJ));
    }

    for (size_t y = 0; y < Y; ++y) {

      float accum_x_dJ_y = accum_x_dJ_[y];
      accum_x_dJ_[y] = 0;

      project_x_[y] = project_scale * accum_x_dJ_y / sum_x_J_[y];
      imax(error, max(-accum_x_dJ_y, accum_x_dJ_y));
    }

    if (error < tol) {
      if (logging) {
        LOG("   after " << (1+iter) << " iterations, error < " << error);
      }
      break;
    }
  }

  // apply final projection
  for (size_t x = 0; x < X; ++x) {

    const float * restrict J_x_ = J_ + Y * x;
    float * restrict dJ_x_ = dJ_ + Y * x;

    for (size_t y = 0; y < Y; ++y) {
      dJ_x_[y] += J_x_[y] * (project_x_[y] + project_y_[x]);
    }
  }

  if (debug) {

    sum_y_dJ = dJ.colwise().sum();
    sum_x_dJ = dJ.rowwise().sum();
    float sum_xy_dJ = sum_x_dJ.sum();

    DEBUG("max constraint errors = "
        << sqrt(sum_x_dJ.array().square().maxCoeff())<< ", "
        << sqrt(sum_y_dJ.array().square().maxCoeff())<< ", "
        << sum_xy_dJ);

    sum_y_dJ.array() /= sum_y_J.array();
    sum_x_dJ.array() /= sum_x_J.array();
    sum_xy_dJ /= sum_xy_J;

    DEBUG("max relative constraints errors = "
        << sqrt(sum_x_dJ.array().square().maxCoeff()) << ", "
        << sqrt(sum_y_dJ.array().square().maxCoeff()) << ", "
        << sum_xy_dJ);

    DEBUG("max(|dJ|) = " << dJ.array().abs().maxCoeff()
        << ", rms(dJ) = " << sqrt(dJ.array().square().mean()));
    DEBUG("max(J) / min(J) = " << (J.maxCoeff() / J.minCoeff()));
    DEBUG("max(sum x. J) / min(sum x. J) = "
        << (sum_x_J.maxCoeff() / sum_x_J.minCoeff()));
    DEBUG("max(sum y. J) / min(sum y. J) = "
        << (sum_y_J.maxCoeff() / sum_y_J.minCoeff()));
  }
}
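A toy, uniform-weight version of the same constraint (a sketch, not the optimized loops above): plain mean-centering of rows and columns commutes, so a single sweep zeroes both sums; the J-weighted projections in constrain_direction do not commute, which is why the code iterates with alpha = 2/3.

#include <Eigen/Dense>
using Eigen::MatrixXf;
using Eigen::RowVectorXf;
using Eigen::VectorXf;

void constrainUniform(MatrixXf & dJ) {
  RowVectorXf colMean = dJ.colwise().mean();  // evaluate first to avoid aliasing
  dJ.rowwise() -= colMean;                    // every column now sums to 0
  VectorXf rowMean = dJ.rowwise().mean();
  dJ.colwise() -= rowMean;                    // row sums 0; column sums stay 0
}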
Example #9
int main(void)
{
  cout << "Eigen v" << EIGEN_WORLD_VERSION << "." << EIGEN_MAJOR_VERSION << "." << EIGEN_MINOR_VERSION << endl;
  static const int R = 288;
  static const int N = R*(R+1)/2;
  static const int M = 63;
  static const int HALF_M = M/2;
  static const float nsigma = 2.5f;

  MatrixXf data = MatrixXf::Random(M, N);
  MatrixXf mask = MatrixXf::Zero(M, N);
  MatrixXf result = MatrixXf::Zero(1, N);
  VectorXf stddev = VectorXf::Zero(N);
  VectorXf centroid = VectorXf::Zero(N);
  VectorXf mean = VectorXf::Zero(N);
  VectorXf minval = VectorXf::Zero(N);
  VectorXf maxval = VectorXf::Zero(N);

  cout << "computing..." << flush;
  double t = GetRealTime();

  // computes the exact median of each column (data is column-major, so
  // column i occupies a contiguous block of M floats)
  if (M&1)
  {
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
      vector<float> row(data.data()+i*M, data.data()+(i+1)*M);
      nth_element(row.begin(), row.begin()+HALF_M, row.end());
      centroid(i) = row[HALF_M];
    }
  }
  // for even M: nth_element leaves everything before index HALF_M <= the
  // element at HALF_M, so the median is the average of row[HALF_M] and
  // the max of the first HALF_M elements
  else
  {
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
      vector<float> row(data.data()+i*M, data.data()+(i+1)*M);
      nth_element(row.begin(), row.begin()+HALF_M, row.end());
      centroid(i) = row[HALF_M];
      centroid(i) += *max_element(row.begin(), row.begin()+HALF_M);
      centroid(i) *= 0.5f;
    }
  }

  // compute the mean
  mean = data.colwise().mean();

  // compute stddev(x) = sqrt( 1/M SUM_i (x(i) - mean(x))^2 )
  stddev = (((data.rowwise() - mean.transpose()).array().square()).colwise().sum() *
            (1.0f / M))
               .array()
               .sqrt();

  // compute n sigmas from centroid
  minval = centroid - stddev * nsigma;
  maxval = centroid + stddev * nsigma;
  
  // compute clip mask: 1 where the sample lies strictly within n sigmas
  // of the centroid, 0 otherwise (both bounds must hold)
  for (int i = 0; i < N; i++)
  {
    mask.col(i) = ((data.col(i).array() > minval(i)) &&
                   (data.col(i).array() < maxval(i)))
                      .select(VectorXf::Ones(M), 0.0f);
  }

  // apply clip mask to data
  data.array() *= mask.array();

  // compute mean such that we ignore clipped data, this is our final result
  result = data.colwise().sum().array() / mask.colwise().sum().array();

  t = GetRealTime() - t;
  cout << "[done]" << endl << endl;

  size_t bytes = data.size()*sizeof(float);
  cout << "data: " << M << "x" << N << endl;
  cout << "size: " << bytes*1e-6f << " MB" << endl;
  cout << "rate: " << bytes/(1e6f*t) << " MB/s" << endl;
  cout << "time: " << t << " s" << endl;

  return 0;
}
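The clip-mask step above leans on one Eigen idiom worth isolating: elementwise comparisons produce boolean arrays that can be AND-ed before select() or cast(). A minimal sketch (the helper name is illustrative):

#include <Eigen/Dense>

Eigen::ArrayXf inRangeMask() {
  Eigen::ArrayXf v(5);
  v << -3, -1, 0, 1, 3;
  return ((v > -2.0f) && (v < 2.0f)).cast<float>();  // 0 1 1 1 0
}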