#include <iostream>
#include <Eigen/Dense>
using namespace std;
using namespace Eigen;

int main(int, char**)
{
  cout.precision(3);
  SelfAdjointEigenSolver<Matrix4f> es;
  Matrix4f X = Matrix4f::Random(4,4);
  Matrix4f A = X + X.transpose();
  es.compute(A);
  cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
  es.compute(A + Matrix4f::Identity(4,4)); // re-use es to compute eigenvalues of A+I
  cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;

  return 0;
}
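A minimal sketch (not part of the example above, variable names are illustrative): for 2x2 and 3x3 self-adjoint matrices, SelfAdjointEigenSolver also offers computeDirect(), which uses a closed-form algorithm instead of the iterative one.

#include <iostream>
#include <Eigen/Dense>
using namespace std;
using namespace Eigen;

int main()
{
  Matrix3f Y = Matrix3f::Random();
  Matrix3f B = Y + Y.transpose();  // symmetrize the random matrix
  SelfAdjointEigenSolver<Matrix3f> es;
  es.computeDirect(B);             // closed-form path, available for 2x2 and 3x3
  cout << "The eigenvalues of B are: " << es.eigenvalues().transpose() << endl;
  return 0;
}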
Example #2
void CMT::WhiteningTransform::initialize(const ArrayXXd& input, int dimOut) {
	if(input.cols() < input.rows())
		throw Exception("Too few inputs to compute whitening transform."); 

	mMeanIn = input.rowwise().mean();

	// compute covariances
	MatrixXd covXX = covariance(input);

	// input whitening
	SelfAdjointEigenSolver<MatrixXd> eigenSolver;

	eigenSolver.compute(covXX);

	Array<double, 1, Dynamic> eigenvalues = eigenSolver.eigenvalues();
	MatrixXd eigenvectors = eigenSolver.eigenvectors();

	// don't whiten directions with near-zero variance
	for(int i = 0; i < eigenvalues.size(); ++i)
		if(eigenvalues[i] < 1e-7)
			eigenvalues[i] = 1.;

	mPreIn = (eigenvectors.array().rowwise() * eigenvalues.sqrt().cwiseInverse()).matrix()
		* eigenvectors.transpose();
	mPreInInv = (eigenvectors.array().rowwise() * eigenvalues.sqrt()).matrix()
		* eigenvectors.transpose();

	mMeanOut = VectorXd::Zero(dimOut);
	mPreOut = MatrixXd::Identity(dimOut, dimOut);
	mPreOutInv = MatrixXd::Identity(dimOut, dimOut);
	mPredictor = MatrixXd::Zero(dimOut, input.rows());
	mGradTransform = MatrixXd::Zero(dimOut, input.rows());
	mLogJacobian = 1.;
}
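The two matrices built above are the symmetric (ZCA) whitening transform and its inverse. A standalone sketch of the same construction, assuming a precomputed covariance matrix (the function name is illustrative):

#include <Eigen/Dense>
using namespace Eigen;

// Sketch: symmetric whitening W = V * D^(-1/2) * V^T from a covariance matrix,
// guarding near-zero variances as the example above does.
MatrixXd zcaWhitening(const MatrixXd& covXX)
{
	SelfAdjointEigenSolver<MatrixXd> es(covXX);
	VectorXd d = es.eigenvalues();
	for (int i = 0; i < d.size(); ++i)
		if (d[i] < 1e-7)
			d[i] = 1.;
	const MatrixXd& V = es.eigenvectors();
	return V * d.cwiseSqrt().cwiseInverse().asDiagonal() * V.transpose();
}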
Example #3
void ctBdG::Initialize_Euabv() {
	SelfAdjointEigenSolver<MatrixXcd> ces;
	distribution(_NK2);
	int nkx, nky, nk;
	for (int ig = 0; ig < _size; ++ig) {
		if (ig==_rank) {
			_bdg_E.resize(recvcount,4);
			_bdg_u.resize(recvcount,4);
			_bdg_a.resize(recvcount,4);
			_bdg_b.resize(recvcount,4);
			_bdg_v.resize(recvcount,4);
			local_Delta_k_r = new double[recvcount];
			local_Delta_k_i = new double[recvcount];
			for (int i = 0; i < recvcount; ++i) {

				nk = recvbuf[i];
				nkx = nk%_NK;
				nky = int(nk/_NK);

				construct_BdG(_gauss_k[nkx],_gauss_k[nky], _mu, _hi);
				ces.compute(_bdg);

				_bdg_E.row(i) = ces.eigenvalues();
				_bdg_u.row(i) = ces.eigenvectors().row(0);
				_bdg_a.row(i) = ces.eigenvectors().row(1);
				_bdg_b.row(i) = ces.eigenvectors().row(2);
				_bdg_v.row(i) = ces.eigenvectors().row(3);
			}
		}
	}
}
Example #4
void MaterialFitting::solveByNNLS(){

  const SXMatrix M1 = assembleObjMatrix();
  vector<SX> smooth_funs;
  computeSmoothObjFunctions(smooth_funs);
  
  SXMatrix obj_fun_m(M1.size1()*M1.size2()+smooth_funs.size(),1);
  for (int i = 0; i < M1.size1(); ++i){
    for (int j = 0; j < M1.size2(); ++j)
	  obj_fun_m.elem(i*M1.size2()+j,0) = M1.elem(i,j);
  }
  for (int i = 0; i < smooth_funs.size(); ++i){
    obj_fun_m.elem(M1.size1()*M1.size2()+i,0) = smooth_funs[i]; // offset past the M1 block filled above
  }

  VSX variable_x;
  initAllVariables(variable_x);
  CasADi::SXFunction g_fun = CasADi::SXFunction(variable_x,obj_fun_m);
  g_fun.init();
  
  VectorXd x0(variable_x.size()), b;
  x0.setZero();
  CASADI::evaluate(g_fun, x0, b);
  b = -b;

  MatrixXd J = CASADI::convert<double>(g_fun.jac());
  assert_eq(J.rows(), b.size());
  assert_eq(J.cols(), x0.size());
  
  VectorXd x(b.size()),  w(J.cols()), zz(J.rows());
  VectorXi index(J.cols()*2);
  getInitValue(x);
  
  int exit_code = 0;
  double residual = 1e-18;
  nnls(&J(0,0), J.rows(), J.rows(), J.cols(), 
  	   &b[0], &x[0], &residual,
  	   &w[0], &zz[0], &index[0], &exit_code);
  INFO_LOG("residual: " << residual);
  print_NNLS_exit_code(exit_code);

  rlst.resize(x.size());
  for (int i = 0; i < x.size(); ++i){
    rlst[i] = x[i];
  }

  const MatrixXd H = J.transpose()*J;
  SelfAdjointEigenSolver<MatrixXd> es;
  es.compute(H);
  cout << "The eigenvalues of H are: " << es.eigenvalues().transpose() << endl;
}
Example #5
bool IsotropicMaxStressDamage(const Matrix3d S, const double maxStress) {

        /*
         * compute eigenvalues of the stress tensor
         */
        SelfAdjointEigenSolver<Matrix3d> es;
        es.compute(S); // compute eigenvalues and eigenvectors of the stress tensor

        double max_eigenvalue = es.eigenvalues().maxCoeff();

        return max_eigenvalue > maxStress;
}
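Since only the largest eigenvalue matters here, the eigenvector computation can be skipped; a hedged sketch of the same check (the function name is illustrative, EigenvaluesOnly is Eigen's standard option flag):

#include <Eigen/Dense>
using namespace Eigen;

// Sketch: ask the solver for eigenvalues only.
bool exceedsMaxStress(const Matrix3d& S, double maxStress)
{
        SelfAdjointEigenSolver<Matrix3d> es(S, EigenvaluesOnly);
        return es.eigenvalues().maxCoeff() > maxStress;
}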
Example #6
void MaterialFitting::solveByLinearSolver(){
  
  MatrixXd H;
  VectorXd g;
  hessGrad(H,g);

  SelfAdjointEigenSolver<MatrixXd> es;
  es.compute(H);
  cout << "H.norm() = " << H.norm() << endl;
  cout << "The eigenvalues of H are: \n" << es.eigenvalues().transpose() << endl;

  const VectorXd x = H.lu().solve(-g);
  rlst.resize(x.size());
  for (int i = 0; i < x.size(); ++i)
    rlst[i] = x[i];
  cout << "\n(H*x+g).norm() = " << (H*x+g).norm() << "\n\n";
}
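Because H is symmetric, a factorization that exploits symmetry is a natural alternative to the LU solve above; a sketch using Eigen's LDLT (illustrative, not the author's code):

#include <Eigen/Dense>
using namespace Eigen;

// Sketch: solve H*x = -g with an LDLT factorization. Eigen's LDLT targets
// (semi)definite matrices, so the eigenvalue check above remains useful.
VectorXd solveSymmetric(const MatrixXd& H, const VectorXd& g)
{
  LDLT<MatrixXd> ldlt(H);
  return ldlt.solve(-g);
}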
Example #7
void drwnNNGraphMLearner::subGradientStep(const MatrixXd& G, double alpha)
{
    DRWN_FCN_TIC;

    // gradient step
    _M -= alpha * G;

    // project onto psd
    SelfAdjointEigenSolver<MatrixXd> es;
    es.compute(_M);

    const VectorXd d = es.eigenvalues().real();
    if ((d.array() < 0.0).any()) {
        const MatrixXd V = es.eigenvectors();
        _M = V * d.cwiseMax(VectorXd::Constant(d.rows(), DRWN_EPSILON)).asDiagonal() * V.inverse();
    }

    DRWN_FCN_TOC;
}
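The projection above is the usual eigenvalue-clipping projection onto the positive semidefinite cone; a standalone sketch (function name illustrative), using the transpose of the orthogonal eigenvector matrix in place of its inverse:

#include <Eigen/Dense>
using namespace Eigen;

// Sketch: clip negative eigenvalues and rebuild the matrix. V is orthogonal,
// so V.transpose() equals V.inverse() up to rounding error.
MatrixXd projectPSD(const MatrixXd& M, double eps = 1e-12)
{
    SelfAdjointEigenSolver<MatrixXd> es(M);
    const VectorXd d = es.eigenvalues().cwiseMax(eps);
    const MatrixXd& V = es.eigenvectors();
    return V * d.asDiagonal() * V.transpose();
}
Example #8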
template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
  typedef typename MatrixType::Index Index;
  /* this test covers the following files:
     EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
  */
  Index rows = m.rows();
  Index cols = m.cols();

  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  RealScalar largerEps = 10*test_precision<RealScalar>();

  MatrixType a = MatrixType::Random(rows,cols);
  MatrixType a1 = MatrixType::Random(rows,cols);
  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
  MatrixType symmC = symmA;
  
  svd_fill_random(symmA,Symmetric);

  symmA.template triangularView<StrictlyUpper>().setZero();
  symmC.template triangularView<StrictlyUpper>().setZero();

  MatrixType b = MatrixType::Random(rows,cols);
  MatrixType b1 = MatrixType::Random(rows,cols);
  MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
  symmB.template triangularView<StrictlyUpper>().setZero();
  
  CALL_SUBTEST( selfadjointeigensolver_essential_check(symmA) );

  SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
  // generalized eigen pb
  GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);

  SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
  VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
  VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
  
  // generalized eigen problem Ax = lBx
  eiSymmGen.compute(symmC, symmB,Ax_lBx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
          symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem BAx = lx
  eiSymmGen.compute(symmC, symmB,BAx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem ABx = lx
  eiSymmGen.compute(symmC, symmB,ABx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));


  eiSymm.compute(symmC);
  MatrixType sqrtSymmA = eiSymm.operatorSqrt();
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
  VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());

  MatrixType id = MatrixType::Identity(rows, cols);
  VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));

  SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  eiSymmUninitialized.compute(symmA, false);
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  // test Tridiagonalization's methods
  Tridiagonalization<MatrixType> tridiag(symmC);
  VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal());
  VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>());
  Matrix<RealScalar,Dynamic,Dynamic> T = tridiag.matrixT();
  if(rows>1 && cols>1) {
    // FIXME check that upper and lower part are 0:
    //VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView<Upper>().isZero());
  }
  VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal());
  VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>());
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT() * tridiag.matrixQ().adjoint());
  
  // Test computation of eigenvalues from tridiagonal matrix
  if(rows > 1)
  {
    SelfAdjointEigenSolver<MatrixType> eiSymmTridiag;
    eiSymmTridiag.computeFromTridiagonal(tridiag.matrixT().diagonal(), tridiag.matrixT().diagonal(-1), ComputeEigenvectors);
    VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmTridiag.eigenvalues());
    VERIFY_IS_APPROX(tridiag.matrixT(), eiSymmTridiag.eigenvectors().real() * eiSymmTridiag.eigenvalues().asDiagonal() * eiSymmTridiag.eigenvectors().real().transpose());
  }

  if (rows > 1)
  {
    // Test matrix with NaN
    symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
    SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
    VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
  }
}
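Example #9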
template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
  typedef typename MatrixType::Index Index;
  /* this test covers the following files:
     EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
  */
  Index rows = m.rows();
  Index cols = m.cols();

  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
  typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
  typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;

  RealScalar largerEps = 10*test_precision<RealScalar>();

  MatrixType a = MatrixType::Random(rows,cols);
  MatrixType a1 = MatrixType::Random(rows,cols);
  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
  symmA.template triangularView<StrictlyUpper>().setZero();

  MatrixType b = MatrixType::Random(rows,cols);
  MatrixType b1 = MatrixType::Random(rows,cols);
  MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
  symmB.template triangularView<StrictlyUpper>().setZero();

  SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
  // generalized eigen pb
  GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmA, symmB);

  #ifdef HAS_GSL
  if (internal::is_same<RealScalar,double>::value)
  {
    // restore symmA and symmB.
    symmA = MatrixType(symmA.template selfadjointView<Lower>());
    symmB = MatrixType(symmB.template selfadjointView<Lower>());
    typedef GslTraits<Scalar> Gsl;
    typename Gsl::Matrix gEvec=0, gSymmA=0, gSymmB=0;
    typename GslTraits<RealScalar>::Vector gEval=0;
    RealVectorType _eval;
    MatrixType _evec;
    convert<MatrixType>(symmA, gSymmA);
    convert<MatrixType>(symmB, gSymmB);
    convert<MatrixType>(symmA, gEvec);
    gEval = GslTraits<RealScalar>::createVector(rows);

    Gsl::eigen_symm(gSymmA, gEval, gEvec);
    convert(gEval, _eval);
    convert(gEvec, _evec);

    // test gsl itself !
    VERIFY((symmA * _evec).isApprox(_evec * _eval.asDiagonal(), largerEps));

    // compare with eigen
    VERIFY_IS_APPROX(_eval, eiSymm.eigenvalues());
    VERIFY_IS_APPROX(_evec.cwiseAbs(), eiSymm.eigenvectors().cwiseAbs());

    // generalized pb
    Gsl::eigen_symm_gen(gSymmA, gSymmB, gEval, gEvec);
    convert(gEval, _eval);
    convert(gEvec, _evec);
    // test GSL itself:
    VERIFY((symmA * _evec).isApprox(symmB * (_evec * _eval.asDiagonal()), largerEps));

    // compare with eigen
    MatrixType normalized_eivec = eiSymmGen.eigenvectors()*eiSymmGen.eigenvectors().colwise().norm().asDiagonal().inverse();
    VERIFY_IS_APPROX(_eval, eiSymmGen.eigenvalues());
    VERIFY_IS_APPROX(_evec.cwiseAbs(), normalized_eivec.cwiseAbs());

    Gsl::free(gSymmA);
    Gsl::free(gSymmB);
    GslTraits<RealScalar>::free(gEval);
    Gsl::free(gEvec);
  }
  #endif

  VERIFY_IS_EQUAL(eiSymm.info(), Success);
  VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
          eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal(), largerEps));
  VERIFY_IS_APPROX(symmA.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());

  SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
  VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
  VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());

  // generalized eigen problem Ax = lBx
  eiSymmGen.compute(symmA, symmB,Ax_lBx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
          symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem BAx = lx
  eiSymmGen.compute(symmA, symmB,BAx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmB.template selfadjointView<Lower>() * (symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem ABx = lx
  eiSymmGen.compute(symmA, symmB,ABx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmA.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));


  MatrixType sqrtSymmA = eiSymm.operatorSqrt();
  VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
  VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());

  MatrixType id = MatrixType::Identity(rows, cols);
  VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));

  SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  eiSymmUninitialized.compute(symmA, false);
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  // test Tridiagonalization's methods
  Tridiagonalization<MatrixType> tridiag(symmA);
  // FIXME tridiag.matrixQ().adjoint() does not work
  VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());

  if (rows > 1)
  {
    // Test matrix with NaN
    symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
    SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA);
    VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
  }
}
Example #10
CFeatures* CJade::apply(CFeatures* features)
{
	ASSERT(features);
	SG_REF(features);

	SGMatrix<float64_t> X = ((CDenseFeatures<float64_t>*)features)->get_feature_matrix();

	int n = X.num_rows;
	int T = X.num_cols;
	int m = n;

	Map<MatrixXd> EX(X.matrix,n,T);

	// Mean center X
	VectorXd mean = (EX.rowwise().sum() / (float64_t)T);
	MatrixXd SPX = EX.colwise() - mean;

	MatrixXd cov = (SPX * SPX.transpose()) / (float64_t)T;

	#ifdef DEBUG_JADE
	std::cout << "cov" << std::endl;
	std::cout << cov << std::endl;
	#endif

	// Whitening & Projection onto signal subspace
	SelfAdjointEigenSolver<MatrixXd> eig;
	eig.compute(cov);

	#ifdef DEBUG_JADE
	std::cout << "eigenvectors" << std::endl;
	std::cout << eig.eigenvectors() << std::endl;

	std::cout << "eigenvalues" << std::endl;
	std::cout << eig.eigenvalues().asDiagonal() << std::endl;
	#endif

	// Scaling
	VectorXd scales = eig.eigenvalues().cwiseSqrt();
	MatrixXd B = scales.cwiseInverse().asDiagonal() * eig.eigenvectors().transpose();

	#ifdef DEBUG_JADE
	std::cout << "whitener" << std::endl;
	std::cout << B << std::endl;
	#endif

	// Sphering
	SPX = B * SPX;

	// Estimation of the cumulant matrices
	int dimsymm = (m * ( m + 1)) / 2; // Dim. of the space of real symm matrices
	int nbcm = dimsymm; //  number of cumulant matrices
	m_cumulant_matrix = SGMatrix<float64_t>(m,m*nbcm);	// Storage for cumulant matrices
	Map<MatrixXd> CM(m_cumulant_matrix.matrix,m,m*nbcm);
	MatrixXd R(m,m); R.setIdentity();
	MatrixXd Qij = MatrixXd::Zero(m,m); // Temp for a cum. matrix
	VectorXd Xim = VectorXd::Zero(m); // Temp
	VectorXd Xjm = VectorXd::Zero(m); // Temp
	VectorXd Xijm = VectorXd::Zero(m); // Temp
	int Range = 0;

	for (int im = 0; im < m; im++)
	{
		Xim = SPX.row(im);
		Xijm = Xim.cwiseProduct(Xim);
		Qij = SPX.cwiseProduct(Xijm.replicate(1,m).transpose()) * SPX.transpose() / (float)T - R - 2*R.col(im)*R.col(im).transpose();
		CM.block(0,Range,m,m) = Qij;
		Range = Range + m;
		for (int jm = 0; jm < im; jm++)
		{
			Xjm = SPX.row(jm);
			Xijm = Xim.cwiseProduct(Xjm);
			Qij = SPX.cwiseProduct(Xijm.replicate(1,m).transpose()) * SPX.transpose() / (float)T - R.col(im)*R.col(jm).transpose() - R.col(jm)*R.col(im).transpose();
			CM.block(0,Range,m,m) =  sqrt(2)*Qij;
			Range = Range + m;
		}
	}

	#ifdef DEBUG_JADE
	std::cout << "cumulatant matrices" << std::endl;
	std::cout << CM << std::endl;
	#endif

	// Stack cumulant matrix into ND Array
	index_t * M_dims = SG_MALLOC(index_t, 3);
	M_dims[0] = m;
	M_dims[1] = m;
	M_dims[2] = nbcm;
	SGNDArray< float64_t > M(M_dims, 3);

	for (int i = 0; i < nbcm; i++)
	{
		Map<MatrixXd> EM(M.get_matrix(i),m,m);
		EM = CM.block(0,i*m,m,m);
	}

	// Diagonalize
	SGMatrix<float64_t> Q = CJADiagOrth::diagonalize(M, m_mixing_matrix, tol, max_iter);
	Map<MatrixXd> EQ(Q.matrix,m,m);
	EQ = -1 * EQ.inverse();

	#ifdef DEBUG_JADE
	std::cout << "diagonalizer" << std::endl;
	std::cout << EQ << std::endl;
	#endif

	// Separating matrix
	SGMatrix<float64_t> sep_matrix = SGMatrix<float64_t>(m,m);
	Map<MatrixXd> C(sep_matrix.matrix,m,m);
	C = EQ.transpose() * B;

	// Sort
	VectorXd A = (B.inverse()*EQ).cwiseAbs2().colwise().sum();
	bool swap = false;
	do
	{
		swap = false;
		for (int j = 1; j < n; j++)
		{
			if ( A(j) < A(j-1) )
			{
				std::swap(A(j),A(j-1));
				C.col(j).swap(C.col(j-1));
				swap = true;
			}
		}

	} while(swap);

	for (int j = 0; j < m/2; j++)
		C.row(j).swap( C.row(m-1-j) );

	// Fix Signs
	VectorXd signs = VectorXd::Zero(m);
	for (int i = 0; i < m; i++)
	{
		if( C(i,0) < 0 )
			signs(i) = -1;
		else
			signs(i) = 1;
	}
	C = signs.asDiagonal() * C;

	#ifdef DEBUG_JADE
	std::cout << "un mixing matrix" << std::endl;
	std::cout << C << std::endl;
	#endif

	// Unmix
	EX = C * EX;

	m_mixing_matrix = SGMatrix<float64_t>(m,m);
	Map<MatrixXd> Emixing_matrix(m_mixing_matrix.matrix,m,m);
	Emixing_matrix = C.inverse();

	return features;
}
Example #11
bool HSpatialDivision2::divide(int target_count)
{
	/* - variables - */

	HSDVertexCluster2 vc;
	int i, lastClusterCount = 1, continuousUnchangeCount = 0;
	float diff;

	// maximum/minimum curvature and the direction
	float maxCurvature; // maximum curvature
	float minCurvature; // minimum curvature
	HNormal maxDir; // maximum curvature direction
	HNormal minDir; // minimum curvature direction

	SelfAdjointEigenSolver<Matrix3f> eigensolver;
	solver = &eigensolver;
	Matrix3f M;

	htime.setCheckPoint();

	/* - routines - */

	// init the first cluster
	for (i = 0; i < vertexCount; i ++) 
		if (vertices[i].clusterIndex != INVALID_CLUSTER_INDEX) 
			vc.addVertex(i, vertices[i]);

	cout << "\t-----------------------------------------------" << endl 
		 << "\tnon-referenced vertices count:\t" << vertexCount - vc.vIndices->size() << endl
		 << "\tminimum normal-vari factor:\t" << HSDVertexCluster2::MINIMUM_NORMAL_VARI << endl
		 << "\tvalid vertices count:\t" << vc.vIndices->size() << endl;

	flog << "\t-----------------------------------------------" << endl
		 << "\tnon-referenced vertices count:\t" << vertexCount - vc.vIndices->size() << endl
		 << "\tminimum normal-vari factor:\t" << HSDVertexCluster2::MINIMUM_NORMAL_VARI << endl
		 << "\tvalid vertices count:\t" << vc.vIndices->size() << endl;

	for (i = 0; i < faceCount; i ++) {
		vc.addFace(i);
	}
	vc.importance = vc.getImportance();
	clusters.addElement(vc);

	vector<int> index; // index of eigenvalues in eigensolver.eigenvalues()
	index.push_back(0);
	index.push_back(1);
	index.push_back(2);

	// subdivide until the divided clusters reach the target count
	while(clusters.count() < target_count)
	{
		//diff = ((float)target_count - (float)clusters.count()) / target_count;
		//if (diff < 0.01) {
		//	break;
		//}

		if (clusters.count() == lastClusterCount) {
			continuousUnchangeCount ++;
		}
		else {
			continuousUnchangeCount = 0;
		}

		if (continuousUnchangeCount >= 50) {
			cout << "\tstop without reaching the target count because of unchanged cluster count" << endl;
			flog << "\tstop without reaching the target count because of unchanged cluster count" << endl;
			break;
		}
		
		if (clusters.empty()) {
			cerr << "#error: don't know why but the clusters heap have came to empty" << endl;
			flog << "#error: don't know why but the clusters heap have came to empty" << endl;
			return false;
		}

		// get the value of the top in the heap of clusters and delete it
		lastClusterCount = clusters.count();
		vc = clusters.getTop();
 		clusters.deleteTop();

		//PrintHeap(fout, clusters);

		// get the eigenvalue
		M << vc.awQ.a11, vc.awQ.a12, vc.awQ.a13,
			 vc.awQ.a12, vc.awQ.a22, vc.awQ.a23,
			 vc.awQ.a13, vc.awQ.a23, vc.awQ.a33;
		eigensolver.compute(M);
		if (eigensolver.info() != Success) {
			cerr << "#error: eigenvalues computing error" << endl;
			flog << "#error: eigenvalues computing error" << endl;
			return false;
		}

		// sort the eigenvalues in descending order by the index
		std::sort(index.begin(), index.end(), cmp);

		// get the maximum/minimum curvature and the direction
		maxCurvature = eigensolver.eigenvalues()(index[1]); // maximum curvature
		minCurvature = eigensolver.eigenvalues()(index[2]); // minimum curvature
		maxDir.Set(eigensolver.eigenvectors()(0, index[1]),
			       eigensolver.eigenvectors()(1, index[1]),
			       eigensolver.eigenvectors()(2, index[1])); // maximum curvature direction
		minDir.Set(eigensolver.eigenvectors()(0, index[2]),
			       eigensolver.eigenvectors()(1, index[2]),
			       eigensolver.eigenvectors()(2, index[2])); // minimum curvature direction

		// partition to 8
		if (vc.awN.Length() / vc.area < SPHERE_MEAN_NORMAL_THRESH)
		{
			HNormal p_nm = maxDir ^ minDir;

			partition8(vc,
				p_nm, HFaceFormula::calcD(p_nm, vc.meanVertex),
				maxDir, HFaceFormula::calcD(maxDir, vc.meanVertex),
				minDir, HFaceFormula::calcD(minDir, vc.meanVertex));
		}
		// partition to 4
		else if (maxCurvature / minCurvature < MAX_MIN_CURVATURE_RATIO_TREATED_AS_HEMISPHERE)
		{
			partition4(vc, 
				maxDir, HFaceFormula::calcD(maxDir, vc.meanVertex),
				minDir, HFaceFormula::calcD(minDir, vc.meanVertex));
		}
		// partition to 2
		else {
			partition2(vc, maxDir, HFaceFormula::calcD(maxDir, vc.meanVertex));
		}

#ifdef PRINT_DEBUG_INFO
		PrintHeap(fdebug, clusters);
#endif
	}

	//PrintHeap(cout, clusters);

	cout << "\tsimplification time:\t" << htime.printElapseSec() << endl << endl;

	flog << "\tsimplification time:\t" << htime.printElapseSec() << endl << endl;

	return true;
}
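Example #12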
template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
  typedef typename MatrixType::Index Index;
  /* this test covers the following files:
     EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
  */
  Index rows = m.rows();
  Index cols = m.cols();

  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  RealScalar largerEps = 10*test_precision<RealScalar>();

  MatrixType a = MatrixType::Random(rows,cols);
  MatrixType a1 = MatrixType::Random(rows,cols);
  MatrixType symmA =  a.adjoint() * a + a1.adjoint() * a1;
  MatrixType symmC = symmA;

  // randomly nullify some rows/columns
  {
    Index count = 1;//internal::random<Index>(-cols,cols);
    for(Index k=0; k<count; ++k)
    {
      Index i = internal::random<Index>(0,cols-1);
      symmA.row(i).setZero();
      symmA.col(i).setZero();
    }
  }

  symmA.template triangularView<StrictlyUpper>().setZero();
  symmC.template triangularView<StrictlyUpper>().setZero();

  MatrixType b = MatrixType::Random(rows,cols);
  MatrixType b1 = MatrixType::Random(rows,cols);
  MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
  symmB.template triangularView<StrictlyUpper>().setZero();

  SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
  SelfAdjointEigenSolver<MatrixType> eiDirect;
  eiDirect.computeDirect(symmA);
  // generalized eigen pb
  GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);

  VERIFY_IS_EQUAL(eiSymm.info(), Success);
  VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
          eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal(), largerEps));
  VERIFY_IS_APPROX(symmA.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());

  VERIFY_IS_EQUAL(eiDirect.info(), Success);
  VERIFY((symmA.template selfadjointView<Lower>() * eiDirect.eigenvectors()).isApprox(
          eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal(), largerEps));
  VERIFY_IS_APPROX(symmA.template selfadjointView<Lower>().eigenvalues(), eiDirect.eigenvalues());

  SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
  VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
  VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());

  // generalized eigen problem Ax = lBx
  eiSymmGen.compute(symmC, symmB,Ax_lBx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
          symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem BAx = lx
  eiSymmGen.compute(symmC, symmB,BAx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));

  // generalized eigen problem ABx = lx
  eiSymmGen.compute(symmC, symmB,ABx_lx);
  VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
  VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
         (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));


  eiSymm.compute(symmC);
  MatrixType sqrtSymmA = eiSymm.operatorSqrt();
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
  VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());

  MatrixType id = MatrixType::Identity(rows, cols);
  VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));

  SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  eiSymmUninitialized.compute(symmA, false);
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
  VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());

  // test Tridiagonalization's methods
  Tridiagonalization<MatrixType> tridiag(symmC);
  // FIXME tridiag.matrixQ().adjoint() does not work
  VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());

  if (rows > 1)
  {
    // Test matrix with NaN
    symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
    SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
    VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
  }
}