Example #1
matrix<Type> invertSparseMatrix(Eigen::SparseMatrix<Type> A){
  matrix<Type> I(A.rows(),A.cols());
  I.setIdentity();
  Eigen::SimplicialLDLT< Eigen::SparseMatrix<Type> > ldlt(A);
  matrix<Type> ans = ldlt.solve(I);
  return ans;
}
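For reference, a minimal standalone sketch of the same idea written with plain Eigen types rather than TMB's matrix<Type>; the matrix values are made up for illustration.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  // Small symmetric positive definite sparse matrix (illustrative values only).
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 5.0; A.insert(2, 2) = 6.0;
  A.insert(0, 1) = 1.0; A.insert(1, 0) = 1.0;   // keep A symmetric
  A.makeCompressed();

  Eigen::MatrixXd I = Eigen::MatrixXd::Identity(3, 3);
  Eigen::SimplicialLDLT< Eigen::SparseMatrix<double> > ldlt(A);
  if (ldlt.info() != Eigen::Success) { std::cerr << "factorization failed\n"; return 1; }

  Eigen::MatrixXd Ainv = ldlt.solve(I);     // dense right-hand side gives the dense inverse
  Eigen::MatrixXd check = A * Ainv;
  std::cout << (check - I).norm() << "\n";  // should print ~0
  return 0;
}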
Example #2
MatrixXd MultivariateFNormalSufficient::compute_PW_direct() const
{
    //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
    Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
    MatrixXd tmp(ldlt.solve(get_W()));
    return tmp;
}
Example #3
double GaussianProcessInterpolationRestraint::get_logdet_hessian() const {
  // compute ldlt
  IMP_Eigen::LDLT<IMP_Eigen::MatrixXd, IMP_Eigen::Upper> ldlt(get_hessian());
  if (!ldlt.isPositive())
    IMP_THROW("Hessian matrix is not positive definite!", ModelException);
  return ldlt.vectorD().array().abs().log().sum();
}
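A standalone sketch, outside the IMP classes, of the log-determinant identity the function above relies on: for A = P' L D L' P with unit-triangular L, |det A| equals the product of the D_ii, so log|det A| is the sum of log|D_ii|. The 2x2 matrix is illustrative.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  Eigen::MatrixXd A(2, 2);
  A << 4.0, 1.0,
       1.0, 3.0;                      // positive definite, det = 11
  Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper> ldlt(A);
  double logdet = ldlt.vectorD().array().abs().log().sum();
  std::cout << logdet << " vs " << std::log(A.determinant()) << "\n";  // both ~2.3979
  return 0;
}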
Example #4
std::vector<double> MultivariateFNormalSufficient::get_norms() const
{
    if (!flag_norms_)
    {
        //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
        Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
        // determinant and derived constants
        //MatrixXd L(ldlt.matrixU());
        //std::cout << "L: " << L << std::endl << std::endl;
        //std::cout << "D: " << ldlt.vectorD() << std::endl << std::endl;
        //double logDetSigma=2*L.diagonal().array().log().sum() ;
        double logDetSigma = ldlt.vectorD().array().abs().log().sum();
        LOG( "MVN:   det(Sigma) = " <<exp(logDetSigma) << std::endl);
        double norm=pow(2*PI*SQUARE(factor_), -double(N_*M_)/2.0)
                    * exp(-double(N_)/2.0*logDetSigma);
        double lnorm=double(N_*M_)/2 * log(2*PI*SQUARE(factor_))
            + double(N_)/2 * logDetSigma;
        LOG( "MVN:   norm = " << norm << " lnorm = "
                << lnorm << std::endl);
        const_cast<MultivariateFNormalSufficient *>(this)
            ->set_norms(norm,lnorm);
    }
    std::vector<double> norms;
    norms.push_back(norm_);
    norms.push_back(lnorm_);
    return norms;
}
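A toy numeric check of the relationship between the two constants computed above: lnorm is the negative log of norm, so norm == exp(-lnorm). N, M, factor and Sigma below are made-up stand-ins for N_, M_, factor_ and get_Sigma().

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  const double PI = 3.141592653589793;
  const int N = 3, M = 2;
  const double factor = 1.5;
  Eigen::MatrixXd Sigma(2, 2);
  Sigma << 2.0, 0.3,
           0.3, 1.0;
  double logDetSigma =
      Eigen::LDLT<Eigen::MatrixXd>(Sigma).vectorD().array().abs().log().sum();
  double norm = std::pow(2 * PI * factor * factor, -double(N * M) / 2.0)
                * std::exp(-double(N) / 2.0 * logDetSigma);
  double lnorm = double(N * M) / 2 * std::log(2 * PI * factor * factor)
                 + double(N) / 2 * logDetSigma;
  std::cout << norm << " == " << std::exp(-lnorm) << "\n";
  return 0;
}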
Example #5
Vector2d leastSquareSolverCalib::pointAlignerLoop( Vector2d &x,  MatrixXd &Z, int iterations,  Vector2d &result)
{
    result=x;
    MatrixXd H(2,2);
    H.setZero();

    MatrixXd b(2,1);
    b.setZero();

    Vector2d X;
    std::cout << Z.transpose()<< std::endl;
    for(int i = 0; i < iterations; i++){
        std::cout<<"iteration "<<i <<std::endl;
        X=x;
        for(int j=0;j<Z.cols();j++){

            Matrix2d J = computeJacobian(j,Z);
            Vector2d e=computeError(j,Z);
            std::cout << "error: "<< e<< std::endl<<std::endl;
            // accumulate the Gauss-Newton normal equations
            H+=J.transpose()*J;
            b+=J.transpose()*e;
        }
    }
    Vector2d dx;
    std::cout << "H: "<<std::endl<<H<<std::endl<<std::endl<<std::endl;
    std::cout << "b: "<<std::endl<<b<<std::endl;
    LDLT<MatrixXd> ldlt(-H);
    dx=ldlt.solve(b); // solve for the update step using an LDLT factorization
    return dx;

}
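A compact sketch of the Gauss-Newton step the loop above assembles: H = sum of J'J, b = sum of J'e, and the update dx = -H^{-1} b. Writing it as LDLT(H).solve(-b) gives the same step as the LDLT(-H).solve(b) form used above. All values are illustrative.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d J;
  J << 1.0, 0.2,
       0.0, 1.0;                     // Jacobian of one residual block (made up)
  Eigen::Vector2d e(0.5, -0.1);      // residual of that block

  Eigen::Matrix2d H = J.transpose() * J;   // normal-equation matrix
  Eigen::Vector2d b = J.transpose() * e;

  Eigen::Vector2d dx = Eigen::LDLT<Eigen::Matrix2d>(H).solve(-b);
  std::cout << "dx = " << dx.transpose() << "\n";
  return 0;
}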
Example #6
IMP_Eigen::MatrixXd
GaussianProcessInterpolation::get_posterior_covariance_matrix(FloatsList x)
    const {
  unsigned N(x.size());
  IMP_Eigen::MatrixXd Wpri(M_, N);
  for (unsigned i = 0; i < N; i++) Wpri.col(i) = get_wx_vector(x[i]);
  IMP_Eigen::LDLT<IMP_Eigen::MatrixXd, IMP_Eigen::Upper> ldlt(get_ldlt());
  // we can now use covariance_function_ because it is up to date
  IMP_Eigen::MatrixXd Wpost((*covariance_function_)(x));
  return Wpost - Wpri.transpose() * ldlt.solve(Wpri);
}
Example #7
template <typename T>
inline
Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>
inverse_spd(const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>& m) {
  validate_square(m,"inverse_spd");
  validate_symmetric(m,"inverse_spd");
  // Average m with its transpose to remove round-off asymmetry before factorizing.
  Eigen::LDLT< Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> > ldlt(0.5*(m+m.transpose()));
  if (ldlt.info() != Eigen::Success)
    throw std::domain_error("Error in inverse_spd, LDLT factorization failed");
  if (!ldlt.isPositive() || (ldlt.vectorD().array() <= 0).any())
    throw std::domain_error("Error in inverse_spd, matrix not positive definite");
  return ldlt.solve(Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Identity(m.rows(),m.cols()));
}
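A standalone illustration of why the two guards above matter: Eigen's LDLT will happily factor an indefinite matrix, so isPositive() and the sign of vectorD() have to be checked before the result is treated as the inverse of an SPD matrix. Plain Eigen only; the matrices are made up.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd spd(2, 2), indef(2, 2);
  spd   << 4.0, 1.0,
           1.0, 3.0;       // positive definite
  indef << 1.0, 2.0,
           2.0, 1.0;       // symmetric but indefinite (eigenvalues 3 and -1)

  Eigen::LDLT<Eigen::MatrixXd> good(spd), bad(indef);
  std::cout << good.isPositive() << " "
            << (good.vectorD().array() > 0).all() << "\n";   // prints: 1 1
  std::cout << bad.isPositive() << " "
            << (bad.vectorD().array() > 0).all() << "\n";    // prints: 0 0
  return 0;
}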
Example #8
MatrixType CovarianceEstimator::Evaluate( const VectorType& input )
{
	_psdPort.SetOutput( input );
	_psd.Invalidate();
	_psdPort.Foreprop();
	MatrixType out = _psd.GetOutput();
	if( !_covarianceOutput )
	{
		Eigen::LDLT<MatrixType> ldlt( out );
		out = ldlt.solve( MatrixType::Identity( out.rows(), out.cols() ) );
	}
	return out;
}
Example #9
MatrixXd MultivariateFNormalSufficient::get_P() const
{
    if (!flag_P_)
    {
        //inverse
        //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
        Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt(get_ldlt());
        LOG( "MVN:   solving for inverse" << std::endl);
        const_cast<MultivariateFNormalSufficient *>(this)
            ->set_P(ldlt.solve(MatrixXd::Identity(M_,M_)));
    }
    return P_;
}
Example #10
bool gcta::comput_inverse_logdet_LDLT(eigenMatrix &Vi, eigenVector &prev_varcmp, double &logdet)
{
    int i=0, n=Vi.cols();
    LDLT<eigenMatrix> ldlt(Vi);
    eigenVector d=ldlt.vectorD();
    if(d.minCoeff()<0){
        if(prev_varcmp.minCoeff()>0) return false;
        else throw("Error: the matrix V becomes negative-definite because one of the variance components is negative.\nPlease re-run the analysis without the --reml-no-constrain option.");
    }
    logdet=0.0;
    for(i=0; i<n; i++) logdet+=log(d[i]);
    Vi.setIdentity();
    ldlt.solveInPlace(Vi);
    return true;
}
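A standalone sketch (plain Eigen, without the GCTA typedefs) of the same pattern: one LDLT factorization supplies both log|V| from vectorD() and V^{-1} via solveInPlace on an identity matrix. The 2x2 values are illustrative.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  Eigen::MatrixXd V(2, 2);
  V << 2.0, 0.5,
       0.5, 1.0;                              // positive definite, det = 1.75
  Eigen::LDLT<Eigen::MatrixXd> ldlt(V);

  Eigen::VectorXd d = ldlt.vectorD();
  double logdet = 0.0;
  for (int i = 0; i < d.size(); ++i) logdet += std::log(d[i]);

  Eigen::MatrixXd Vi = Eigen::MatrixXd::Identity(2, 2);
  ldlt.solveInPlace(Vi);                      // Vi now holds V^{-1}

  std::cout << logdet << "\n";                // ~log(1.75)
  std::cout << Vi * V << "\n";                // ~identity
  return 0;
}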
Example #11
//Eigen::LLT<MatrixXd, Eigen::Upper>
Eigen::LDLT<MatrixXd, Eigen::Upper>
MultivariateFNormalSufficient::get_ldlt() const
{
    if (!flag_ldlt_)
    {
        LOG( "MVN:   computing Cholesky decomposition" << std::endl);
        // compute Cholesky decomposition for determinant and inverse
        //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt(get_Sigma());
        Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt(get_Sigma());
        //if (ldlt.info() != Eigen::Success)
        CHECK(ldlt.isPositive(),
            "Sigma matrix is not positive semidefinite!");
        const_cast<MultivariateFNormalSufficient *>(this)->set_ldlt(ldlt);
    }
    return ldlt_;
}
Example #12
matrix<Type> discrLyap(matrix<Type> A_){
  matrix<Type> I_(A_.rows(),A_.cols());
  I_.setIdentity();
  /* Sparse representations */
  Eigen::SparseMatrix<Type> A=asSparseMatrix(A_);
  Eigen::SparseMatrix<Type> I=asSparseMatrix(I_);
  /* Kronecker */
  Eigen::SparseMatrix<Type> AxA=kronecker(A,A);
  Eigen::SparseMatrix<Type> IxI=kronecker(I,I);
  matrix<Type> vecI=I_.vec().matrix();
  Eigen::SparseMatrix<Type> C=IxI-AxA;
  /* Solve the system C*vecV = vecI.
     Note: no assumptions on the symmetry or eigenvalue range of C can be made.
     Therefore rewrite it as (C'*C)*vecV = (C'*vecI). Since (C'*C) is symmetric
     positive semidefinite, the LDL' factorization can be applied.
   */
  Eigen::SimplicialLDLT< Eigen::SparseMatrix<Type> > ldlt(C.transpose()*C);
  matrix<Type> z = ldlt.solve(C.transpose()*vecI);
  matrix<Type> V(A_.rows(),A_.cols());
  for(int i=0;i<z.size();i++)V(i)=z(i);
  return V;
}
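A self-contained sketch of the normal-equation trick described in the comment above, detached from the Lyapunov setting: an unsymmetric system C*x = b is recast as (C'*C)*x = C'*b so that SimplicialLDLT applies; for invertible C both systems have the same solution. Values are made up.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::MatrixXd Cd(2, 2);
  Cd << 1.0, 2.0,
        3.0, 1.0;                                  // non-symmetric, invertible
  Eigen::SparseMatrix<double> C = Cd.sparseView();

  Eigen::VectorXd b(2);
  b << 1.0, 0.0;

  Eigen::SparseMatrix<double> CtC = C.transpose() * C;   // symmetric positive definite
  Eigen::SimplicialLDLT< Eigen::SparseMatrix<double> > ldlt(CtC);
  Eigen::VectorXd rhs = C.transpose() * b;
  Eigen::VectorXd x = ldlt.solve(rhs);

  std::cout << (Cd * x - b).norm() << "\n";        // ~0: same solution as C*x = b
  return 0;
}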
Example #13
//Eigen::LLT<MatrixXd, Eigen::Upper>
Eigen::LDLT<MatrixXd, Eigen::Upper>
MultivariateFNormalSufficient::get_ldlt() const
{
    if (!flag_ldlt_)
    {
        timer_.start(CHOLESKY);
        IMP_LOG(TERSE, "MVN:   computing Cholesky decomposition" << std::endl);
        // compute Cholesky decomposition for determinant and inverse
        //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt(get_Sigma());
        Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt(get_Sigma());
        //if (ldlt.info() != Eigen::Success)
        if (!ldlt.isPositive())
        {
            std::cout << "Sigma" << std::endl;
            std::cout << get_Sigma() << std::endl;
            IMP_THROW("Sigma matrix is not positive semidefinite!",
                    ModelException);
        }
        const_cast<MultivariateFNormalSufficient *>(this)->set_ldlt(ldlt);
        timer_.stop(CHOLESKY);
    }
    return ldlt_;
}
Example #14
template<typename Scalar> void sparse_solvers(int rows, int cols)
{
  double density = std::max(8./(rows*cols), 0.01);
  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
  typedef Matrix<Scalar,Dynamic,1> DenseVector;
  // Scalar eps = 1e-6;

  DenseVector vec1 = DenseVector::Random(rows);

  std::vector<Vector2i> zeroCoords;
  std::vector<Vector2i> nonzeroCoords;

  // test triangular solver
  {
    DenseVector vec2 = vec1, vec3 = vec1;
    SparseMatrix<Scalar> m2(rows, cols);
    DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);

    // lower
    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords);
    VERIFY_IS_APPROX(refMat2.template marked<LowerTriangular>().solveTriangular(vec2),
                     m2.template marked<LowerTriangular>().solveTriangular(vec3));

    // lower - transpose
    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords);
    VERIFY_IS_APPROX(refMat2.template marked<LowerTriangular>().transpose().solveTriangular(vec2),
                     m2.template marked<LowerTriangular>().transpose().solveTriangular(vec3));

    // upper
    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, &zeroCoords, &nonzeroCoords);
    VERIFY_IS_APPROX(refMat2.template marked<UpperTriangular>().solveTriangular(vec2),
                     m2.template marked<UpperTriangular>().solveTriangular(vec3));

    // upper - transpose
    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, &zeroCoords, &nonzeroCoords);
    VERIFY_IS_APPROX(refMat2.template marked<UpperTriangular>().transpose().solveTriangular(vec2),
                     m2.template marked<UpperTriangular>().transpose().solveTriangular(vec3));
  }

  // test LLT
  {
    // TODO fix the issue with complex (see SparseLLT::solveInPlace)
    SparseMatrix<Scalar> m2(rows, cols);
    DenseMatrix refMat2(rows, cols);

    DenseVector b = DenseVector::Random(cols);
    DenseVector refX(cols), x(cols);

    initSPD(density, refMat2, m2);

    refMat2.llt().solve(b, &refX);
    typedef SparseMatrix<Scalar,LowerTriangular|SelfAdjoint> SparseSelfAdjointMatrix;
    if (!NumTraits<Scalar>::IsComplex)
    {
      x = b;
      SparseLLT<SparseSelfAdjointMatrix> (m2).solveInPlace(x);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: default");
    }
    #ifdef EIGEN_CHOLMOD_SUPPORT
    x = b;
    SparseLLT<SparseSelfAdjointMatrix,Cholmod>(m2).solveInPlace(x);
    VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod");
    #endif
    if (!NumTraits<Scalar>::IsComplex)
    {
      #ifdef EIGEN_TAUCS_SUPPORT
      x = b;
      SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,IncompleteFactorization).solveInPlace(x);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (IncompleteFactorization)");
      x = b;
      SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalMultifrontal).solveInPlace(x);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalMultifrontal)");
      x = b;
      SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalLeftLooking).solveInPlace(x);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalLeftLooking)");
      #endif
    }
  }

  // test LDLT
  if (!NumTraits<Scalar>::IsComplex)
  {
    // TODO fix the issue with complex (see SparseLDLT::solveInPlace)
    SparseMatrix<Scalar> m2(rows, cols);
    DenseMatrix refMat2(rows, cols);

    DenseVector b = DenseVector::Random(cols);
    DenseVector refX(cols), x(cols);

    //initSPD(density, refMat2, m2);
    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, 0, 0);
    refMat2 += refMat2.adjoint();
    refMat2.diagonal() *= 0.5;

    refMat2.ldlt().solve(b, &refX);
    typedef SparseMatrix<Scalar,UpperTriangular|SelfAdjoint> SparseSelfAdjointMatrix;
    x = b;
    SparseLDLT<SparseSelfAdjointMatrix> ldlt(m2);
    if (ldlt.succeeded())
      ldlt.solveInPlace(x);
    VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LDLT: default");
  }

  // test LU
  {
    static int count = 0;
    SparseMatrix<Scalar> m2(rows, cols);
    DenseMatrix refMat2(rows, cols);

    DenseVector b = DenseVector::Random(cols);
    DenseVector refX(cols), x(cols);

    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag, &zeroCoords, &nonzeroCoords);

    LU<DenseMatrix> refLu(refMat2);
    refLu.solve(b, &refX);
    #if defined(EIGEN_SUPERLU_SUPPORT) || defined(EIGEN_UMFPACK_SUPPORT)
    Scalar refDet = refLu.determinant();
    #endif
    x.setZero();
    // // SparseLU<SparseMatrix<Scalar> > (m2).solve(b,&x);
    // // VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LU: default");
    #ifdef EIGEN_SUPERLU_SUPPORT
    {
      x.setZero();
      SparseLU<SparseMatrix<Scalar>,SuperLU> slu(m2);
      if (slu.succeeded())
      {
        if (slu.solve(b,&x)) {
          VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LU: SuperLU");
        }
        // std::cerr << refDet << " == " << slu.determinant() << "\n";
        if (count==0) {
          VERIFY_IS_APPROX(refDet,slu.determinant()); // FIXME det is not very stable for complex
        }
      }
    }
    #endif
    #ifdef EIGEN_UMFPACK_SUPPORT
    {
      // check solve
      x.setZero();
      SparseLU<SparseMatrix<Scalar>,UmfPack> slu(m2);
      if (slu.succeeded()) {
        if (slu.solve(b,&x)) {
          if (count==0) {
            VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LU: umfpack");  // FIXME solve is not very stable for complex
          }
        }
        VERIFY_IS_APPROX(refDet,slu.determinant());
        // TODO check the extracted data
        //std::cerr << slu.matrixL() << "\n";
      }
    }
    #endif
    count++;
  }

}
Example #15
void GaussianProcessInterpolation::compute_Omi() {
  // get inverse
  IMP_LOG_TERSE("  compute_Omi: inverse" << std::endl);
  IMP_Eigen::LDLT<IMP_Eigen::MatrixXd, IMP_Eigen::Upper> ldlt(get_ldlt());
  Omi_ = ldlt.solve(IMP_Eigen::MatrixXd::Identity(M_, M_));
}
Example #16
IMP_Eigen::MatrixXd GaussianProcessInterpolation::get_dcov_dOm(Floats q) const {
  IMP_Eigen::VectorXd wq(get_wx_vector(q));
  IMP_Eigen::LDLT<IMP_Eigen::MatrixXd, IMP_Eigen::Upper> ldlt(get_ldlt());
  IMP_Eigen::VectorXd ret(ldlt.solve(wq));
  return ret * ret.transpose();
}
Example #17
template<typename Scalar> void sparse_ldlt(int rows, int cols)
{
  static bool odd = true;
  odd = !odd;
  double density = (std::max)(8./(rows*cols), 0.01);
  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
  typedef Matrix<Scalar,Dynamic,1> DenseVector;

  SparseMatrix<Scalar> m2(rows, cols);
  DenseMatrix refMat2(rows, cols);

  DenseVector b = DenseVector::Random(cols);
  DenseVector refX(cols), x(cols);

  initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, 0, 0);
  
  SparseMatrix<Scalar> m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
  DenseMatrix refMat3 = refMat2 * refMat2.adjoint();
  
  refX = refMat3.template selfadjointView<Upper>().ldlt().solve(b);
  typedef SparseMatrix<Scalar,Upper|SelfAdjoint> SparseSelfAdjointMatrix;
  x = b;
  SparseLDLT<SparseSelfAdjointMatrix> ldlt(m3);
  if (ldlt.succeeded())
    ldlt.solveInPlace(x);
  else
    std::cerr << "warning LDLT failed\n";

  VERIFY_IS_APPROX(refMat3.template selfadjointView<Upper>() * x, b);
  VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LDLT: default");
  
#ifdef EIGEN_CHOLMOD_SUPPORT
  {
    x = b;
    SparseLDLT<SparseSelfAdjointMatrix, Cholmod> ldlt2(m3);
    if (ldlt2.succeeded())
    {
      ldlt2.solveInPlace(x);
      VERIFY_IS_APPROX(refMat3.template selfadjointView<Upper>() * x, b);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LDLT: cholmod solveInPlace");
      
      x = ldlt2.solve(b);
      VERIFY_IS_APPROX(refMat3.template selfadjointView<Upper>() * x, b);
      VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LDLT: cholmod solve");
    }
    else
      std::cerr << "warning LDLT failed\n";
  }
#endif
  
  // new Simplicial LLT
  
  
  // new API
  {
    SparseMatrix<Scalar> m2(rows, cols);
    DenseMatrix refMat2(rows, cols);

    DenseVector b = DenseVector::Random(cols);
    DenseVector ref_x(cols), x(cols);
    DenseMatrix B = DenseMatrix::Random(rows,cols);
    DenseMatrix ref_X(rows,cols), X(rows,cols);

    initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, 0, 0);

    for(int i=0; i<rows; ++i)
      m2.coeffRef(i,i) = refMat2(i,i) = internal::abs(internal::real(refMat2(i,i)));
    
    
    SparseMatrix<Scalar> m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
    DenseMatrix refMat3 = refMat2 * refMat2.adjoint();
    
    m3_lo.template selfadjointView<Lower>().rankUpdate(m2,0);
    m3_up.template selfadjointView<Upper>().rankUpdate(m2,0);
    
    // with a single vector as the rhs
    ref_x = refMat3.template selfadjointView<Lower>().llt().solve(b);

    x = SimplicialCholesky<SparseMatrix<Scalar>, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
    VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, lower, single dense rhs");
    
    x = SimplicialCholesky<SparseMatrix<Scalar>, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
    VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, upper, single dense rhs");
    
    x = SimplicialCholesky<SparseMatrix<Scalar>, Lower>(m3_lo).solve(b);
    VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, lower only, single dense rhs");
    
    x = SimplicialCholesky<SparseMatrix<Scalar>, Upper>(m3_up).solve(b);
    VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, upper only, single dense rhs");
    
    
    // with multiple rhs
    ref_X = refMat3.template selfadjointView<Lower>().llt().solve(B);

    X = SimplicialCholesky<SparseMatrix<Scalar>, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
    VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, lower, multiple dense rhs");
    
    X = SimplicialCholesky<SparseMatrix<Scalar>, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
    VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, upper, multiple dense rhs");
    
    
    // with a sparse rhs
//     SparseMatrix<Scalar> spB(rows,cols), spX(rows,cols);
//     B.diagonal().array() += 1;
//     spB = B.sparseView(0.5,1);
//     
//     ref_X = refMat3.template selfadjointView<Lower>().llt().solve(DenseMatrix(spB));
// 
//     spX = SimplicialCholesky<SparseMatrix<Scalar>, Lower>(m3).solve(spB);
//     VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
//     
//     spX = SimplicialCholesky<SparseMatrix<Scalar>, Upper>(m3).solve(spB);
//     VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
  }
  
  
  
//   for(int i=0; i<rows; ++i)
//     m2.coeffRef(i,i) = refMat2(i,i) = internal::abs(internal::real(refMat2(i,i)));
// 
//   refX = refMat2.template selfadjointView<Upper>().ldlt().solve(b);
//   typedef SparseMatrix<Scalar,Upper|SelfAdjoint> SparseSelfAdjointMatrix;
//   x = b;
//   SparseLDLT<SparseSelfAdjointMatrix> ldlt(m2);
//   if (ldlt.succeeded())
//     ldlt.solveInPlace(x);
//   else
//     std::cerr << "warning LDLT failed\n";
// 
//   VERIFY_IS_APPROX(refMat2.template selfadjointView<Upper>() * x, b);
//   VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LDLT: default");


}
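The two test functions above exercise older Eigen interfaces (marked<>(), SparseLLT/SparseLDLT, SimplicialCholesky with setMode) that have since been superseded in current Eigen 3 releases. For comparison, a minimal sketch of the same factor-and-solve flow with the present-day SimplicialLDLT class; the matrix and right-hand side are made up.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 3.0; A.insert(1, 1) = 2.0;
  A.insert(0, 1) = 1.0; A.insert(1, 0) = 1.0;   // symmetric positive definite
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 1.0, 2.0;

  Eigen::SimplicialLDLT< Eigen::SparseMatrix<double>, Eigen::Lower > solver;
  solver.compute(A);
  if (solver.info() != Eigen::Success) { std::cerr << "factorization failed\n"; return 1; }

  Eigen::VectorXd x = solver.solve(b);
  Eigen::VectorXd residual = A * x;
  std::cout << (residual - b).norm() << "\n";   // ~0
  return 0;
}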