void test_boostmultiprec()
{
  typedef Matrix<Real,Dynamic,Dynamic> Mat;
  typedef Matrix<std::complex<Real>,Dynamic,Dynamic> MatC;

  std::cout << "NumTraits<Real>::epsilon()         = " << NumTraits<Real>::epsilon() << std::endl;
  std::cout << "NumTraits<Real>::dummy_precision() = " << NumTraits<Real>::dummy_precision() << std::endl;
  std::cout << "NumTraits<Real>::lowest()          = " << NumTraits<Real>::lowest() << std::endl;
  std::cout << "NumTraits<Real>::highest()         = " << NumTraits<Real>::highest() << std::endl;
  std::cout << "NumTraits<Real>::digits10()        = " << NumTraits<Real>::digits10() << std::endl;

  // check stream output
  {
    Mat A(10,10);
    A.setRandom();
    std::stringstream ss;
    ss << A;
  }
  {
    MatC A(10,10);
    A.setRandom();
    std::stringstream ss;
    ss << A;
  }

  for(int i = 0; i < g_repeat; i++) {
    int s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);

    CALL_SUBTEST_1( cholesky(Mat(s,s)) );

    CALL_SUBTEST_2( lu_non_invertible<Mat>() );
    CALL_SUBTEST_2( lu_invertible<Mat>() );
    CALL_SUBTEST_2( lu_non_invertible<MatC>() );
    CALL_SUBTEST_2( lu_invertible<MatC>() );

    CALL_SUBTEST_3( qr(Mat(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
    CALL_SUBTEST_3( qr_invertible<Mat>() );

    CALL_SUBTEST_4( qr<Mat>() );
    CALL_SUBTEST_4( cod<Mat>() );
    CALL_SUBTEST_4( qr_invertible<Mat>() );

    CALL_SUBTEST_5( qr<Mat>() );
    CALL_SUBTEST_5( qr_invertible<Mat>() );

    CALL_SUBTEST_6( selfadjointeigensolver(Mat(s,s)) );

    CALL_SUBTEST_7( eigensolver(Mat(s,s)) );

    CALL_SUBTEST_8( generalized_eigensolver_real(Mat(s,s)) );

    TEST_SET_BUT_UNUSED_VARIABLE(s)
  }

  CALL_SUBTEST_9(( jacobisvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
  CALL_SUBTEST_10(( bdcsvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
}
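
// A minimal, self-contained sketch (not part of the test above) of how the
// multiprecision scalar `Real` exercised by test_boostmultiprec() could be
// defined. It assumes Boost.Multiprecision's cpp_dec_float backend together
// with the <boost/multiprecision/eigen.hpp> interoperability header (which
// supplies the Eigen::NumTraits glue); the 50-digit precision is an arbitrary
// illustrative choice.
#include <iostream>
#include <Eigen/Dense>
#include <boost/multiprecision/cpp_dec_float.hpp>
#include <boost/multiprecision/eigen.hpp>

// 50 decimal digits; expression templates disabled to keep the Eigen interop simple.
typedef boost::multiprecision::number<boost::multiprecision::cpp_dec_float<50>,
                                      boost::multiprecision::et_off> Real;

int main()
{
  Eigen::Matrix<Real, Eigen::Dynamic, Eigen::Dynamic> A(4,4), Ainv;
  A.setRandom();                               // random entries, as in the checks above
  Ainv = A.fullPivLu().inverse();              // dense decompositions work as with double
  std::cout << (A*Ainv).isIdentity(Real(1e-40)) << std::endl;   // expected to print 1
  return 0;
}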
Example #2
// Inverts the kernel matrix K with the decomposition selected by inv_method
// (default: FullPivotLU, declared in the class definition).
template<class TScalarType>
typename GaussianProcess<TScalarType>::MatrixType
GaussianProcess<TScalarType>::InvertKernelMatrix(const typename GaussianProcess<TScalarType>::MatrixType &K,
                                                 typename GaussianProcess<TScalarType>::InversionMethod inv_method,
                                                 bool stable) const{
    // compute core matrix
    if(debug){
        std::cout << "GaussianProcess::InvertKernelMatrix: inverting kernel matrix... ";
        std::cout.flush();
    }

    typename GaussianProcess<TScalarType>::MatrixType core;

    switch(inv_method){
    // standard method: fast but not that accurate
    // uses an LU decomposition (Eigen or LAPACK) for the inversion
    case FullPivotLU:{
        if(debug) std::cout << " (inversion method: FullPivotLU) " << std::flush;
        try{
            if(stable){
                core = K.inverse();
            }
            else{
                if(debug) std::cout << " (using lapack) " << std::flush;
                core = lapack::lu_invert<TScalarType>(K);
            }
        }
        catch(lapack::LAPACKException& e){
            core = K.inverse();
        }
    }
    break;

    // very accurate and very slow method, use it for small problems
    // Uses the two-sided Jacobi SVD decomposition
    case JacobiSVD:{
        if(debug) std::cout << " (inversion method: JacobiSVD) " << std::flush;
        Eigen::JacobiSVD<MatrixType> jacobisvd(K, Eigen::ComputeThinU | Eigen::ComputeThinV);
        if((jacobisvd.singularValues().real().array() < 0).any() && debug){
            std::cout << "GaussianProcess::InvertKernelMatrix: warning: there are negative eigenvalues.";
            std::cout.flush();
        }
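        // invert via the SVD: K = U*S*V^T  =>  K^{-1} = V * S^{-1} * U^T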
        core = jacobisvd.matrixV() * VectorType(1/jacobisvd.singularValues().array()).asDiagonal() * jacobisvd.matrixU().transpose();
    }
    break;

    // accurate method and faster than Jacobi SVD.
    // Uses the bidiagonal divide and conquer SVD
    case BDCSVD:{
        if(debug) std::cout << " (inversion method: BDCSVD) " << std::flush;
#ifdef EIGEN_BDCSVD_H
        Eigen::BDCSVD<MatrixType> bdcsvd(K, Eigen::ComputeThinU | Eigen::ComputeThinV);
        if((bdcsvd.singularValues().real().array() < 0).any() && debug){
            std::cout << "GaussianProcess::InvertKernelMatrix: warning: there are negative eigenvalues.";
            std::cout.flush();
        }
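        // same identity as above: K^{-1} = V * S^{-1} * U^T, from the BDC SVD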
        core = bdcsvd.matrixV() * VectorType(1/bdcsvd.singularValues().array()).asDiagonal() * bdcsvd.matrixU().transpose();
#else
        // guarded because BDCSVD is not shipped with all Eigen releases
        throw std::string("GaussianProcess::InvertKernelMatrix: BDCSVD is not supported by the provided Eigen library.");
#endif

    }
    break;

    // faster than the SVD methods but less stable:
    // tries a LAPACK Cholesky-based inversion first and falls back to an
    // eigendecomposition of the self-adjoint matrix
    case SelfAdjointEigenSolver:{
        if(debug) std::cout << " (inversion method: SelfAdjointEigenSolver) " << std::flush;
        try{
            core = lapack::chol_invert<TScalarType>(K);
        }
        catch(lapack::LAPACKException& e){
            Eigen::SelfAdjointEigenSolver<MatrixType> es;
            es.compute(K);
            VectorType eigenValues = es.eigenvalues().reverse();
            MatrixType eigenVectors = es.eigenvectors().rowwise().reverse();
            if((eigenValues.real().array() < 0).any() && debug){
                std::cout << "GaussianProcess::InvertKernelMatrix: warning: there are negative eigenvalues.";
                std::cout.flush();
            }
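            // reconstruct the inverse from the eigendecomposition:
            // K^{-1} = Q * D^{-1} * Q^T  (Q: eigenvectors, D: diag(eigenvalues))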
            core = eigenVectors * VectorType(1/eigenValues.array()).asDiagonal() * eigenVectors.transpose();
        }
    }
    break;
    }

    if(debug) std::cout << "[done]" << std::endl;
    return core;
}
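
// A small self-contained check (hypothetical usage, not from the project
// above) of the identity the JacobiSVD/BDCSVD branches rely on: for an
// invertible K = U * S * V^T, the inverse is V * S^{-1} * U^T. Eigen::MatrixXd
// stands in for the project's MatrixType here.
#include <iostream>
#include <Eigen/Dense>

int main()
{
  typedef Eigen::MatrixXd MatrixType;

  // Build a symmetric positive-definite stand-in for a kernel matrix.
  const int n = 6;
  MatrixType X = MatrixType::Random(n, n);
  MatrixType K = X * X.transpose() + double(n) * MatrixType::Identity(n, n);

  Eigen::JacobiSVD<MatrixType> svd(K, Eigen::ComputeThinU | Eigen::ComputeThinV);
  MatrixType core = svd.matrixV()
                  * svd.singularValues().cwiseInverse().asDiagonal()
                  * svd.matrixU().transpose();

  // Residual should be at the level of machine precision for a well-conditioned K.
  std::cout << "||K*core - I|| = "
            << (K * core - MatrixType::Identity(n, n)).norm() << std::endl;
  return 0;
}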