Example #1
void eigs_gen_Cpp(MatrixXd &M, VectorXd &init_resid, int k, int m,
                  double &time_used, double &prec_err)
{
    double start, end;
    start = get_wall_time();

    DenseGenMatProd<double> op(M);
    GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd<double> > eigs(&op, k, m);
    eigs.init(init_resid.data());

    int nconv = eigs.compute();
    int niter = eigs.num_iterations();
    int nops = eigs.num_operations();
    // std::cout << "nops = " << nops << std::endl;

    VectorXcd evals = eigs.eigenvalues();
    MatrixXcd evecs = eigs.eigenvectors();

/*
    std::cout << "computed eigenvalues D = \n" << evals.transpose() << std::endl;
    std::cout << "first 5 rows of computed eigenvectors U = \n" << evecs.topRows<5>() << std::endl;
    std::cout << "nconv = " << nconv << std::endl;
    std::cout << "niter = " << niter << std::endl;
    std::cout << "nops = " << nops << std::endl;

    MatrixXcd err = M * evecs - evecs * evals.asDiagonal();
    std::cout << "||AU - UD||_inf = " << err.array().abs().maxCoeff() << std::endl;
*/

    end = get_wall_time();
    time_used = (end - start) * 1000;

    MatrixXcd err = M * evecs - evecs * evals.asDiagonal();
    prec_err = err.cwiseAbs().maxCoeff();
}
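For context, a hypothetical driver for the routine above might look like the sketch below. Nothing here comes from the original benchmark: the sizes, the main() function, and the use of <iostream> are assumptions, and it presumes get_wall_time() plus the Eigen and Spectra-style headers used by eigs_gen_Cpp are already included.

// Hypothetical driver (not part of the original example); sizes chosen so that k < m <= n.
int main() {
    const int n = 1000, k = 10, m = 30;      // k requested eigenvalues, m-dimensional Krylov subspace
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::VectorXd init_resid = Eigen::VectorXd::Random(n);

    double time_used = 0.0, prec_err = 0.0;
    eigs_gen_Cpp(M, init_resid, k, m, time_used, prec_err);

    std::cout << "time (ms): " << time_used
              << ", ||AU - UD||_max: " << prec_err << std::endl;
    return 0;
}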
Example #2
        /*!
         Applies the inverse of this node's factor to the corresponding block of 'matrix', in place.
         */
	void apply_Inverse(MatrixXcd& matrix, int mStart) {
		int n	=	matrix.cols();
		int start	=	nStart-mStart;
		if (isLeaf	==	true) {
			matrix.block(start, 0, nSize, n)	=	Kinverse.solve(matrix.block(start, 0, nSize, n));
		}
		else if (isLeaf	==	false) {
			//	Computes temp		=	Vinverse*matrix

			MatrixXcd temp(nRank[0]+nRank[1], n);

			temp.block(0, 0, nRank[0] , n)		=	Vinverse[1]*matrix.block(start+child[0]->nSize, 0 , child[1]->nSize, n);

			temp.block(nRank[0], 0, nRank[1] , n)	=	Vinverse[0]*matrix.block(start, 0 , child[0]->nSize, n);

			//	Computes tempSolve	=	Kinverse\temp

			MatrixXcd tempSolve	=	Kinverse.solve(temp);

			//	Computes matrix		=	matrix-Uinverse*tempSolve

			matrix.block(start, 0, child[0]->nSize, n)			=	matrix.block(start, 0, child[0]->nSize, n)	-	Uinverse[0]*tempSolve.block(0, 0, nRank[0], n);
			matrix.block(start + child[0]->nSize, 0, child[1]->nSize, n)	=	matrix.block(start + child[0]->nSize, 0, child[1]->nSize, n)	-	Uinverse[1]*tempSolve.block(nRank[0], 0, nRank[1], n);
		}
	};
Example #3
Measurement::Measurement(MatrixXcd observable)
{
    ComplexEigenSolver<MatrixXcd> solver(observable);
    MatrixXcd vectors = solver.eigenvectors();
    for (int i = 0; i < vectors.cols(); ++i)
        addOperator(vectors.col(i) * vectors.col(i).adjoint(), i_to_string(i));  // adjoint() (not transpose()) keeps the rank-one projector Hermitian
    _checkOperatorsAreValid();
}
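As a sanity check on the idea behind this constructor, the standalone sketch below (my own illustration, not part of the original class) builds the rank-one projectors of a Hermitian observable and verifies that they resolve the identity. It uses SelfAdjointEigenSolver instead of ComplexEigenSolver so the eigenvectors are guaranteed orthonormal, and forms each projector with adjoint().

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
    MatrixXcd X = MatrixXcd::Random(4, 4);
    MatrixXcd observable = X + X.adjoint();               // Hermitian observable

    SelfAdjointEigenSolver<MatrixXcd> solver(observable);
    MatrixXcd vectors = solver.eigenvectors();             // orthonormal columns

    MatrixXcd sum = MatrixXcd::Zero(4, 4);
    for (int i = 0; i < vectors.cols(); ++i)
        sum += vectors.col(i) * vectors.col(i).adjoint();  // rank-one projector |v_i><v_i|

    std::cout << "||sum of projectors - I|| = "
              << (sum - MatrixXcd::Identity(4, 4)).norm() << std::endl;
    return 0;
}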
Example #4
void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  // Calls Fortran ZGEMM: c <- cdone*a*b + cdone*c ('notrans' and 'cdone' are declared elsewhere).
  zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
         const_cast<double*>((const double*)a.data()),&lda,
         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
         (double*)c.data(),&ldc);
}
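The helper relies on a few symbols that are not shown in the snippet. Below is a sketch of the declarations it presumably expects elsewhere in the file; the names come from the call, but the prototype and the values assigned to notrans and cdone are assumptions, and the program must be linked against a BLAS that provides zgemm_.

#include <complex>

// Fortran ZGEMM: C <- alpha*op(A)*op(B) + beta*C, all arguments passed by reference.
extern "C" void zgemm_(const char* transa, const char* transb,
                       const int* m, const int* n, const int* k,
                       const double* alpha, const double* a, const int* lda,
                       const double* b, const int* ldb,
                       const double* beta, double* c, const int* ldc);

char notrans = 'N';                    // no transposition of a or b
std::complex<double> cdone(1.0, 0.0);  // passed as both alpha and beta, so blas_gemm computes c += a*b

With these in place, comparing against Eigen's own product, e.g. (c - a*b).norm(), is an easy way to check the call.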
Example #5
complex<double> compute_Determinant(MatrixXcd& K) {
  FullPivLU<MatrixXcd> Kinverse;
  Kinverse.compute(K);
  complex<double> determinant(0.0, 0.0);        //      Stays zero when the matrix is empty.
  if (K.rows()>0) {        //      Check needed when the matrix is predominantly diagonal.
    MatrixXcd LU    =       Kinverse.matrixLU();
    determinant     =       log(LU(0,0));
    for (int k=1; k<K.rows(); ++k) {
      determinant+=log(LU(k,k));
    }
    //              Note: the +/-1 signs of the row/column permutations are not accounted for here.
    //              Previous version which had some underflow.
    //              determinant	=	log(abs(K.determinant()));
  }
  return determinant;
}
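The sum-of-logs trick in compute_Determinant is what avoids the underflow mentioned in the comment. A minimal self-contained sketch of the same idea (my own illustration, assuming Eigen 3) on a matrix whose determinant underflows in double precision:

#include <Eigen/Dense>
#include <complex>
#include <iostream>
using namespace Eigen;

int main() {
    const int n = 400;
    MatrixXcd K = MatrixXcd::Identity(n, n) * std::complex<double>(0.01, 0.0);

    // Naive approach: det(K) = 1e-800 underflows to 0, so log(det) is -inf.
    std::complex<double> naive = std::log(K.determinant());

    // Summing the logs of the diagonal of U from the LU factorization stays finite.
    FullPivLU<MatrixXcd> lu(K);
    std::complex<double> logDet(0.0, 0.0);
    for (int k = 0; k < n; ++k)
        logDet += std::log(lu.matrixLU()(k, k));
    // As in compute_Determinant above, the +/-1 permutation signs are ignored.

    std::cout << "log(det(K)) naive:  " << naive  << std::endl;
    std::cout << "log(det(K)) via LU: " << logDet << std::endl;   // about 400*log(0.01), roughly -1842
    return 0;
}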
Example #6
	void compute_K() {
		if (isLeaf	==	false) {
			int m0	=	V[0].rows();
			int m1	=	V[1].rows();
			K	=	MatrixXcd::Identity(m0+m1, m0+m1);

			K.block(0, m1, m1, m0)	=	Vinverse[1]*Uinverse[1];
			K.block(m1, 0, m0, m1)	=	Vinverse[0]*Uinverse[0];
		}
	};
Example #7
int main(int, char**)
{
  cout.precision(3);
  MatrixXcd X = MatrixXcd::Random(4,4);
  MatrixXcd A = X + X.adjoint();
  cout << "Here is a random self-adjoint 4x4 matrix:" << endl << A << endl << endl;

  Tridiagonalization<MatrixXcd> triOfA(A);
  MatrixXd T = triOfA.matrixT();
  cout << "The tridiagonal matrix T is:" << endl << T << endl << endl;

  cout << "We can also extract the diagonals of T directly ..." << endl;
  VectorXd diag = triOfA.diagonal();
  cout << "The diagonal is:" << endl << diag << endl;
  VectorXd subdiag = triOfA.subDiagonal();
  cout << "The subdiagonal is:" << endl << subdiag << endl;

  return 0;
}
Example #8
PyObject* calc_Gavg(PyObject *pyw, const double &delta, const double &mu, 
    PyObject *pySE, PyObject *pyTB, const double &Hf, PyObject *py_bp, 
    PyObject *py_wf, const int nthreads) {
  try {
    if (nthreads > 0) omp_set_num_threads(nthreads);
    MatrixXcd SE;
    VectorXcd w;
    VectorXd bp, wf;
    VectorXd SlaterKosterCoeffs;

    numpy::from_numpy(pySE, SE);
    numpy::from_numpy(pyTB, SlaterKosterCoeffs);
    numpy::from_numpy(py_bp, bp);
    numpy::from_numpy(py_wf, wf);
    numpy::from_numpy(pyw, w);
    assert(w.size() == SE.rows());

    double t = SlaterKosterCoeffs(0);
    VectorXd xl(DIM), xh(DIM);
    xl << -2*t;
    xh << 2*t;
    double BZ1 = 1.;

    MatrixXcd result(SE.rows(), MSIZE*MSIZE);
    VectorXcd tmp(MSIZE);
    GreenIntegrand green_integrand(w, mu, SE, SlaterKosterCoeffs, Hf);
    result.setZero();
    for (int n = 0; n < w.size(); ++n) {
      green_integrand.set_data(n);
      tmp = md_int::Integrate(xl, xh, green_integrand, bp, wf);
      for (int i = 0; i < MSIZE; ++i)
        result(n, MSIZE*i + i) = tmp(i);
    }
    result /= BZ1;
    return numpy::to_numpy(result);
  } catch (const char *str) {
    std::cerr << str << std::endl;
    Py_INCREF(Py_None);   // returning Py_None hands a reference to the caller, so it must be incremented
    return Py_None;
  }
}
Example #9
  void computeAlphaGradHess(cV& D_inv_m,
			    cM& D00, cM& D10, cM& D20, cM& D11,
			    cV& m0,  cV&m1,   cV& m2,
			    CD* a, 
			    VectorXcd* g, MatrixXcd* h) {
  
    //    VectorXcd D_inv_m = D00.fullPivLu().solve(m0);
    VectorXcd tmp = VectorXcd::Zero(m0.rows());

    // alpha
    *a = (m0.array() * D_inv_m.array()).sum();

    // grad
    Calc_a_Aj_b(D_inv_m, D10, D_inv_m, g);
    Calc_ai_b(m1, D_inv_m, &tmp);
    (*g) *= -1;
    (*g) += 2 * tmp;

    // hess
    MatrixXcd Dinv = D00.inverse();
    MatrixXcd tmp1 = (2*m2.array()*D_inv_m.array()).matrix().asDiagonal();

    MatrixXcd tmp2 = 2 * (m1 * m1.transpose()).array() * Dinv.array();
    MatrixXcd tmp3;
    Calc_a_Aij_a(D_inv_m, D20, D11, &tmp3);
    tmp3 *= -1;
    MatrixXcd tmp4;
    Calc_ai_A_Bj_b(m1, Dinv, D10, D_inv_m, &tmp4);
    tmp4 *= -2;
    MatrixXcd tmp5;
    tmp5 = tmp4.transpose();
    MatrixXcd tmp6;
    Calc_a_Ai_B_Aj_b(D_inv_m, D10, Dinv, D_inv_m, &tmp6);
    tmp6 *= 2;
    *h = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6;
  }
Example #10
        /*!
         Matrix-matrix product: adds this node's contribution of K*x to b.
         */
	void matrix_Matrix_Product(MatrixXcd& x, MatrixXcd& b) {
		int n	=	x.cols();

		if (isLeaf	==	true) {
			b.block(nStart, 0, nSize, n)	=	b.block(nStart, 0, nSize, n)	+	K*x.block(nStart, 0, nSize, n);
		}
		else if (isLeaf	==	false) {
			int n0	=	child[0]->nStart;
			int n1	=	child[1]->nStart;
			int m0	=	child[0]->nSize;
			int m1	=	child[1]->nSize;
			b.block(n0, 0, m0, n)	=	b.block(n0, 0, m0, n)	+	U[0]*(V[1]*x.block(n1, 0, m1, n));
			b.block(n1, 0, m1, n)	=	b.block(n1, 0, m1, n)	+	U[1]*(V[0]*x.block(n0, 0, m0, n));
		}
	};
Example #11
bool Ellipsoid::overlapsWith(Ellipsoid ellipsoid, bool &ellipsoidMatrixDecompositionIsSuccessful)
{
    // Construct translation matrix

    MatrixXd T1 = MatrixXd::Identity(Ndimensions+1,Ndimensions+1);
    MatrixXd T2 = MatrixXd::Identity(Ndimensions+1,Ndimensions+1);
    
    T1.bottomLeftCorner(1,Ndimensions) = (-1.0) * centerCoordinates.transpose();
    T2.bottomLeftCorner(1,Ndimensions) = (-1.0) * ellipsoid.getCenterCoordinates().transpose();


    // Construct ellipsoid matrix in homogeneous coordinates

    MatrixXd A = MatrixXd::Zero(Ndimensions+1,Ndimensions+1);
    MatrixXd B = A;

    A(Ndimensions,Ndimensions) = -1;
    B(Ndimensions,Ndimensions) = -1;

    A.topLeftCorner(Ndimensions,Ndimensions) = covarianceMatrix.matrix().inverse();
    B.topLeftCorner(Ndimensions,Ndimensions) = ellipsoid.getCovarianceMatrix().matrix().inverse();

    MatrixXd AT = T1*A*T1.transpose();        // Translating to ellipsoid center
    MatrixXd BT = T2*B*T2.transpose();        // Translating to ellipsoid center


    // Compute the hyper-quadric matrix generated from the two ellipsoids
    // and derive its eigenvalue decomposition

    MatrixXd C = AT.inverse() * BT;
    MatrixXcd CC(Ndimensions+1,Ndimensions+1);

    CC.imag() = MatrixXd::Zero(Ndimensions+1,Ndimensions+1); 
    CC.real() = C;
    
    ComplexEigenSolver<MatrixXcd> eigenSolver(CC);


    // If eigenvalue decomposition fails, set control flag to false 
    // to stop the nested sampling and print the results 

    if (eigenSolver.info() != Success)
    {
        ellipsoidMatrixDecompositionIsSuccessful = false;
    }
    
    MatrixXcd E = eigenSolver.eigenvalues();
    MatrixXcd V = eigenSolver.eigenvectors();

    bool ellipsoidsDoOverlap = false;       // Assume no overlap in the beginning
    double pointA;                          // Point lying in this ellipsoid
    double pointB;                          // Point lying in the other ellipsoid


    // Loop over all eigenvectors

    for (int i = 0; i < Ndimensions+1; i++) 
    {
        // Skip inadmissible eigenvectors

        if (V(Ndimensions,i).real() == 0)
        {
            continue;                   
        }
        else if (E(i).imag() != 0)
            {
                V.col(i) = V.col(i).array() * (V.conjugate())(Ndimensions,i);      // Multiply eigenvector by complex conjugate of last element
                V.col(i) = V.col(i).array() / V(Ndimensions,i).real();             // Normalize eigenvector to last component value
                pointA = V.col(i).transpose().real() * AT * V.col(i).real();       // Evaluate point from this ellipsoid
                pointB = V.col(i).transpose().real() * BT * V.col(i).real();       // Evaluate point from the other ellipsoid


                // Accept only if point belongs to both ellipsoids

                if ((pointA <= 0) && (pointB <= 0))  
                {
                    ellipsoidsDoOverlap = true;            // Exit if ellipsoidsDoOverlap is found
                    break;
                }
            }
    }

    return ellipsoidsDoOverlap;
}
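The homogeneous-coordinate construction used above (ellipsoid matrix A translated to the ellipsoid center via T1) can be checked in isolation. The sketch below is my own 2-D illustration, not part of the class: it builds AT the same way and verifies that a point x lies inside the ellipsoid exactly when [x; 1]^T * AT * [x; 1] <= 0.

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
    const int Ndimensions = 2;

    // An axis-aligned 2-D ellipsoid: covariance (semi-axes squared) and center.
    MatrixXd covarianceMatrix = Vector2d(4.0, 1.0).asDiagonal();
    VectorXd centerCoordinates(Ndimensions);
    centerCoordinates << 1.0, -2.0;

    // Homogeneous-coordinate matrix, built exactly as in overlapsWith().
    MatrixXd T1 = MatrixXd::Identity(Ndimensions+1, Ndimensions+1);
    T1.bottomLeftCorner(1, Ndimensions) = (-1.0) * centerCoordinates.transpose();

    MatrixXd A = MatrixXd::Zero(Ndimensions+1, Ndimensions+1);
    A(Ndimensions, Ndimensions) = -1;
    A.topLeftCorner(Ndimensions, Ndimensions) = covarianceMatrix.inverse();

    MatrixXd AT = T1 * A * T1.transpose();

    // A point x belongs to the ellipsoid iff [x; 1]^T * AT * [x; 1] <= 0.
    Vector3d center(1.0, -2.0, 1.0), outside(10.0, 10.0, 1.0);
    std::cout << "center:  " << center.transpose()  * AT * center  << std::endl;   // -1 (inside)
    std::cout << "outside: " << outside.transpose() * AT * outside << std::endl;   // positive (outside)
    return 0;
}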
Example #12
int main() {
	srand (time(NULL));

	unsigned N	=	2000;
	unsigned nRhs	=	1;
	unsigned nLeaf	=	100;
	double tolerance=	1e-15;

	Test_Kernel kernel(N);

	MatrixXcd xExact	=	MatrixXcd::Random(N, nRhs);
	MatrixXcd bExact(N,nRhs), bFast(N,nRhs), xFast(N,nRhs);

	cout << endl << "Number of particles is: " << N << endl;
	clock_t start, end;

	cout << endl << "Setting things up..." << endl;
	start	=	clock();
	HODLR_Tree<Test_Kernel>* A	=	new HODLR_Tree<Test_Kernel>(&kernel, N, nLeaf);
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Assembling the matrix in HODLR form..." << endl;
	start			=	clock();
	VectorXcd diagonal	=	4.0*VectorXcd::Ones(N);
	A->assemble_Matrix(diagonal, tolerance);
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

  cout << endl << "Exact matrix vector product..." << endl;
  start           =       clock();
  for (unsigned i=0; i<N; ++i) {
    bExact(i,0)             =       diagonal(i)*xExact(i,0);
    for (unsigned j=0; j<i; ++j) {
      bExact(i,0)     =       bExact(i,0)+kernel.get_Matrix_Entry(i, j)*xExact(j,0);
    }
    for (unsigned j=i+1; j<N; ++j) {
      bExact(i,0)     =       bExact(i,0)+kernel.get_Matrix_Entry(i, j)*xExact(j,0);
    }
  }
  end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Fast matrix matrix product..." << endl;
	start		=	clock();
	A->matMatProduct(xExact, bFast);
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Factoring the matrix..." << endl;
	start		=	clock();
	A->compute_Factor();
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Solving the system..." << endl;
	start		=	clock();
	A->solve(bExact, xFast);
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Error in computed solution: " << (xFast-xExact).norm()/xExact.norm()<< endl;

	cout << endl << "Error in matrix matrix product: " << (bFast-bExact).cwiseAbs().maxCoeff() << endl;
	//	MatrixXcd B;
	//	cout << endl << "Assembling the entire matrix..." << endl;
	//	start			=	clock();
	//	get_Matrix(0, 0, N, N, B);
	//	end				=	clock();
	//	cout << endl << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;
	//
	//	cout << endl << "Exact determinant is: " << setprecision(16) << log(fabs(B.partialPivLu().determinant())) << endl;

	complex<double> determinant;
	cout << endl << "Computing the log determinant..." << endl;
	start		=	clock();
	A->compute_Determinant(determinant);
	end		=	clock();
	cout << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;

	cout << endl << "Log determinant is: " << setprecision(16) << determinant << endl;


	MatrixXcd K;
	kernel.get_Matrix(0, 0, N, N, K);
	for (unsigned k=0; k<N; ++k) {
		K(k,k)	=	diagonal(k);
	}

	complex<double> exact_determinant;
	exact_determinant	=	compute_Determinant(K);
	cout << endl << "Exact log determinant is: " << setprecision(16) << exact_determinant << endl;
	//
	// cout << endl << "Exact matrix matrix product..." << endl;
	// start			=	clock();
	// MatrixXcd bExact	=	B*x;
	// end				=	clock();
	// cout << endl << "Time taken is: " << double(end-start)/double(CLOCKS_PER_SEC)<< endl;
	//
	// cout << endl << (bExact-b).cwiseAbs().maxCoeff() << endl;
}
Example #13
        /*!
         \brief Partial pivoted LU to construct low-rank.


         */
	void partial_Piv_LU(const int start_Row, const int start_Col, const int n_Rows, const int n_Cols, const double tolerance, int& computed_Rank, MatrixXcd& U, MatrixXcd& V) {

	/********************************/
	/*	PURPOSE OF EXISTENCE	*/
	/********************************/

	/*!
         Obtains a low-rank decomposition of the matrix to a desired tolerance using the partial-pivoting LU algorithm, i.e., given a sub-matrix 'A' and a tolerance 'epsilon', computes matrices 'U' and 'V' such that ||A-UV||_F < epsilon, where ||.||_F is the Frobenius norm.
         */

	/************************/
	/*	INPUTS          */
	/************************/

	///	start_Row	-	Starting row of the sub-matrix.
	///	start_Col	-	Starting column of the sub-matrix.
	///	n_Rows		-	Number of rows of the sub-matrix.
	///	n_Cols		-	Number of columns of the sub-matrix.
	///	tolerance	-	Tolerance of low-rank approximation.

	/************************/
	/*	OUTPUTS		*/
	/************************/

	///	computed_Rank	-	Rank obtained for the given tolerance.
	///	U		-	Matrix forming the column basis.
	///	V		-	Matrix forming the row basis.

		/// If the matrix is small enough, do not do anything
		int tolerable_Rank =   5;
		if (n_Cols <= tolerable_Rank){
			kernel->get_Matrix(start_Row, start_Col, n_Rows, n_Cols, U);
			V               =   MatrixXcd::Identity(n_Cols, n_Cols);
			computed_Rank   =   n_Cols;
			return;
		}
		else if (n_Rows <= tolerable_Rank){
			U               =   MatrixXcd::Identity(n_Rows, n_Rows);
			kernel->get_Matrix(start_Row, start_Col, n_Rows, n_Cols, V);
			computed_Rank   =   n_Rows;
			return;
		}

		vector<int> rowIndex;	///	This stores the row indices, which have already been used.
		vector<int> colIndex;	///	This stores the column indices, which have already been used.
		vector<VectorXcd> u;	///	Stores the column basis.
		vector<VectorXcd> v;	///	Stores the row basis.

		srand (time(NULL));
		complex<double> max, unused_max, Gamma;

		/*  INITIALIZATION  */

		/// Initialize the matrix norm and the first row index
		double matrix_Norm  =   0;
		rowIndex.push_back(0);

		int pivot;

		computed_Rank   =   0;

		VectorXcd a, row, col;

		double row_Squared_Norm, row_Norm, col_Squared_Norm, col_Norm;

		/// Repeat till the desired tolerance is obtained
		do {
			/// Generation of the row
			kernel->get_Matrix_Row(start_Col, n_Cols, start_Row+rowIndex.back(), a);
			/// Row of the residuum and the pivot column
			row =   a;
			for (int l=0; l<computed_Rank; ++l) {
				row =   row-u[l](rowIndex.back())*v[l];
			}

			pivot   =   kernel->max_Abs_Vector(row, colIndex, max);

			int max_tries  =   100;
			int count      =   0;
			int count1     =   0;

			/// This randomization is needed if, in the middle of the algorithm, the row happens to be exactly a linear combination of the previous rows.
			while (abs(max)<tolerance && count < max_tries) {
				int new_rowIndex;
				rowIndex.pop_back();
				do {
					new_rowIndex   =   rand()%n_Rows;
					++count1;
				} while (find(rowIndex.begin(),rowIndex.end(),new_rowIndex)!=rowIndex.end() && count1 < max_tries);
				count1  =   0;
				rowIndex.push_back(new_rowIndex);

				/// Generation of the row
				kernel->get_Matrix_Row(start_Col, n_Cols, start_Row+rowIndex.back(), a);

				/// Row of the residuum and the pivot column
				row =   a;
				for (int l=0; l<computed_Rank; ++l) {
					row =   row-u[l](rowIndex.back())*v[l];
				}
				pivot   =   kernel->max_Abs_Vector(row, colIndex, max);
				++count;
			}

			if (count == max_tries) break;

			count = 0;

			colIndex.push_back(pivot);

			/// Normalizing constant
			Gamma   =   1.0/(max);

			/// Generation of the column
			kernel->get_Matrix_Col(start_Row, n_Rows, start_Col+colIndex.back(), a);

			/// Column of the residuum and the pivot row
			col =   a;
			for (int l=0; l<computed_Rank; ++l) {
				col =   col-v[l](colIndex.back())*u[l];
			}
			pivot   =   kernel->max_Abs_Vector(col, rowIndex, unused_max);

			/// This randomization is needed if, in the middle of the algorithm, the column happens to be exactly a linear combination of the previous columns.
			while (abs(max)<tolerance && count < max_tries) {
				colIndex.pop_back();
				int new_colIndex;
				do {
					new_colIndex   =   rand()%n_Cols;
				} while (find(colIndex.begin(),colIndex.end(),new_colIndex)!=colIndex.end() && count1 < max_tries);
				count1  =   0;
				colIndex.push_back(new_colIndex);

				/// Generation of the column
				kernel->get_Matrix_Col(start_Row, n_Rows, start_Col+colIndex.back(), a);

				/// Column of the residuum and the pivot row
				col =   a;
				for (int l=0; l<computed_Rank; ++l) {
					col =   col-v[l](colIndex.back())*u[l];
				}
				pivot   =   kernel->max_Abs_Vector(col, rowIndex, unused_max);
				++count;
			}

			if (count == max_tries) break;

			count = 0;

			rowIndex.push_back(pivot);

			/// New vectors
			u.push_back(Gamma*col);
			v.push_back(row);

			/// New approximation of matrix norm
			row_Squared_Norm    =   row.squaredNorm();
			row_Norm            =   sqrt(row_Squared_Norm);

			col_Squared_Norm    =   col.squaredNorm();
			col_Norm            =   sqrt(col_Squared_Norm);

			matrix_Norm         =   matrix_Norm +   abs(Gamma*Gamma*row_Squared_Norm*col_Squared_Norm);

			for (int j=0; j<computed_Rank; ++j) {
				matrix_Norm     =   matrix_Norm +   2.0*abs(u[j].dot(u.back()))*abs(v[j].dot(v.back()));
			}
			++computed_Rank;
		} while (row_Norm*col_Norm > abs(max)*tolerance*matrix_Norm && computed_Rank <= fmin(n_Rows, n_Cols));

		/// If the computed_Rank is close to full-rank then return the trivial full-rank decomposition
		if (computed_Rank>=fmin(n_Rows, n_Cols)) {
			if (n_Rows < n_Cols) {
				U   =   MatrixXcd::Identity(n_Rows,n_Rows);
				kernel->get_Matrix(start_Row, start_Col, n_Rows, n_Cols, V);
				computed_Rank   =   n_Rows;
				return;
			}
			else {
				kernel->get_Matrix(start_Row, start_Col, n_Rows, n_Cols, U);
				V   =   MatrixXcd::Identity(n_Cols,n_Cols);
				computed_Rank   =   n_Cols;
				return;
			}
		}

		U   =   MatrixXcd(n_Rows,computed_Rank);
		V   =   MatrixXcd(computed_Rank,n_Cols);
		for (int j=0; j<computed_Rank; ++j) {
			U.col(j)    =   u[j];
			V.row(j)    =   v[j];
		}
	};
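The target promised in the documentation block above is ||A - U*V||_F < epsilon with a computed rank k well below min(n_Rows, n_Cols). The standalone sketch below is my own illustration of that target, built with Eigen's JacobiSVD on a synthetic low-rank matrix rather than with the partial-pivoting LU routine itself, since that routine depends on the surrounding kernel object.

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
    // A numerically rank-12 complex matrix.
    const int m = 200, n = 150, r = 12;
    MatrixXcd A = MatrixXcd::Random(m, r) * MatrixXcd::Random(r, n);

    // Truncated SVD stands in for the low-rank factorization: keep singular
    // values above tolerance, so ||A - U*V||_F stays below about tolerance*||A||.
    const double tolerance = 1e-12;
    JacobiSVD<MatrixXcd> svd(A, ComputeThinU | ComputeThinV);
    VectorXd s = svd.singularValues();
    int k = 0;
    while (k < s.size() && s(k) > tolerance * s(0)) ++k;

    MatrixXcd U = svd.matrixU().leftCols(k);              // m x k column basis
    MatrixXcd V = svd.matrixV().leftCols(k).adjoint();    // k x n row basis
    for (int j = 0; j < k; ++j) V.row(j) *= s(j);         // fold in the singular values

    std::cout << "computed rank = " << k
              << ", ||A - U*V||_F = " << (A - U * V).norm() << std::endl;
    return 0;
}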
Example #14
void UnbiasedSquaredPhaseLagIndex::compute(ConnectivitySettings::IntermediateTrialData& inputData,
                                           QVector<QPair<int,MatrixXcd> >& vecPairCsdSum,
                                           QVector<QPair<int,MatrixXd> >& vecPairCsdImagSignSum,
                                           QMutex& mutex,
                                           int iNRows,
                                           int iNFreqs,
                                           int iNfft,
                                           const QPair<MatrixXd, VectorXd>& tapers)
{
    if(inputData.vecPairCsdImagSign.size() == iNRows) {
        //qDebug() << "UnbiasedSquaredPhaseLagIndex::compute - vecPairCsdImagSign was already computed for this trial.";
        return;
    }

    inputData.vecPairCsdImagSign.clear();

    int i,j;

    // Calculate tapered spectra if not available already
    // This code was copied and modified from Utils/Spectra, since we do not want to call that function here due to the time loss.
    if(inputData.vecTapSpectra.size() != iNRows) {
        inputData.vecTapSpectra.clear();

        RowVectorXd vecInputFFT, rowData;
        RowVectorXcd vecTmpFreq;

        MatrixXcd matTapSpectrum(tapers.first.rows(), iNFreqs);

        QVector<Eigen::MatrixXcd> vecTapSpectra;

        FFT<double> fft;
        fft.SetFlag(fft.HalfSpectrum);

        for (i = 0; i < iNRows; ++i) {
            // Subtract mean
            rowData.array() = inputData.matData.row(i).array() - inputData.matData.row(i).mean();

            // Calculate tapered spectra if not available already
            for(j = 0; j < tapers.first.rows(); j++) {
                vecInputFFT = rowData.cwiseProduct(tapers.first.row(j));
                // FFT for freq domain returning the half spectrum and multiply taper weights
                fft.fwd(vecTmpFreq, vecInputFFT, iNfft);
                matTapSpectrum.row(j) = vecTmpFreq * tapers.second(j);
            }

            inputData.vecTapSpectra.append(matTapSpectrum);
        }
    }

    // Compute CSD
    if(inputData.vecPairCsd.isEmpty()) {
        double denomCSD = sqrt(tapers.second.cwiseAbs2().sum()) * sqrt(tapers.second.cwiseAbs2().sum()) / 2.0;

        bool bNfftEven = false;
        if (iNfft % 2 == 0){
            bNfftEven = true;
        }

        MatrixXcd matCsd = MatrixXcd(iNRows, iNFreqs);

        for (i = 0; i < iNRows; ++i) {
            for (j = i; j < iNRows; ++j) {
                // Compute CSD (average over tapers if necessary)
                matCsd.row(j) = inputData.vecTapSpectra.at(i).cwiseProduct(inputData.vecTapSpectra.at(j).conjugate()).colwise().sum() / denomCSD;

                // Divide first and last element by 2 due to half spectrum
                matCsd.row(j)(0) /= 2.0;
                if(bNfftEven) {
                    matCsd.row(j).tail(1) /= 2.0;
                }
            }

            inputData.vecPairCsd.append(QPair<int,MatrixXcd>(i,matCsd));
            inputData.vecPairCsdImagSign.append(QPair<int,MatrixXd>(i,matCsd.imag().cwiseSign()));
        }

        mutex.lock();

        if(vecPairCsdSum.isEmpty()) {
            vecPairCsdSum = inputData.vecPairCsd;
            vecPairCsdImagSignSum = inputData.vecPairCsdImagSign;
        } else {
            for (int j = 0; j < vecPairCsdSum.size(); ++j) {
                vecPairCsdSum[j].second += inputData.vecPairCsd.at(j).second;
                vecPairCsdImagSignSum[j].second += inputData.vecPairCsdImagSign.at(j).second;
            }
        }

        mutex.unlock();
    } else {
        if(inputData.vecPairCsdImagSign.isEmpty()) {
            for (i = 0; i < inputData.vecPairCsd.size(); ++i) {
                inputData.vecPairCsdImagSign.append(QPair<int,MatrixXd>(i,inputData.vecPairCsd.at(i).second.imag().cwiseSign()));
            }

            mutex.lock();

            if(vecPairCsdImagSignSum.isEmpty()) {
                vecPairCsdImagSignSum = inputData.vecPairCsdImagSign;
            } else {
                for (int j = 0; j < vecPairCsdImagSignSum.size(); ++j) {
                    vecPairCsdImagSignSum[j].second += inputData.vecPairCsdImagSign.at(j).second;
                }
            }

            mutex.unlock();
        }
    }
}
Example #15
MatrixXcd X = MatrixXcd::Random(4,4);
MatrixXcd A = X + X.adjoint();
cout << "Here is a random self-adjoint 4x4 matrix:" << endl << A << endl << endl;

Tridiagonalization<MatrixXcd> triOfA(A);
MatrixXd T = triOfA.matrixT();
cout << "The tridiagonal matrix T is:" << endl << T << endl << endl;

cout << "We can also extract the diagonals of T directly ..." << endl;
VectorXd diag = triOfA.diagonal();
cout << "The diagonal is:" << endl << diag << endl; 
VectorXd subdiag = triOfA.subDiagonal();
cout << "The subdiagonal is:" << endl << subdiag << endl;
Example #16
Rcpp::List EigsGen::extract()
{
    int nconv = iparam[5 - 1];
    int niter = iparam[9 - 1];

    // Sometimes there are nconv = nev + 1 converged eigenvalues,
    // mainly due to pairs of complex eigenvalues.
    // We will truncate at nev.
    int truenconv = nconv > nev ? nev : nconv;

    // Converged eigenvalues from aupd()
    VectorXcd evalsConverged(nconv);
    evalsConverged.real() = MapVec(workl + ncv * ncv, nconv);
    evalsConverged.imag() = MapVec(workl + ncv * ncv + ncv, nconv);
    
    // If only eigenvalues are requested
    if(!retvec)
    {
        if(nconv < nev)
            ::Rf_warning("only %d eigenvalues converged, less than k", nconv);

        sortDesc(evalsConverged);
        
        if(evalsConverged.size() > truenconv)
            evalsConverged.conservativeResize(truenconv);
        
        return returnResult(returnRealIfPossible(evalsConverged),
                            R_NilValue, wrap(truenconv), wrap(niter));
    }
    
    // Recompute the Hessenberg matrix, since occasionally
    // aupd() will give us an incorrect one
    recomputeH();

    MapMat Hm(workl, ncv, ncv);
    MapMat Vm(V, n, ncv);
    RealSchur<MatrixXd> schur(Hm);
    MatrixXd Qm = schur.matrixU();
    MatrixXd Rm = schur.matrixT();
    VectorXcd evalsRm(ncv);
    VectorXi selectInd(nconv);
    
    eigenvalueSchur(Rm, evalsRm);
    findMatchedIndex(evalsConverged.head(nconv), evalsRm, selectInd);
    
    //Rcpp::Rcout << evalsRm << "\n\n";
    //Rcpp::Rcout << evalsConverged << "\n\n";
    
    truenconv = selectInd.size();
    if(truenconv < 1)
    {
        ::Rf_warning("no converged eigenvalues found");
        
        return returnResult(R_NilValue, R_NilValue, wrap(0L),
                            wrap(niter));
    }
    
    // Shrink Qm and Rm to the dimension given by the largest value
    // in selectInd. Since selectInd is strictly increasing,
    // we can just use its last value.
    int lastInd = selectInd[selectInd.size() - 1];
    Qm.conservativeResize(Eigen::NoChange, lastInd + 1);
    Rm.conservativeResize(lastInd + 1, lastInd + 1);
    
    // Eigen decomposition of Rm
    EigenSolver<MatrixXd> es(Rm);
    evalsRm = es.eigenvalues();
    MatrixXcd evecsA = Vm * (Qm * es.eigenvectors());
    
    // Order and select eigenvalues/eigenvectors
    for(int i = 0; i < truenconv; i++)
    {
        // Since selectInd[i] >= i for all i, it is safe to
        // overwrite the elements and columns.
        evalsRm[i] = evalsRm[selectInd[i]];
    }
    if(evalsRm.size() > truenconv)
        evalsRm.conservativeResize(truenconv);
    transformEigenvalues(evalsRm);
    // Now (evalsRm, selectInd) gives the pair of (value, location)
    sortDescPair(evalsRm, selectInd);
    
    if(truenconv > nev)
    {
        truenconv = nev;
        evalsRm.conservativeResize(truenconv);
    }
    MatrixXcd evecsConverged(n, truenconv);
    for(int i = 0; i < truenconv; i++)
    {
        evecsConverged.col(i) = evecsA.col(selectInd[i]);
    }
    
    if(truenconv < nev)
        ::Rf_warning("only %d eigenvalues converged, less than k", truenconv);

    return returnResult(returnRealIfPossible(evalsRm),
                        returnRealIfPossible(evecsConverged),
                        wrap(truenconv),
                        wrap(niter));
}
Example #17
int main() {

    // The eigen approach
    ArrayXd n                = ArrayXd::LinSpaced(N+1,0,N);
    double multiplier        = M_PI/N;
    Array<double, 1, N+1> nT = n.transpose();
    ArrayXd x                = - cos(multiplier*n);
    ArrayXd xsub             = x.middleRows(1, N-1);
    ArrayXd ysub             = (x1-x0)/2*xsub + (x1+x0)/2;

    ArrayXXd T               = cos((acos(x).matrix()*nT.matrix()).array());
    ArrayXXd Tsub            = cos((acos(xsub).matrix()*nT.matrix()).array());
    ArrayXd sqinx            = 1/sqrt(1-xsub*xsub);

    MatrixXd inv1x2          = (sqinx).matrix().asDiagonal();

    // Can't use the following to test elements of inv1x2
    // std::cout << inv1x2(0,0) << "\n";

    MatrixXd Usub            = inv1x2 * sin(((acos(xsub).matrix())*nT.matrix()).array()).matrix();
    MatrixXd dTsub           = Usub*(n.matrix().asDiagonal());
    MatrixXd d2Tsub          = ((sqinx*sqinx).matrix().asDiagonal())*((xsub.matrix().asDiagonal()) * (dTsub.matrix()) - (Tsub.matrix()) * ((n*n).matrix().asDiagonal()));

    MatrixXd d2T(N+1, N+1);
    RowVectorXd a            = (pow((-1),nT))*(nT*nT+1)*(nT*nT)/3;
    RowVectorXd b            = (nT*nT+1)*(nT*nT)/3;
    d2T.middleRows(1,N-1)    = d2Tsub; 
    d2T.row(0)               = a;
    d2T.row(N)               = b;

    MatrixXd D2              = d2T.matrix() * ((T.matrix()).inverse());
    MatrixXd E2              = D2.middleRows(1,N-1).middleCols(1,N-1);
    MatrixXd Y               = ysub.matrix().asDiagonal();
    MatrixXd H               = - (4 / ((x1-x0)*(x1-x0))) * E2 + k*Y;

    Eigen::EigenSolver<Eigen::MatrixXd> HE(H);
    VectorXcd D              = HE.eigenvalues();
    MatrixXcd V              = HE.eigenvectors();
    std::cout << HE.info() << std::endl;

    // Open ofstream
    ofstream Dfile;
    Dfile.open("D-output.txt");

    ofstream Vfile;
    Vfile.open("V-output.txt");

    ofstream V544file;
    V544file.open("V544-output.txt");

    Dfile.precision(15);
    Dfile << D.real() << "\n";

    Vfile.precision(15);
    Vfile << V.real() << "\n";

    V544file.precision(15);

    for(int i = 1; i<N-1; i++)
    {
        V544file << ysub[i-1];
        V544file << " "        << V.col(544).row(i-1).real() << "\n";
    }
    Dfile.close();
    Vfile.close();
    V544file.close();
    system("gnuplot -p plot.gp");
    system("rsvg-convert -w 2000 -o V544-plot.png V544-plot.svg");

}