// Nystrom-style randomized SVD of (1/n) * A' * B, where A is n x d_A and B is n x d_B.
// Returns the approximate column-space factor (first) and the corresponding singular values (second).
pair< SparseMatrix<double>, SparseVector<double> > SVD_Nystrom_columnSpace_sparse(SparseMatrix<double> A, SparseMatrix<double> B)
{
    double norm_denom = 1.0 / (double)A.rows();
    pair< SparseMatrix<double>, SparseVector<double> > USigma;

    // Random test matrix for the range finder; use the sparse embedding only for wide B.
    int k_prime = 2 * KHID;
    SparseMatrix<double> random_mat = (B.cols() > 20 * KHID) ?
        random_embedding_mat((long)B.cols(), k_prime) :
        random_embedding_mat_dense((long)B.cols(), k_prime);

    // Range finder: Q approximately spans the column space of A' * B.
    SparseMatrix<double> tmp = B * random_mat;
    SparseMatrix<double> Q = A.transpose() * tmp;
    Q.makeCompressed();
    Q.prune(TOLERANCE);
    tmp.resize(0, 0);
    Q = orthogonalize_cols(Q);

    // Project onto the subspace: C = (1/n) * A' * B * Q.
    SparseMatrix<double> tmp2 = B * Q;
    SparseMatrix<double> C = A.transpose() * tmp2;
    C = norm_denom * C;

    // Small eigendecomposition of Z = C' * C recovers the singular values of C.
    MatrixXd Z = (MatrixXd)C.transpose() * C;
    pair<pair<Eigen::MatrixXd, Eigen::MatrixXd>, Eigen::VectorXd> svd_Z = latenttree_svd(Z);
    MatrixXd V = (svd_Z.first.second).leftCols(KHID);
    SparseMatrix<double> V_sparse = V.sparseView();
    VectorXd S = svd_Z.second.head(KHID);

    USigma.second = S.cwiseSqrt().sparseView();
    MatrixXd diag_inv_S_sqrt = pinv_vector(S.cwiseSqrt()).asDiagonal();
    SparseMatrix<double> diag_inv_S_sqrt_s = diag_inv_S_sqrt.sparseView();
    USigma.first = C * V_sparse * diag_inv_S_sqrt_s; // U = C * V * S^{-1/2}
    return USigma;
}
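// A minimal usage sketch (not project code), assuming the project globals KHID and
// TOLERANCE and the helpers used above (random_embedding_mat, orthogonalize_cols,
// latenttree_svd, pinv_vector) are declared in a project header and linked in; the
// random inputs are purely illustrative. Note that, as written, the product B * Q
// above requires d_A == d_B.
SparseMatrix<double> demo_Nystrom_columnSpace()
{
    const int n = 1000, d = 200; // d_A == d_B for this routine
    SparseMatrix<double> A = MatrixXd::Random(n, d).sparseView();
    SparseMatrix<double> B = MatrixXd::Random(n, d).sparseView();

    pair< SparseMatrix<double>, SparseVector<double> > US = SVD_Nystrom_columnSpace_sparse(A, B);

    cout << "column-space factor: " << US.first.rows() << " x " << US.first.cols() << endl;
    cout << "approximate singular values (nonzeros):" << endl << US.second << endl;
    return US.first;
}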
// Nystrom-style randomized SVD of a (symmetric) sparse matrix M; returns the
// approximate column-space factor (first) and the singular values (second).
pair< SparseMatrix<double>, SparseVector<double> > SVD_symNystrom_sparse(SparseMatrix<double> M)
{
    pair< SparseMatrix<double>, SparseVector<double> > USigma;
    int k_prime = 2 * KHID;

    if (M.rows() > 20 * KHID)
    {
        // Range finder: sketch M with a random embedding, then orthogonalize.
        SparseMatrix<double> Q;
        if (M.cols() > k_prime)
        {
            SparseMatrix<double> random_mat = random_embedding_mat((long)M.cols(), k_prime);
            Q = M * random_mat;
        }
        else
        {
            Q = M;
        }
        Q = orthogonalize_cols((MatrixXd) Q);

        // Project M onto the subspace spanned by Q.
        SparseMatrix<double> C;
        if (M.cols() < Q.rows())
        {
            SparseMatrix<double> QQ = Q.block(0, 0, M.cols(), Q.cols());
            C = M * QQ;
        }
        else
        {
            C = M * Q;
        }

        // Small eigendecomposition of Z = C' * C recovers the singular values of C.
        MatrixXd Z = (MatrixXd)C.transpose() * C;
        pair<pair<Eigen::MatrixXd, Eigen::MatrixXd>, Eigen::VectorXd> svd_Z = latenttree_svd(Z);
        MatrixXd V = (svd_Z.first.second).leftCols(KHID);
        SparseMatrix<double> V_sparse = V.sparseView();
        VectorXd S = svd_Z.second.head(KHID);

        USigma.second = S.cwiseSqrt().sparseView();
        MatrixXd diag_inv_S_sqrt = pinv_vector(S.cwiseSqrt()).asDiagonal();
        SparseMatrix<double> diag_inv_S_sqrt_s = diag_inv_S_sqrt.sparseView();
        USigma.first = C * V_sparse * diag_inv_S_sqrt_s; // U = C * V * S^{-1/2}
    }
    else
    {
        // Small problem: fall back to a direct SVD.
        pair<pair<SparseMatrix<double>, SparseMatrix<double> >, SparseVector<double> > mv = latenttree_svd(M);
        USigma.first = mv.first.first.leftCols(KHID);
        USigma.second = mv.second.head(KHID);
    }
    return USigma;
}
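// A similar usage sketch (not project code) for the symmetric variant, again assuming
// KHID and the project helpers are available; the Gram matrix is only illustrative.
SparseVector<double> demo_symNystrom()
{
    const int n = 500, d = 300;
    SparseMatrix<double> X = MatrixXd::Random(n, d).sparseView();
    SparseMatrix<double> G = X.transpose() * X; // symmetric d x d Gram matrix

    pair< SparseMatrix<double>, SparseVector<double> > US = SVD_symNystrom_sparse(G);

    // US.first spans the dominant column space of G; US.second holds the
    // corresponding approximate singular values.
    cout << "basis: " << US.first.rows() << " x " << US.first.cols() << endl;
    return US.second;
}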
MNESourceEstimate MinimumNorm::calculateInverse(const MatrixXd &data, float tmin, float tstep) const
{
    if(!inverseSetup)
    {
        qWarning("Inverse not set up -> call doInverseSetup first!");
        return MNESourceEstimate();
    }

    MatrixXd sol = K * data; // apply the imaging kernel

    if (inv.source_ori == FIFFV_MNE_FREE_ORI)
    {
        printf("combining the current components...");
        MatrixXd sol1(sol.rows()/3, sol.cols());
        for(qint32 i = 0; i < sol.cols(); ++i)
        {
            VectorXd* tmp = MNEMath::combine_xyz(sol.block(0, i, sol.rows(), 1));
            sol1.block(0, i, sol.rows()/3, 1) = tmp->cwiseSqrt();
            delete tmp;
        }
        sol.resize(sol1.rows(), sol1.cols());
        sol = sol1;
    }

    if (m_bdSPM)
    {
        printf("(dSPM)...");
        sol = inv.noisenorm*sol;
    }
    else if (m_bsLORETA)
    {
        printf("(sLORETA)...");
        sol = inv.noisenorm*sol;
    }
    printf("[done]\n");

    // Results
    VectorXi p_vecVertices(inv.src[0].vertno.size() + inv.src[1].vertno.size());
    p_vecVertices << inv.src[0].vertno, inv.src[1].vertno;

    return MNESourceEstimate(sol, p_vecVertices, tmin, tstep);
}
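// A minimal sketch of the setup/apply split (not library code): the kernel is computed
// once with doInverseSetup and then reused for consecutive data blocks. The MinimumNorm
// constructor and doInverseSetup arguments shown here are assumptions inferred from the
// member variables used above; the file path and nave value are illustrative.
void demo_calculateInverse_blocks(const QList<MatrixXd> &dataBlocks, float sfreq)
{
    QFile t_fileInv("./MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif");
    MNEInverseOperator invOp(t_fileInv);

    float snr = 3.0f;
    MinimumNorm minNorm(invOp, 1.0f / (snr * snr), "sLORETA"); // assumed constructor (inverse operator, lambda2, method)
    minNorm.doInverseSetup(40);                                // assumed: nave = 40

    float tstep = 1.0f / sfreq;
    float tmin = 0.0f;
    for (qint32 b = 0; b < dataBlocks.size(); ++b)
    {
        MNESourceEstimate stc = minNorm.calculateInverse(dataBlocks[b], tmin, tstep);
        Q_UNUSED(stc)
        tmin += dataBlocks[b].cols() * tstep; // advance the time origin for the next block
    }
}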
// Draws one sample of the coefficient matrix theta (one column per trait of Y) for the
// mixed-model equations with diagonal residual precision (diagR), via a perturb-and-solve
// update: prior draws theta_star and e_star are corrected by solving against the supplied
// Cholesky factor of C with right-hand side W' R^{-1} (Y - W*theta_star - e_star), and the
// correction is rescaled by 1/tot_Eta_prec.
MatrixXd sample_MME_multiple_diagR(
    MatrixXd Y,              // n x p data, one trait per column
    SpMat W,                 // n x b design matrix of the coefficients
    SpMat chol_C,            // lower-triangular Cholesky factor of the coefficient precision C (supplied by the caller)
    VectorXd pe,             // per-trait residual precision (diagonal R^{-1})
    MSpMat chol_K_inv,       // upper-triangular Cholesky factor of the prior precision of theta
    VectorXd tot_Eta_prec,   // per-trait total precision used to rescale the solve
    MatrixXd randn_theta,    // standard-normal draws, b x p
    MatrixXd randn_e         // standard-normal draws, n x p
){
    // Prior draws: theta_star ~ N(0, K) and e_star ~ N(0, R).
    MatrixXd theta_star = chol_K_inv.triangularView<Upper>().solve(randn_theta);
    MatrixXd e_star     = randn_e * pe.cwiseSqrt().cwiseInverse().asDiagonal();

    // Residual of the perturbed data and its weighted projection W' R^{-1} (Y - W*theta_star - e_star).
    MatrixXd W_theta_star = W * theta_star;
    MatrixXd Y_resid = Y - W_theta_star - e_star;
    MatrixXd WtRiy = W.transpose() * (Y_resid * pe.asDiagonal());

    // Solve C * theta_tilda = WtRiy using the two triangular solves of the Cholesky factor.
    MatrixXd theta_tilda = chol_C.transpose().triangularView<Upper>().solve(
                               chol_C.triangularView<Lower>().solve(WtRiy));

    // Rescale by the per-trait total precision and recenter with theta_star.
    MatrixXd theta = theta_tilda * tot_Eta_prec.cwiseInverse().asDiagonal() + theta_star;
    return theta;
}
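// A standalone dense sketch (not project code) of the single-column "perturb and solve"
// identity that sample_MME_multiple_diagR vectorizes across trait columns (the
// tot_Eta_prec rescaling is omitted here): drawing theta_star ~ N(0, K) and
// e_star ~ N(0, R) and correcting with one solve of C yields a draw from
// N(C^{-1} W' R^{-1} y, C^{-1}) with C = W' R^{-1} W + K^{-1}. All inputs are illustrative.
#include <cmath>
#include <iostream>
#include <random>
#include <Eigen/Dense>
using namespace Eigen;

int main()
{
    const int n = 50, b = 5;
    std::mt19937 gen(0);
    std::normal_distribution<double> N01(0.0, 1.0);

    // Illustrative inputs: design matrix W, one trait column y, residual precision pe,
    // prior precision K^{-1} (identity here for simplicity).
    MatrixXd W(n, b);
    VectorXd y(n), e_star(n), z(b);
    for (int i = 0; i < n; ++i)
    {
        y(i) = N01(gen);
        for (int j = 0; j < b; ++j) W(i, j) = N01(gen);
    }
    double pe = 2.0;
    MatrixXd K_inv = MatrixXd::Identity(b, b);

    // Prior draws: theta_star ~ N(0, K), e_star ~ N(0, R) with R = (1/pe) * I.
    LLT<MatrixXd> cholK(K_inv.inverse());
    for (int j = 0; j < b; ++j) z(j) = N01(gen);
    VectorXd theta_star = cholK.matrixL() * z;
    for (int i = 0; i < n; ++i) e_star(i) = N01(gen) / std::sqrt(pe);

    // Perturb-and-solve: theta = theta_star + C^{-1} W' R^{-1} (y - W*theta_star - e_star).
    MatrixXd C = pe * W.transpose() * W + K_inv;
    VectorXd rhs = pe * W.transpose() * (y - W * theta_star - e_star);
    VectorXd theta = theta_star + C.llt().solve(rhs);

    std::cout << "sampled theta: " << theta.transpose() << std::endl;
    return 0;
}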
/**
 * The function main marks the entry point of the program.
 * By default, main has the storage class extern.
 *
 * @param [in] argc (argument count) is an integer that indicates how many arguments were entered on the command line when the program was started.
 * @param [in] argv (argument vector) is an array of pointers to arrays of character objects. The array objects are null-terminated strings, representing the arguments that were entered on the command line when the program was started.
 * @return the value that was set to exit() (which is 0 if exit() is called via quit()).
 */
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    // fname_data - Name of the data file
    // setno      - Data set number
    // fname_inv  - Inverse operator file name
    // nave       - Number of averages (scales the noise covariance).
    //              If negative, the number of averages in the data will be used.
    // lambda2    - The regularization factor
    // dSPM       - do dSPM?
    // sLORETA    - do sLORETA?
    QFile t_fileEvoked("./MNE-sample-data/MEG/sample/sample_audvis-ave.fif");
    QFile t_fileInv("./MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif");
    qint32 nave = 40;
    float snr = 3.0f;
    float lambda2 = pow(1.0f / snr, 2.0f);
    bool dSPM = false;
    bool sLORETA = true;

    //
    // Read the data first
    //
    FiffEvokedSet evokedSet(t_fileEvoked);

    //
    // Then the inverse operator
    //
    MNEInverseOperator inv_raw(t_fileInv);

    //
    // Iterate over found data sets
    //
    for(qint32 setno = 0; setno < evokedSet.evoked.size(); ++setno)
    {
        printf(">> Computing inverse for %s data set <<\n", evokedSet.evoked[setno].comment.toLatin1().constData());

        //
        // Set up the inverse according to the parameters
        //
        if (nave < 0)
            nave = evokedSet.evoked[setno].nave;

        MNEInverseOperator inv = inv_raw.prepare_inverse_operator(nave, lambda2, dSPM, sLORETA);

        //
        // Pick the correct channels from the data
        //
        FiffEvokedSet newEvokedSet = evokedSet.pick_channels(inv.noise_cov->names);
        evokedSet = newEvokedSet;
        printf("Picked %d channels from the data\n", evokedSet.info.nchan);
        printf("Computing inverse...");

        //
        // Simple matrix multiplication followed by combination of the
        // three current components
        //
        // This does all the data transformations to compute the weights for the
        // eigenleads
        //
        // ToDo: put this into a function of the inverse data (MNE algorithm class derived from inverse algorithm)
        SparseMatrix<double> reginv(inv.reginv.rows(), inv.reginv.rows());
        qint32 i;
        for(i = 0; i < inv.reginv.rows(); ++i)
            reginv.insert(i,i) = inv.reginv(i,0);

        MatrixXd trans = reginv*inv.eigen_fields->data*inv.whitener*inv.proj*evokedSet.evoked[setno].data;

        //
        // Transformation into current distributions by weighting the eigenleads
        // with the weights computed above
        //
        MatrixXd sol;
        if (inv.eigen_leads_weighted)
        {
            //
            // R^0.5 has already been factored in
            //
            printf("(eigenleads already weighted)...");
            sol = inv.eigen_leads->data*trans;
        }
        else
        {
            //
            // R^0.5 has to be factored in
            //
            printf("(eigenleads need to be weighted)...");
            SparseMatrix<double> sourceCov(inv.source_cov->data.rows(), inv.source_cov->data.rows());
            for(i = 0; i < inv.source_cov->data.rows(); ++i)
                sourceCov.insert(i,i) = sqrt(inv.source_cov->data(i,0));
            sol = sourceCov*inv.eigen_leads->data*trans;
        }

        if (inv.source_ori == FIFFV_MNE_FREE_ORI)
        {
            printf("combining the current components...");
            MatrixXd sol1(sol.rows()/3, sol.cols());
            for(i = 0; i < sol.cols(); ++i)
            {
                VectorXd* tmp = MNE::combine_xyz(sol.block(0, i, sol.rows(), 1));
                sol1.block(0, i, sol.rows()/3, 1) = tmp->cwiseSqrt();
                delete tmp;
            }
            sol.resize(sol1.rows(), sol1.cols());
            sol = sol1;
        }

        if (dSPM)
        {
            printf("(dSPM)...");
            sol = inv.noisenorm*sol;
        }
        else if (sLORETA)
        {
            printf("(sLORETA)...");
            sol = inv.noisenorm*sol;
        }
        printf("[done]\n");

        // Results
        float tmin = ((float)evokedSet.evoked[setno].first) / evokedSet.info.sfreq;
        float tstep = 1 / evokedSet.info.sfreq;

        std::cout << "\npart ( block( 0, 0, 10, 10) ) of the inverse solution:\n" << sol.block(0,0,10,10) << std::endl;
        printf("tmin = %f s\n", tmin);
        printf("tstep = %f s\n", tstep);
    }

    return a.exec();
}
SourceEstimate MinimumNorm::calculateInverse(const FiffEvoked &p_fiffEvoked, bool pick_normal) const
{
    //
    // Set up the inverse according to the parameters
    //
    qint32 nave = p_fiffEvoked.nave;

    if(!m_inverseOperator.check_ch_names(p_fiffEvoked.info))
    {
        qWarning("Channel name check failed.");
        return SourceEstimate();
    }

    MNEInverseOperator inv = m_inverseOperator.prepare_inverse_operator(nave, m_fLambda, m_bdSPM, m_bsLORETA);

    //
    // Pick the correct channels from the data
    //
    FiffEvoked t_fiffEvoked = p_fiffEvoked.pick_channels(inv.noise_cov->names);
    printf("Picked %d channels from the data\n", t_fiffEvoked.info.nchan);
    printf("Computing inverse...");

    MatrixXd K;
    SparseMatrix<double> noise_norm;
    QList<VectorXi> vertno;
    Label label;
    inv.assemble_kernel(label, m_sMethod, pick_normal, K, noise_norm, vertno);

    MatrixXd sol = K * t_fiffEvoked.data; // apply the imaging kernel

    if (inv.source_ori == FIFFV_MNE_FREE_ORI)
    {
        printf("combining the current components...");
        MatrixXd sol1(sol.rows()/3, sol.cols());
        for(qint32 i = 0; i < sol.cols(); ++i)
        {
            VectorXd* tmp = MNEMath::combine_xyz(sol.block(0, i, sol.rows(), 1));
            sol1.block(0, i, sol.rows()/3, 1) = tmp->cwiseSqrt();
            delete tmp;
        }
        sol.resize(sol1.rows(), sol1.cols());
        sol = sol1;
    }

    if (m_bdSPM)
    {
        printf("(dSPM)...");
        sol = inv.noisenorm*sol;
    }
    else if (m_bsLORETA)
    {
        printf("(sLORETA)...");
        sol = inv.noisenorm*sol;
    }
    printf("[done]\n");

    // Results
    float tmin = ((float)t_fiffEvoked.first) / t_fiffEvoked.info.sfreq;
    float tstep = 1 / t_fiffEvoked.info.sfreq;

    QList<VectorXi> t_qListVertices;
    for(qint32 h = 0; h < inv.src.size(); ++h)
        t_qListVertices.push_back(inv.src[h].vertno);

    return SourceEstimate(sol, t_qListVertices, tmin, tstep);
}
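// A short usage sketch for the evoked-data overload (not library code); the FiffEvoked
// and MinimumNorm constructor arguments are assumptions, and the file paths mirror the
// sample paths used in the main() example above.
bool demo_calculateInverse_evoked()
{
    QFile t_fileEvoked("./MNE-sample-data/MEG/sample/sample_audvis-ave.fif");
    QFile t_fileInv("./MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif");

    QPair<QVariant, QVariant> baseline(QVariant(), 0);         // assumed baseline convention
    FiffEvoked evoked(t_fileEvoked, QVariant(0), baseline);    // data set 0; ctor arguments are assumptions
    MNEInverseOperator invOp(t_fileInv);

    float snr = 3.0f;
    MinimumNorm minNorm(invOp, 1.0f / (snr * snr), "sLORETA"); // assumed constructor (inverse operator, lambda2, method)
    SourceEstimate stc = minNorm.calculateInverse(evoked, false); // pick_normal = false

    return !stc.isEmpty();                                     // isEmpty() is assumed here
}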
MNEInverseOperator MNEInverseOperator::prepare_inverse_operator(qint32 nave, float lambda2, bool dSPM, bool sLORETA) const
{
    if(nave <= 0)
    {
        printf("The number of averages should be positive\n");
        return MNEInverseOperator();
    }
    printf("Preparing the inverse operator for use...\n");
    MNEInverseOperator inv(*this);

    //
    // Scale some of the stuff
    //
    float scale = ((float)inv.nave)/((float)nave);
    inv.noise_cov->data  *= scale;
    inv.noise_cov->eig   *= scale;
    inv.source_cov->data *= scale;

    if (inv.eigen_leads_weighted)
        inv.eigen_leads->data *= sqrt(scale);

    printf("\tScaled noise and source covariance from nave = %d to nave = %d\n", inv.nave, nave);
    inv.nave = nave;

    //
    // Create the diagonal matrix for computing the regularized inverse
    //
    VectorXd tmp = inv.sing.cwiseProduct(inv.sing) + VectorXd::Constant(inv.sing.size(), lambda2);
    inv.reginv = VectorXd(inv.sing.cwiseQuotient(tmp));
    printf("\tCreated the regularized inverter\n");

    //
    // Create the projection operator
    //
    qint32 ncomp = FiffProj::make_projector(inv.projs, inv.noise_cov->names, inv.proj);
    if (ncomp > 0)
        printf("\tCreated an SSP operator (subspace dimension = %d)\n", ncomp);

    //
    // Create the whitener
    //
    inv.whitener = MatrixXd::Zero(inv.noise_cov->dim, inv.noise_cov->dim);
    qint32 nnzero, k;
    if (inv.noise_cov->diag == 0)
    {
        //
        // Omit the zeroes due to projection
        //
        nnzero = 0;
        for (k = ncomp; k < inv.noise_cov->dim; ++k)
        {
            if (inv.noise_cov->eig[k] > 0)
            {
                inv.whitener(k,k) = 1.0/sqrt(inv.noise_cov->eig[k]);
                ++nnzero;
            }
        }
        //
        // Rows of eigvec are the eigenvectors
        //
        inv.whitener *= inv.noise_cov->eigvec;
        printf("\tCreated the whitener using a full noise covariance matrix (%d small eigenvalues omitted)\n", inv.noise_cov->dim - nnzero);
    }
    else
    {
        //
        // No need to omit the zeroes due to projection
        //
        for (k = 0; k < inv.noise_cov->dim; ++k)
            inv.whitener(k,k) = 1.0/sqrt(inv.noise_cov->data(k,0));
        printf("\tCreated the whitener using a diagonal noise covariance matrix (%d small eigenvalues discarded)\n", ncomp);
    }

    //
    // Finally, compute the noise-normalization factors
    //
    if (dSPM || sLORETA)
    {
        VectorXd noise_norm = VectorXd::Zero(inv.eigen_leads->nrow);
        VectorXd noise_weight;
        if (dSPM)
        {
            printf("\tComputing noise-normalization factors (dSPM)...");
            noise_weight = VectorXd(inv.reginv);
        }
        else
        {
            printf("\tComputing noise-normalization factors (sLORETA)...");
            VectorXd tmp = (VectorXd::Constant(inv.sing.size(), 1) + inv.sing.cwiseProduct(inv.sing)/lambda2);
            noise_weight = inv.reginv.cwiseProduct(tmp.cwiseSqrt());
        }
        VectorXd one;
        if (inv.eigen_leads_weighted)
        {
            for (k = 0; k < inv.eigen_leads->nrow; ++k)
            {
                one = inv.eigen_leads->data.block(k,0,1,inv.eigen_leads->data.cols()).cwiseProduct(noise_weight);
                noise_norm[k] = sqrt(one.dot(one));
            }
        }
        else
        {
            double c;
            for (k = 0; k < inv.eigen_leads->nrow; ++k)
            {
                c = sqrt(inv.source_cov->data(k,0));
                one = c*(inv.eigen_leads->data.row(k).transpose()).cwiseProduct(noise_weight); //ToDo eigenleads data -> pointer
                noise_norm[k] = sqrt(one.dot(one));
            }
        }

        //
        // Compute the final result
        //
        VectorXd noise_norm_new = noise_norm; // fixed orientation: one factor per source already
        if (inv.source_ori == FIFFV_MNE_FREE_ORI)
        {
            //
            // The three-component case is a little bit more involved:
            // the variances at three consecutive entries must be squared and
            // added together.
            //
            // Even in this case return only one noise-normalization factor
            // per source location
            //
            VectorXd* t = MNEMath::combine_xyz(noise_norm.transpose());
            noise_norm_new = t->cwiseSqrt(); // double, otherwise values are getting too small
            delete t;
            //
            // This would replicate the same value on three consecutive
            // entries
            //
            //   noise_norm = kron(sqrt(mne_combine_xyz(noise_norm)),ones(3,1));
            //
        }

        VectorXd vOnes = VectorXd::Ones(noise_norm_new.size());
        VectorXd tmp = vOnes.cwiseQuotient(noise_norm_new.cwiseAbs());

        typedef Eigen::Triplet<double> T;
        std::vector<T> tripletList;
        tripletList.reserve(noise_norm_new.size());
        for(qint32 i = 0; i < noise_norm_new.size(); ++i)
            tripletList.push_back(T(i, i, tmp[i]));

        inv.noisenorm = SparseMatrix<double>(noise_norm_new.size(), noise_norm_new.size());
        inv.noisenorm.setFromTriplets(tripletList.begin(), tripletList.end());
        printf("[done]\n");
    }
    else
    {
        inv.noisenorm = SparseMatrix<double>();
    }

    return inv;
}