void LayoutSetting::getChildrenRadius( const QList<SymbolNode::Ptr>& childList, VectorXf& radiusVec ) { radiusVec.resize(childList.size()); for (int ithChild = 0; ithChild < childList.size(); ++ithChild) { const SymbolNode::Ptr& child = childList[ithChild]; getNodeRadius(child, radiusVec[ithChild]); } }
// Place the connected components in 2D according to the similarity of
// their word attributes, using multidimensional scaling (MDS).
// On return every component's m_compPos2D is set and finalRadius holds
// the radius of the overall layout.
void ComponentLayouter::layoutByWord( QVector<Component>& compInfo, float& finalRadius )
{
	// qDebug()<< "by word";
	const int numComponents = compInfo.size();

	// A single component simply sits at the origin.
	if (numComponents == 1)
	{
		Component& only = compInfo[0];
		only.m_compPos2D.resize(2);
		only.m_compPos2D[0] = only.m_compPos2D[1] = 0.f;
		finalRadius = only.m_radius;
		return;
	}

	// Pairwise dissimilarity matrix: 1 - cosine similarity of word attributes.
	// The radius and hash vectors for the MDS call are filled in the same pass.
	MatrixXd distMat(numComponents, numComponents);
	VectorXf radiusVec(numComponents);
	VectorXi hashVec(numComponents);
	for (int i = 0; i < numComponents; ++i)
	{
		Component& ci = compInfo[i];
		distMat(i,i) = 0.f;
		radiusVec[i] = ci.m_radius;
		hashVec[i]   = ci.m_compHash;
		for (int j = i+1; j < numComponents; ++j)
		{
			Component& cj = compInfo[j];
			double cosVal = ci.m_wordAttr.cosSimilarity(cj.m_wordAttr);
			distMat(i,j) = distMat(j,i) = 1.0 - cosVal;
		}
	}

	// MDS embeds the components in the plane; copy the result back.
	MatrixXf pos2D;
	Layouter::mds(distMat, radiusVec, hashVec, pos2D, finalRadius, m_wordSparseFactor, 0.01f);
	for (int i = 0; i < numComponents; ++i)
		compInfo[i].m_compPos2D = pos2D.row(i);
}
// Compute a 2D layout for a set of child nodes.
//
// The children are split into connected components using the vertex-edge
// incidence matrix (when valid), each component is laid out internally
// (layoutByGraph, or a trivial single-vertex layout as fallback), the
// components themselves are placed by word similarity (layoutByWord), and
// finally every child's global position is the sum of its component's
// position and its local position inside the component.
//
// vtxEdgeMatrix / edgeWeight may be NULL; in that case (or when their
// dimensions are inconsistent) every child becomes its own component and a
// warning flag is recorded in m_status.
//
// Outputs: childPos (one 2D row per child), childRadius (per-child radius),
// totalRadius (radius of the whole layout). Returns true on success.
bool ComponentLayouter::compute( const SparseMatrix* vtxEdgeMatrix, const VectorXd* edgeWeight, const QList<SymbolNode::Ptr>& childList, MatrixXf& childPos, VectorXf& childRadius, float& totalRadius)
{
	CHECK_ERRORS_RETURN_BOOL(m_status);

	// check edge data: the incidence matrix must be nVtx x nEdge with one
	// weight per edge, and both dimensions non-empty.
	int nVtx1 = childList.size();
	bool hasEdgeData = (vtxEdgeMatrix != NULL && edgeWeight != NULL );
	if (hasEdgeData)
	{
		int nVtx0 = vtxEdgeMatrix->rows();
		int nEdge0= vtxEdgeMatrix->cols();
		int nEdge1 = edgeWeight->size();
		if (!(nVtx0 == nVtx1 && nEdge0 == nEdge1 && nVtx0 > 0 && nEdge0 > 0))
			m_status |= WARNING_INVALID_EDGE_DATA;
	}
	else
		m_status |= WARNING_NO_EDGE;
	bool isVtxEdgeMatValid = (m_status & (WARNING_NO_EDGE | WARNING_INVALID_EDGE_DATA)) == 0;

	// fill child radius
	LayoutSetting::getChildrenRadius(childList, childRadius);

	QVector<Component> compInfo;
	// vtxCompIdx[v] = component of vertex v; vtxIdx[v] = index of v inside
	// that component (same scheme for edges).
	VectorXi vtxCompIdx, vtxIdx, edgeCompIdx, edgeIdx;
	QList<SparseMatrix> compVEMat;
	int nComp = 0;
	if (isVtxEdgeMatValid)
	{
		GraphUtility::splitConnectedComponents(*vtxEdgeMatrix, vtxCompIdx, vtxIdx, edgeCompIdx, edgeIdx, compVEMat);
		nComp = compVEMat.size();
	}
	else
	{
		// No usable edge data: every vertex forms a singleton component
		// with an empty incidence matrix.
		vtxCompIdx.resize(nVtx1);
		vtxIdx.resize(nVtx1);
		for (int ithVtx = 0; ithVtx < nVtx1; ++ithVtx)
		{
			vtxCompIdx[ithVtx] = ithVtx;
			vtxIdx[ithVtx] = 0;
			compVEMat.push_back(SparseMatrix());
		}
		nComp = nVtx1;
	}

	// fill component information array
	for (int ithComp = 0; ithComp < nComp; ++ithComp)
	{
		// NOTE(review): m_vtxRadius/m_hashID are sized to the TOTAL child
		// count rather than the per-component vertex count. vtxIdx stays
		// within bounds, so this is safe but looks oversized — confirm.
		Component& comp = compInfo[ithComp];
		comp.m_vtxRadius.resize(childRadius.size());
		comp.m_hashID.resize(childRadius.size());
		comp.m_pVEIncidenceMat = &compVEMat[ithComp];
		comp.m_edgeWeight.resize(compVEMat[ithComp].cols());
	}
	// Scatter per-vertex radius and hash into the owning component; the
	// component hash is the XOR of its vertex hashes.
	// NOTE(review): assumes Component's constructor zero-initializes
	// m_compHash before the XOR accumulation — confirm.
	for (int ithVtx = 0; ithVtx < vtxCompIdx.size(); ++ithVtx)
	{
		Component& comp = compInfo[vtxCompIdx[ithVtx]];
		comp.m_vtxRadius[vtxIdx[ithVtx]] = childRadius[ithVtx];
		unsigned vtxHash = childList[ithVtx]->getSymInfo().hash();
		comp.m_hashID[vtxIdx[ithVtx]] = vtxHash;
		comp.m_compHash = comp.m_compHash ^ vtxHash;
	}
	if (edgeWeight)
	{
		for (int ithEdge = 0; ithEdge < edgeCompIdx.size(); ++ithEdge)
		{
			compInfo[edgeCompIdx[ithEdge]].m_edgeWeight[edgeIdx[ithEdge]] = (*edgeWeight)[ithEdge];
		}
	}

	// layout within each components
	if (isVtxEdgeMatValid)
		layoutByGraph(compInfo);
	else
	{
		// set component result directly: each fallback component holds a
		// single vertex at its local origin.
		for (int ithComp = 0; ithComp < nComp; ++ithComp)
		{
			Component& comp = compInfo[ithComp];
			float r = childRadius[ithComp];
			comp.m_vtxRadius.setConstant(1, r);
			comp.m_radius = r;
			comp.m_localPos2D.setZero(1,2);
		}
	}

	// layout by word, first fill word attributes: each component's word
	// attribute is the union of its children's word attributes.
	for (int ithChild = 0; ithChild < childList.size(); ++ithChild)
	{
		const SymbolNode::Ptr& child = childList[ithChild];
		if (SymbolWordAttr::Ptr wordAttr = child->getAttr<SymbolWordAttr>())
		{
			int ithComp = vtxCompIdx[ithChild];
			Component& comp = compInfo[ithComp];
			comp.m_wordAttr.unite(*wordAttr);
		}
	}
	float finalRadius = 1.f;
	layoutByWord(compInfo, finalRadius);

	// set children's location: global position = component position +
	// local position within the component.
	childPos.resize(childList.size(), 2);
	childRadius.resize(childList.size());
	for (int ithChild = 0; ithChild < childList.size(); ++ithChild)
	{
		const SymbolNode::Ptr& child = childList[ithChild];
		int compID = vtxCompIdx[ithChild];
		int newID = vtxIdx[ithChild];
		Component& comp = compInfo[compID];
		childPos(ithChild, 0) = comp.m_compPos2D[0] + comp.m_localPos2D(newID,0);
		childPos(ithChild, 1) = comp.m_compPos2D[1] + comp.m_localPos2D(newID,1);
	}
	totalRadius = finalRadius;
	return true;
}
//************************************************************************************************************* //fwd_eeg_sphere_models.c FwdEegSphereModelSet* FwdEegSphereModelSet::fwd_load_eeg_sphere_models(const QString& filename, FwdEegSphereModelSet* now) { char line[MAXLINE]; FILE *fp = NULL; char *name = NULL; VectorXf rads; VectorXf sigmas; int nlayer = 0; char *one,*two; char *tag = NULL; if (!now) now = fwd_add_default_eeg_sphere_model(now); if (filename.isEmpty()) return now; #if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) if (_access(filename.toLatin1().data(),R_OK) != OK) /* Never mind about an unaccesible file */ return now; #else if (access(filename.toLatin1().data(),R_OK) != OK) /* Never mind about an unaccesible file */ return now; #endif if ((fp = fopen(filename.toLatin1().data(),"r")) == NULL) { printf(filename.toLatin1().data()); goto bad; } while (fgets(line,MAXLINE,fp) != NULL) { if (line[0] == '#') continue; one = strtok(line,SEP); if (one != NULL) { if (!tag || strlen(tag) == 0) name = mne_strdup_2(one); else { name = MALLOC_2(strlen(one)+strlen(tag)+10,char); sprintf(name,"%s %s",one,tag); } while (1) { one = strtok(NULL,SEP); if (one == NULL) break; two = strtok(NULL,SEP); if (two == NULL) break; rads.resize(nlayer+1); sigmas.resize(nlayer+1); if (sscanf(one,"%g",rads[nlayer]) != 1) { nlayer = 0; break; } if (sscanf(two,"%g",sigmas[nlayer]) != 1) { nlayer = 0; break; } nlayer++; } if (nlayer > 0) now = fwd_add_to_eeg_sphere_model_set(now,FwdEegSphereModel::fwd_create_eeg_sphere_model(name,nlayer,rads,sigmas)); nlayer = 0; } } if (ferror(fp)) { printf(filename.toLatin1().data()); goto bad; } fclose(fp); return now; bad : { if (fp) fclose(fp); delete now; return NULL; } }
int Pca::Calculate(vector<float> &x, const unsigned int &nrows, const unsigned int &ncols, const bool is_corr, const bool is_center, const bool is_scale) { _ncols = ncols; _nrows = nrows; _is_corr = is_corr; _is_center = is_center; _is_scale = is_scale; if (x.size()!= _nrows*_ncols) { return -1; } if ((1 == _ncols) || (1 == nrows)) { return -1; } // Convert vector to Eigen 2-dimensional matrix //Map<MatrixXf> _xXf(x.data(), _nrows, _ncols); _xXf.resize(_nrows, _ncols); for (unsigned int i = 0; i < _nrows; ++i) { for (unsigned int j = 0; j < _ncols; ++j) { _xXf(i, j) = x[j + i*_ncols]; } } // Mean and standard deviation for each column VectorXf mean_vector(_ncols); mean_vector = _xXf.colwise().mean(); VectorXf sd_vector(_ncols); unsigned int zero_sd_num = 0; float denom = static_cast<float>((_nrows > 1)? _nrows - 1: 1); for (unsigned int i = 0; i < _ncols; ++i) { VectorXf curr_col = VectorXf::Constant(_nrows, mean_vector(i)); // mean(x) for column x curr_col = _xXf.col(i) - curr_col; // x - mean(x) curr_col = curr_col.array().square(); // (x-mean(x))^2 sd_vector(i) = sqrt((curr_col.sum())/denom); if (0 == sd_vector(i)) { zero_sd_num++; } } // If colums with sd == 0 are too many, // don't continue calculations if (1 > _ncols-zero_sd_num) { return -1; } // Delete columns where sd == 0 MatrixXf tmp(_nrows, _ncols-zero_sd_num); VectorXf tmp_mean_vector(_ncols-zero_sd_num); unsigned int curr_col_num = 0; for (unsigned int i = 0; i < _ncols; ++i) { if (0 != sd_vector(i)) { tmp.col(curr_col_num) = _xXf.col(i); tmp_mean_vector(curr_col_num) = mean_vector(i); curr_col_num++; } else { _eliminated_columns.push_back(i); } } _ncols -= zero_sd_num; _xXf = tmp; mean_vector = tmp_mean_vector; tmp.resize(0, 0); tmp_mean_vector.resize(0); // Shift to zero if (true == _is_center) { for (unsigned int i = 0; i < _ncols; ++i) { _xXf.col(i) -= VectorXf::Constant(_nrows, mean_vector(i)); } } // Scale to unit variance if ( (false == _is_corr) || (true == _is_scale)) { for (unsigned int i = 
0; i < _ncols; ++i) { _xXf.col(i) /= sqrt(_xXf.col(i).array().square().sum()/denom); } } #ifdef DEBUG cout << "\nScaled matrix:\n"; cout << _xXf << endl; cout << "\nMean before scaling:\n" << mean_vector.transpose(); cout << "\nStandard deviation before scaling:\n" << sd_vector.transpose(); #endif // When _nrows < _ncols then svd will be used. // If corr is true and _nrows > _ncols then will be used correlation matrix // (TODO): What about covariance? if ( (_nrows < _ncols) || (false == _is_corr)) { // Singular Value Decomposition is on _method = "svd"; JacobiSVD<MatrixXf> svd(_xXf, ComputeThinV); VectorXf eigen_singular_values = svd.singularValues(); VectorXf tmp_vec = eigen_singular_values.array().square(); float tmp_sum = tmp_vec.sum(); tmp_vec /= tmp_sum; // PC's standard deviation and // PC's proportion of variance _kaiser = 0; unsigned int lim = (_nrows < _ncols)? _nrows : _ncols; for (unsigned int i = 0; i < lim; ++i) { _sd.push_back(eigen_singular_values(i)/sqrt(denom)); if (_sd[i] >= 1) { _kaiser = i + 1; } _prop_of_var.push_back(tmp_vec(i)); } #ifdef DEBUG cout << "\n\nStandard deviations for PCs:\n"; copy(_sd.begin(), _sd.end(),std::ostream_iterator<float>(std::cout," ")); cout << "\n\nKaiser criterion: PC #" << _kaiser << endl; #endif tmp_vec.resize(0); // PC's cumulative proportion _thresh95 = 1; _cum_prop.push_back(_prop_of_var[0]); for (unsigned int i = 1; i < _prop_of_var.size(); ++i) { _cum_prop.push_back(_cum_prop[i-1]+_prop_of_var[i]); if (_cum_prop[i] < 0.95) { _thresh95 = i+1; } } #ifdef DEBUG cout << "\nCumulative proportion:\n"; copy(_cum_prop.begin(), _cum_prop.end(),std::ostream_iterator<float>(std::cout," ")); cout << "\n\nThresh95 criterion: PC #" << _thresh95 << endl; #endif // Scores MatrixXf eigen_scores = _xXf * svd.matrixV(); #ifdef DEBUG cout << "\n\nRotated values (scores):\n" << eigen_scores; #endif _scores.reserve(lim*lim); for (unsigned int i = 0; i < lim; ++i) { for (unsigned int j = 0; j < lim; ++j) { 
_scores.push_back(eigen_scores(i, j)); } } eigen_scores.resize(0, 0); #ifdef DEBUG cout << "\n\nScores in vector:\n"; copy(_scores.begin(), _scores.end(),std::ostream_iterator<float>(std::cout," ")); cout << "\n"; #endif } else { // COR OR COV MATRICES ARE HERE _method = "cor"; // Calculate covariance matrix MatrixXf eigen_cov; // = MatrixXf::Zero(_ncols, _ncols); VectorXf sds; // (TODO) Should be weighted cov matrix, even if is_center == false eigen_cov = (1.0 /(_nrows/*-1*/)) * _xXf.transpose() * _xXf; sds = eigen_cov.diagonal().array().sqrt(); MatrixXf outer_sds = sds * sds.transpose(); eigen_cov = eigen_cov.array() / outer_sds.array(); outer_sds.resize(0, 0); // ?If data matrix is scaled, covariance matrix is equal to correlation matrix #ifdef DEBUG cout << eigen_cov << endl; #endif EigenSolver<MatrixXf> edc(eigen_cov); VectorXf eigen_eigenvalues = edc.eigenvalues().real(); #ifdef DEBUG cout << endl << eigen_eigenvalues.transpose() << endl; #endif MatrixXf eigen_eigenvectors = edc.eigenvectors().real(); #ifdef DEBUG cout << endl << eigen_eigenvectors << endl; #endif // The eigenvalues and eigenvectors are not sorted in any particular order. 
// So, we should sort them typedef pair<float, int> eigen_pair; vector<eigen_pair> ep; for (unsigned int i = 0 ; i < _ncols; ++i) { ep.push_back(make_pair(eigen_eigenvalues(i), i)); } sort(ep.begin(), ep.end()); // Ascending order by default // Sort them all in descending order MatrixXf eigen_eigenvectors_sorted = MatrixXf::Zero(eigen_eigenvectors.rows(), eigen_eigenvectors.cols()); VectorXf eigen_eigenvalues_sorted = VectorXf::Zero(_ncols); int colnum = 0; int i = ep.size()-1; for (; i > -1; i--) { eigen_eigenvalues_sorted(colnum) = ep[i].first; eigen_eigenvectors_sorted.col(colnum++) += eigen_eigenvectors.col(ep[i].second); } #ifdef DEBUG cout << endl << eigen_eigenvalues_sorted.transpose() << endl; cout << endl << eigen_eigenvectors_sorted << endl; #endif // We don't need not sorted arrays anymore eigen_eigenvalues.resize(0); eigen_eigenvectors.resize(0, 0); _sd.clear(); _prop_of_var.clear(); _kaiser = 0; float tmp_sum = eigen_eigenvalues_sorted.sum(); for (unsigned int i = 0; i < _ncols; ++i) { _sd.push_back(sqrt(eigen_eigenvalues_sorted(i))); if (_sd[i] >= 1) { _kaiser = i + 1; } _prop_of_var.push_back(eigen_eigenvalues_sorted(i)/tmp_sum); } #ifdef DEBUG cout << "\nStandard deviations for PCs:\n"; copy(_sd.begin(), _sd.end(), std::ostream_iterator<float>(std::cout," ")); cout << "\nProportion of variance:\n"; copy(_prop_of_var.begin(), _prop_of_var.end(), std::ostream_iterator<float>(std::cout," ")); cout << "\nKaiser criterion: PC #" << _kaiser << endl; #endif // PC's cumulative proportion _cum_prop.clear(); _thresh95 = 1; _cum_prop.push_back(_prop_of_var[0]); for (unsigned int i = 1; i < _prop_of_var.size(); ++i) { _cum_prop.push_back(_cum_prop[i-1]+_prop_of_var[i]); if (_cum_prop[i] < 0.95) { _thresh95 = i+1; } } #ifdef DEBUG cout << "\n\nCumulative proportions:\n"; copy(_cum_prop.begin(), _cum_prop.end(), std::ostream_iterator<float>(std::cout," ")); cout << "\n\n95% threshold: PC #" << _thresh95 << endl; #endif // Scores for PCA with correlation matrix // 
Scale before calculating new values for (unsigned int i = 0; i < _ncols; ++i) { _xXf.col(i) /= sds(i); } sds.resize(0); MatrixXf eigen_scores = _xXf * eigen_eigenvectors_sorted; #ifdef DEBUG cout << "\n\nRotated values (scores):\n" << eigen_scores; #endif _scores.clear(); _scores.reserve(_ncols*_nrows); for (unsigned int i = 0; i < _nrows; ++i) { for (unsigned int j = 0; j < _ncols; ++j) { _scores.push_back(eigen_scores(i, j)); } } eigen_scores.resize(0, 0); #ifdef DEBUG cout << "\n\nScores in vector:\n"; copy(_scores.begin(), _scores.end(), std::ostream_iterator<float>(std::cout," ")); cout << "\n"; #endif } return 0; }
//************************************************************************************************************* //fwd_eeg_sphere_models.c FwdEegSphereModelSet* FwdEegSphereModelSet::fwd_load_eeg_sphere_models(const QString& filename, FwdEegSphereModelSet* now) { char line[MAXLINE]; FILE *fp = NULL; QString name; VectorXf rads; VectorXf sigmas; int nlayer = 0; char *one, *two; QString tag; if (!now) now = fwd_add_default_eeg_sphere_model(now); if (filename.isEmpty()) return now; QFile t_file(filename); if (!t_file.isReadable()) /* Never mind about an unaccesible file */ return now; if ((fp = fopen(filename.toUtf8().data(),"r")) == NULL) { printf(filename.toUtf8().data()); goto bad; } while (fgets(line,MAXLINE,fp) != NULL) { if (line[0] == '#') continue; one = strtok(line,SEP); if (one != NULL) { if (tag.isEmpty() || tag.size() == 0) name = one; else { name = QString("%1 %2").arg(one).arg(tag); } while (1) { one = strtok(NULL,SEP); if (one == NULL) break; two = strtok(NULL,SEP); if (two == NULL) break; rads.resize(nlayer+1); sigmas.resize(nlayer+1); if (sscanf(one,"%g",rads[nlayer]) != 1) { nlayer = 0; break; } if (sscanf(two,"%g",sigmas[nlayer]) != 1) { nlayer = 0; break; } nlayer++; } if (nlayer > 0) now = fwd_add_to_eeg_sphere_model_set(now,FwdEegSphereModel::fwd_create_eeg_sphere_model(name,nlayer,rads,sigmas)); nlayer = 0; } } if (ferror(fp)) { printf(filename.toUtf8().data()); goto bad; } fclose(fp); return now; bad : { if (fp) fclose(fp); delete now; return NULL; } }