void SOGP::reduceBasisVectorSet(unsigned int index) {
    unsigned int end = this->current_size - 1;
    VectorXd zero_vector = VectorXd::Zero(this->current_size);

    // Move the last row of alpha into the slot being removed and clear the last row
    VectorXd alpha_star = this->alpha.row(index);
    VectorXd last_item = this->alpha.row(end);
    alpha.block(index, 0, 1, this->output_dim) = last_item.transpose();
    alpha.block(end, 0, 1, this->output_dim) = VectorXd::Zero(this->output_dim).transpose();

    // Column of C belonging to the removed basis vector (last entry swapped into place)
    double cstar = this->C(index, index);
    VectorXd Cstar = this->C.col(index);
    Cstar(index) = Cstar(end);
    Cstar.conservativeResize(end);

    // Overwrite row/column `index` of C with the last row/column, then zero the last one
    VectorXd Crep = C.col(end);
    Crep(index) = Crep(end);
    C.block(index, 0, 1, this->current_size) = Crep.transpose();
    C.block(0, index, this->current_size, 1) = Crep;
    C.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    C.block(0, end, this->current_size, 1) = zero_vector;

    // Same swap-and-zero for Q
    double qstar = this->Q(index, index);
    VectorXd Qstar = this->Q.col(index);
    Qstar(index) = Qstar(end);
    Qstar.conservativeResize(end);

    VectorXd Qrep = Q.col(end);
    Qrep(index) = Qrep(end);
    Q.block(index, 0, 1, this->current_size) = Qrep.transpose();
    Q.block(0, index, this->current_size, 1) = Qrep;
    Q.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    Q.block(0, end, this->current_size, 1) = zero_vector;

    // Fold the removed basis vector's contribution back into alpha, C and Q
    VectorXd qc = (Qstar + Cstar) / (qstar + cstar);
    for (unsigned int i = 0; i < this->output_dim; i++) {
        VectorXd diffAlpha = alpha.block(0, i, end, 1) - alpha_star(i) * qc;
        alpha.block(0, i, end, 1) = diffAlpha;
    }

    MatrixXd oldC = C.block(0, 0, end, end);
    C.block(0, 0, end, end) = oldC + (Qstar * Qstar.transpose()) / qstar
                            - ((Qstar + Cstar) * ((Qstar + Cstar).transpose())) / (qstar + cstar);

    MatrixXd oldQ = Q.block(0, 0, end, end);
    Q.block(0, 0, end, end) = oldQ - (Qstar * Qstar.transpose()) / qstar;

    // Swap the last basis vector into the freed slot and shrink the set
    this->basis_vectors[index] = this->basis_vectors[end];
    this->basis_vectors.pop_back();
    this->current_size = end;
}
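// A minimal standalone sketch (an assumed helper, not part of the SOGP class) of the
// swap-and-shrink idea used by reduceBasisVectorSet(): to drop row/column `index` of a
// symmetric matrix, overwrite it with the last row/column and then shrink with
// conservativeResize(), which keeps the surviving coefficients in place.
#include <Eigen/Dense>

static void removeSymmetricRowCol(Eigen::MatrixXd &M, Eigen::Index index) {
    const Eigen::Index last = M.rows() - 1;
    if (index < last) {
        M.row(index) = M.row(last);   // move the last row into the freed slot
        M.col(index) = M.col(last);   // move the last column into the freed slot
    }
    M.conservativeResize(last, last); // discard the now-redundant last row/column
}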
void removeElem(VectorXd &vec, const int elemToRemove) {
    int elems = vec.size() - 1;
    if (elemToRemove <= elems) {
        // Copy the last element into the element to remove
        if (elemToRemove < elems) {
            vec(elemToRemove) = vec(elems);
        }
        vec.conservativeResize(elems);
    }
}
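// A minimal usage sketch for removeElem() above (the sample values are illustrative):
// the removed element is replaced by the last one, so ordering is not preserved, and
// conservativeResize() keeps the remaining coefficients intact.
#include <iostream>
#include <Eigen/Dense>

int main() {
    Eigen::VectorXd v(4);
    v << 1.0, 2.0, 3.0, 4.0;
    removeElem(v, 1);                        // v is now [1, 4, 3]
    std::cout << v.transpose() << std::endl; // prints "1 4 3"
    return 0;
}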
// phi contains mu1, mu2, rho
// prior contains mu0, T0 and S0
double bbivF(VectorXd eps, VectorXd phi, List prior) {
    // BVN density in terms of rho
    const double rho = phi[2], rho2 = pow(rho, 2.);

    MatrixXd T;
    T.setConstant(2, 2, pow(1. - rho2, -1.));
    T(0, 1) *= -rho;
    T(1, 0) *= -rho;

    VectorXd mu = phi;
    mu.conservativeResize(2);

    eps.conservativeResize(2); // semi

    eps -= mu;

    return pow(1. - rho2, -0.5) * exp(-0.5 * (eps.transpose() * T * eps)[0]);
}
void addConstraint(const measurement& _meas) {
    t_managing_ = clock();

    assert(_meas.jacobians.size() == _meas.nodes_idx.size());
    assert(_meas.error.size() == _meas.dim);

    n_measurements++;
    measurements_.push_back(_meas);
    measurements_.back().location = A_.rows();

    // Resize problem
    A_.conservativeResize(A_.rows() + _meas.dim, A_.cols());
    b_.conservativeResize(b_.size() + _meas.dim);
    A_nodes_.conservativeResize(n_measurements, n_nodes_);

    // ADD MEASUREMENTS
    first_ordered_node_ = n_nodes_;
    for (unsigned int j = 0; j < _meas.nodes_idx.size(); j++) {
        assert(acc_node_permutation_.indices()(_meas.nodes_idx.at(j)) == nodes_.at(_meas.nodes_idx.at(j)).order);

        int ordered_node = nodes_.at(_meas.nodes_idx.at(j)).order; //acc_permutation_nodes_.indices()(_nodes_idx.at(j));

        addSparseBlock(_meas.jacobians.at(j), A_, A_.rows() - _meas.dim, nodes_.at(_meas.nodes_idx.at(j)).location);

        A_nodes_.coeffRef(A_nodes_.rows() - 1, ordered_node) = 1;

        assert(_meas.jacobians.at(j).cols() == nodes_.at(_meas.nodes_idx.at(j)).dim);
        assert(_meas.jacobians.at(j).rows() == _meas.dim);

        // store minimum ordered node
        if (first_ordered_node_ > ordered_node)
            first_ordered_node_ = ordered_node;
    }

    // error
    b_.tail(_meas.dim) = _meas.error;

    time_managing_ += ((double) clock() - t_managing_) / CLOCKS_PER_SEC;
}
void add_state_unit(const int node_dim, const int node_idx) {
    t_managing_ = clock();

    n_nodes_++;
    nodes_.push_back(node(node_idx, node_dim, x_incr_.size(), n_nodes_ - 1));

    // Resize accumulated permutations
    augment_permutation(acc_node_permutation_, n_nodes_);

    // Resize state
    x_incr_.conservativeResize(x_incr_.size() + node_dim);

    // Resize problem
    A_.conservativeResize(A_.rows(), A_.cols() + node_dim);
    R_.conservativeResize(R_.cols() + node_dim, R_.cols() + node_dim);
    //A_nodes_.conservativeResize(n_measurements, n_nodes); // not necessary

    time_managing_ += ((double) clock() - t_managing_) / CLOCKS_PER_SEC;
}
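// A cautionary sketch (standalone, not part of the solver class above): for dense Eigen
// objects, conservativeResize() preserves the existing coefficients but leaves the newly
// added rows/columns uninitialized (sparse matrices simply gain empty rows/columns), so
// appended state such as the new tail of x_incr_ needs to be written before it is read.
// A plain-dense illustration:
#include <Eigen/Dense>

static void appendZeroColumns(Eigen::MatrixXd &A, Eigen::Index extraCols) {
    const Eigen::Index oldCols = A.cols();
    A.conservativeResize(A.rows(), oldCols + extraCols);
    A.rightCols(extraCols).setZero(); // explicitly initialize the appended block
}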
VectorXd bbivP0(MatrixXd A, List prior, VectorXd phi) {
    const int n = A.rows(); //, rho_MH=as<int>(prior["rho_MH"]);

#ifdef DEBUG_NEAL8
    Rcout << "\npre:" << phi;
#endif

    /* this Gibbs conditional is WRONG; see semi block below
    RowVectorXd mu=phi;
    mu.conservativeResize(2);

    MatrixXd D(n, 2);
    for(int i=0; i<n; ++i) D.row(i) = eps.row(i)-mu;

    MatrixXd DtD=D.transpose()*D;

    double rho=phi[2],
        zeta=corrMH(DtD(0, 0)+DtD(1, 1), DtD(0, 1), n, JKB(rho));

    rho=JKB_inverse(zeta);
    */

    MatrixXd eps = A.block(0, 0, n, 2); // semi

    const double rho = phi[2], zeta = JKB(rho); // semi

    Vector2d mean_eps = mean(eps).transpose(),
             mu0 = as< Map<VectorXd> >(prior["mu0"]);

    Matrix2d T0 = as< Map<MatrixXd> >(prior["T0"]), Tj, Sigma, Tprec;

    Tj << 1., -rho, -rho, 1.;
    Tj *= (double(n) / (1. - pow(rho, 2.)));

    Tprec = T0 + Tj;
    Sigma = Tprec.inverse();

    Vector2d mean_mu = Sigma * (T0 * mu0 + Tj * mean_eps);

    phi = rnorm2d(mean_mu, Sigma);
    phi.conservativeResize(3);

    /* semi block begins */
    const double beta = as<double>(prior["beta"]);

    const VectorXd t = A.col(2), y = A.col(3),
        gamma = as< Map<VectorXd> >(prior["gamma"]),
        delta = as< Map<VectorXd> >(prior["delta"]),
        eta   = as< Map<VectorXd> >(prior["eta"]);

    const int p = gamma.size(), q = delta.size();

    MatrixXd D(n, 2), X = A.block(0, 4, n, p), Z = A.block(0, p + 4, n, q);

    D.col(0).setConstant(phi[0]);
    D.col(1).setConstant(phi[1]);

    D.col(0) += (X*gamma + Z*delta);         //mutmargin
    D.col(1) += (beta*D.col(0) + X*eta);     //muymargin
    D.col(0) = t - D.col(0);                 //tdev=t-mutmargin
    D.col(1) = y - D.col(1) - beta*D.col(0); //ycondtdev=y-muymargin-b*tdev

    Matrix2d SSCP = D.transpose() * D;

    phi[2] = corrMN(SSCP(0, 0) + SSCP(1, 1), SSCP(0, 1), n);
    // #ifdef CORRBETAMH
    //     phi[2]=corrBetaMH(SSCP(0, 0)+SSCP(1, 1), SSCP(0, 1), n, rho);
    // #else
    //     phi[2]=JKB_inverse(corrMH(SSCP(0, 0)+SSCP(1, 1), SSCP(0, 1), n, zeta, rho_MH));
    // #endif
    /* semi block ends */

#ifdef DEBUG_NEAL8
    Rcout << "\npost:" << phi << "\nbeta:" << beta << "\ngamma:" << gamma
          << "\ndelta:" << delta << "\neta:" << eta << "\nn:" << n
          << "\nA:\n" << A << '\n';
#endif

    return phi;
}
void SOGP::train(const VectorXd &state, const VectorXd &output) {
    //check if we have initialised the system
    if (!this->initialized) {
        throw OTLException("SOGP not yet initialised");
    }

    double kstar = this->kernel->eval(state);

    //change the output format if this is a classification problem
    VectorXd mod_output;
    if (this->problem_type == SOGP::CLASSIFICATION) {
        mod_output = VectorXd::Zero(this->output_dim);
        for (unsigned int i = 0; i < this->output_dim; i++) {
            mod_output(i) = -1;
        }
        mod_output(output(0)) = 1;
    } else {
        mod_output = output;
    }

    //we are just starting.
    if (this->current_size == 0) {
        this->alpha.block(0, 0, 1, this->output_dim) = (mod_output.array() / (kstar + this->noise)).transpose();
        this->C.block(0, 0, 1, 1) = VectorXd::Ones(1) * -1 / (kstar + this->noise);
        this->Q.block(0, 0, 1, 1) = VectorXd::Ones(1) * 1 / (kstar);

        this->basis_vectors.push_back(state);
        this->current_size++;
        return;
    }

    //Test if this is a "novel" state
    VectorXd k;
    this->kernel->eval(state, this->basis_vectors, k);

    //cache Ck
    VectorXd Ck = this->C.block(0, 0, this->current_size, this->current_size) * k;

    VectorXd m = k.transpose() * this->alpha.block(0, 0, this->current_size, this->output_dim);

    double s2 = kstar + (k.dot(Ck));

    if (s2 < 1e-12) {
        //std::cout << "s2: " << s2 << std::endl;
        s2 = 1e-12;
    }

    double r = 0.0;
    VectorXd q;

    if (this->problem_type == SOGP::REGRESSION) {
        r = -1.0 / (s2 + this->noise);
        q = (mod_output - m) * (-r);
    } else if (this->problem_type == SOGP::CLASSIFICATION) {
        double sx2 = this->noise + s2;
        double sx = sqrt(sx2);
        VectorXd z = VectorXd(this->output_dim);
        VectorXd Erfz = VectorXd(this->output_dim);
        for (unsigned int i = 0; i < this->output_dim; i++) {
            z(i) = mod_output(i) * m(i) / sx;
            Erfz(i) = stdnormcdf(z(i));
            //dErfz(i) = 1.0/sqrt(2*M_PI)*exp(-(z(i)*z(i))/2.0);
            //dErfz2(i) = dErfz(i)*(-z(i));
        }

        /* TO CONTINUE
        Erfz = Erf(z);

        dErfz = 1.0/sqrt(2*pi)*exp(-(z.^2)/2);
        dErfz2 = dErfz.*(-z);

        q = y/sx * (dErfz/Erfz);
        r = (1/sx2)*(dErfz2/dErfz - (dErfz/Erfz)^2);
        */
    } else {
        throw OTL::OTLException("Whoops! My problem type is wrong. How did this happen?");
    }

    VectorXd ehat = this->Q.block(0, 0, this->current_size, this->current_size) * k;

    double gamma = kstar - k.dot(ehat);
    double eta = 1.0 / (1.0 + gamma * r);

    if (gamma < 1e-12) {
        gamma = 0.0;
    }

    if (gamma >= this->epsilon * kstar) {
        //perform a full update
        VectorXd s = Ck;
        s.conservativeResize(this->current_size + 1);
        s(this->current_size) = 1;

        //update Q (inverse of C)
        ehat.conservativeResize(this->current_size + 1);
        ehat(this->current_size) = -1;

        MatrixXd diffQ = Q.block(0, 0, this->current_size + 1, this->current_size + 1)
                         + (ehat * ehat.transpose()) * (1.0 / gamma);
        Q.block(0, 0, this->current_size + 1, this->current_size + 1) = diffQ;

        //update alpha
        MatrixXd diffAlpha = alpha.block(0, 0, this->current_size + 1, this->output_dim) + (s * q.transpose());
        alpha.block(0, 0, this->current_size + 1, this->output_dim) = diffAlpha;

        //update C
        MatrixXd diffC = C.block(0, 0, this->current_size + 1, this->current_size + 1) + r * (s * s.transpose());
        C.block(0, 0, this->current_size + 1, this->current_size + 1) = diffC;

        //add to basis vectors
        this->basis_vectors.push_back(state);

        //increment current size
        this->current_size++;
    } else {
        //perform a sparse update
        VectorXd s = Ck + ehat;

        //update alpha
        MatrixXd diffAlpha = alpha.block(0, 0, this->current_size, this->output_dim) + s * ((q * eta).transpose());
        alpha.block(0, 0, this->current_size, this->output_dim) = diffAlpha;

        //update C
        MatrixXd diffC = C.block(0, 0, this->current_size, this->current_size) + r * eta * (s * s.transpose());
        C.block(0, 0, this->current_size, this->current_size) = diffC;
    }

    //check if we need to reduce size
    if (this->basis_vectors.size() > this->capacity) {
        //std::cout << "Reduction!" << std::endl;
        double min_val = (alpha.row(0)).squaredNorm() / (Q(0, 0) + C(0, 0));
        unsigned int min_index = 0;
        for (unsigned int i = 1; i < this->basis_vectors.size(); i++) {
            double scorei = (alpha.row(i)).squaredNorm() / (Q(i, i) + C(i, i));
            if (scorei < min_val) {
                min_val = scorei;
                min_index = i;
            }
        }

        this->reduceBasisVectorSet(min_index);
    }

    return;
}
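// A standalone restatement (local names, not part of the SOGP class) of the pruning rule
// applied at the end of train(): when the basis set exceeds its capacity, the basis vector
// with the smallest score |alpha_i|^2 / (Q_ii + C_ii) is removed, since it perturbs the
// current approximation the least.
#include <Eigen/Dense>

static Eigen::Index lowestScoringBasisVector(const Eigen::MatrixXd &alpha,
                                             const Eigen::MatrixXd &Q,
                                             const Eigen::MatrixXd &C,
                                             Eigen::Index n) {
    Eigen::Index minIndex = 0;
    double minVal = alpha.row(0).squaredNorm() / (Q(0, 0) + C(0, 0));
    for (Eigen::Index i = 1; i < n; ++i) {
        const double score = alpha.row(i).squaredNorm() / (Q(i, i) + C(i, i));
        if (score < minVal) {
            minVal = score;
            minIndex = i;
        }
    }
    return minIndex;
}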
MatrixXd MNEInverseOperator::cluster_kernel(const AnnotationSet &p_AnnotationSet, qint32 p_iClusterSize, MatrixXd& p_D) const {
    MatrixXd p_outMT = this->m_K.transpose();

    QList<MNEClusterInfo> t_qListMNEClusterInfo;
    MNEClusterInfo t_MNEClusterInfo;
    t_qListMNEClusterInfo.append(t_MNEClusterInfo);
    t_qListMNEClusterInfo.append(t_MNEClusterInfo);

    //
    // Check consistency
    //
    if(this->isFixedOrient()) {
        printf("Error: Fixed orientation not implemented yet!\n");
        return p_outMT;
    }

//    qDebug() << "p_outMT" << p_outMT.rows() << "x" << p_outMT.cols();

//    MatrixXd t_G_Whitened(0,0);
//    bool t_bUseWhitened = false;
//
//    //
//    //Whiten gain matrix before clustering -> cause different units Magnetometer, Gradiometer and EEG
//    //
//    if(!p_pNoise_cov.isEmpty() && !p_pInfo.isEmpty())
//    {
//        FiffInfo p_outFwdInfo;
//        FiffCov p_outNoiseCov;
//        MatrixXd p_outWhitener;
//        qint32 p_outNumNonZero;
//        //do whitening with noise cov
//        this->prepare_forward(p_pInfo, p_pNoise_cov, false, p_outFwdInfo, t_G_Whitened, p_outNoiseCov, p_outWhitener, p_outNumNonZero);
//        printf("\tWhitening the forward solution.\n");
//        t_G_Whitened = p_outWhitener*t_G_Whitened;
//        t_bUseWhitened = true;
//    }

    //
    // Assemble input data
    //
    qint32 count;
    qint32 offset;

    MatrixXd t_MT_new;

    for(qint32 h = 0; h < this->src.size(); ++h ) {
        count = 0;
        offset = 0;

        // Offset for continuous indexing;
        if(h > 0)
            for(qint32 j = 0; j < h; ++j)
                offset += this->src[j].nuse;

        if(h == 0)
            printf("Cluster Left Hemisphere\n");
        else
            printf("Cluster Right Hemisphere\n");

        Colortable t_CurrentColorTable = p_AnnotationSet[h].getColortable();
        VectorXi label_ids = t_CurrentColorTable.getLabelIds();

        // Get label ids for every vertex
        VectorXi vertno_labeled = VectorXi::Zero(this->src[h].vertno.rows());

        //ToDo make this more universal -> using Label instead of annotations - obsolete when using Labels
        for(qint32 i = 0; i < vertno_labeled.rows(); ++i)
            vertno_labeled[i] = p_AnnotationSet[h].getLabelIds()[this->src[h].vertno[i]];

        //Qt Concurrent List
        QList<RegionMT> m_qListRegionMTIn;

        //
        // Generate cluster input data
        //
        for (qint32 i = 0; i < label_ids.rows(); ++i) {
            if (label_ids[i] != 0) {
                QString curr_name = t_CurrentColorTable.struct_names[i]; //obj.label2AtlasName(label(i));
                printf("\tCluster %d / %li %s...", i+1, label_ids.rows(), curr_name.toUtf8().constData());

                //
                // Get source space indices
                //
                VectorXi idcs = VectorXi::Zero(vertno_labeled.rows());
                qint32 c = 0;

                //Select ROIs //change this use label info with a hash table
                for(qint32 j = 0; j < vertno_labeled.rows(); ++j) {
                    if(vertno_labeled[j] == label_ids[i]) {
                        idcs[c] = j;
                        ++c;
                    }
                }
                idcs.conservativeResize(c);

                //get selected MT
                MatrixXd t_MT(p_outMT.rows(), idcs.rows()*3);

                for(qint32 j = 0; j < idcs.rows(); ++j)
                    t_MT.block(0, j*3, t_MT.rows(), 3) = p_outMT.block(0, (idcs[j]+offset)*3, t_MT.rows(), 3);

                qint32 nSens = t_MT.rows();
                qint32 nSources = t_MT.cols()/3;

                if (nSources > 0) {
                    RegionMT t_sensMT;

                    t_sensMT.idcs = idcs;
                    t_sensMT.iLabelIdxIn = i;
                    t_sensMT.nClusters = ceil((double)nSources/(double)p_iClusterSize);

                    t_sensMT.matRoiMTOrig = t_MT;

                    printf("%d Cluster(s)... ", t_sensMT.nClusters);

                    // Reshape Input data -> sources rows; sensors columns
                    t_sensMT.matRoiMT = MatrixXd(t_MT.cols()/3, 3*nSens);
                    for(qint32 j = 0; j < nSens; ++j)
                        for(qint32 k = 0; k < t_sensMT.matRoiMT.rows(); ++k)
                            t_sensMT.matRoiMT.block(k,j*3,1,3) = t_MT.block(j,k*3,1,3);

                    m_qListRegionMTIn.append(t_sensMT);

                    printf("[added]\n");
                } else {
                    printf("failed! Label contains no sources.\n");
                }
            }
        }

        //
        // Calculate clusters
        //
        printf("Clustering... ");

        QFuture< RegionMTOut > res;
        res = QtConcurrent::mapped(m_qListRegionMTIn, &RegionMT::cluster);
        res.waitForFinished();

        //
        // Assign results
        //
        MatrixXd t_MT_partial;

        qint32 nClusters;
        qint32 nSens;
        QList<RegionMT>::const_iterator itIn;
        itIn = m_qListRegionMTIn.begin();
        QFuture<RegionMTOut>::const_iterator itOut;
        for (itOut = res.constBegin(); itOut != res.constEnd(); ++itOut) {
            nClusters = itOut->ctrs.rows();
            nSens = itOut->ctrs.cols()/3;
            t_MT_partial = MatrixXd::Zero(nSens, nClusters*3);

//            std::cout << "Number of Clusters: " << nClusters << " x " << nSens << std::endl;//itOut->iLabelIdcsOut << std::endl;

            //
            // Assign the centroid for each cluster to the partial G
            //
            //ToDo change this use indices found with whitened data
            for(qint32 j = 0; j < nSens; ++j)
                for(qint32 k = 0; k < nClusters; ++k)
                    t_MT_partial.block(j, k*3, 1, 3) = itOut->ctrs.block(k,j*3,1,3);

            //
            // Get cluster indices and its distances to the centroid
            //
            for(qint32 j = 0; j < nClusters; ++j) {
                VectorXi clusterIdcs = VectorXi::Zero(itOut->roiIdx.rows());
                VectorXd clusterDistance = VectorXd::Zero(itOut->roiIdx.rows());
                qint32 nClusterIdcs = 0;
                for(qint32 k = 0; k < itOut->roiIdx.rows(); ++k) {
                    if(itOut->roiIdx[k] == j) {
                        clusterIdcs[nClusterIdcs] = itIn->idcs[k];
//                        qint32 offset = h == 0 ? 0 : this->src[0].nuse;
//                        Q_UNUSED(offset)
                        clusterDistance[nClusterIdcs] = itOut->D(k,j);
                        ++nClusterIdcs;
                    }
                }
                clusterIdcs.conservativeResize(nClusterIdcs);
                clusterDistance.conservativeResize(nClusterIdcs);

                VectorXi clusterVertnos = VectorXi::Zero(clusterIdcs.size());
                for(qint32 k = 0; k < clusterVertnos.size(); ++k)
                    clusterVertnos(k) = this->src[h].vertno[clusterIdcs(k)];

                t_qListMNEClusterInfo[h].clusterVertnos.append(clusterVertnos);
            }

            //
            // Assign partial G to new LeadField
            //
            if(t_MT_partial.rows() > 0 && t_MT_partial.cols() > 0) {
                t_MT_new.conservativeResize(t_MT_partial.rows(), t_MT_new.cols() + t_MT_partial.cols());
                t_MT_new.block(0, t_MT_new.cols() - t_MT_partial.cols(), t_MT_new.rows(), t_MT_partial.cols()) = t_MT_partial;

                // Map the centroids to the closest rr
                for(qint32 k = 0; k < nClusters; ++k) {
                    qint32 j = 0;

                    double sqec = sqrt((itIn->matRoiMTOrig.block(0, j*3, itIn->matRoiMTOrig.rows(), 3) - t_MT_partial.block(0, k*3, t_MT_partial.rows(), 3)).array().pow(2).sum());
                    double sqec_min = sqec;
                    qint32 j_min = 0;
//                    MatrixXd matGainDiff;

                    for(qint32 j = 1; j < itIn->idcs.rows(); ++j) {
                        sqec = sqrt((itIn->matRoiMTOrig.block(0, j*3, itIn->matRoiMTOrig.rows(), 3) - t_MT_partial.block(0, k*3, t_MT_partial.rows(), 3)).array().pow(2).sum());

                        if(sqec < sqec_min) {
                            sqec_min = sqec;
                            j_min = j;
//                            matGainDiff = itIn->matRoiGOrig.block(0, j*3, itIn->matRoiGOrig.rows(), 3) - t_G_partial.block(0, k*3, t_G_partial.rows(), 3);
                        }
                    }

//                    qListGainDist.append(matGainDiff);

                    // Take the closest coordinates
//                    qint32 sel_idx = itIn->idcs[j_min];
//                    Q_UNUSED(sel_idx)

//                    //vertices
//                    std::cout << this->src[h].vertno[sel_idx] << ", ";

                    ++count;
                }
            }
            ++itIn;
        }
        printf("[done]\n");
    }

    //
    // Cluster operator D (sources x clusters)
    //
    qint32 totalNumOfClust = 0;
    for (qint32 h = 0; h < 2; ++h)
        totalNumOfClust += t_qListMNEClusterInfo[h].clusterVertnos.size();

    if(this->isFixedOrient())
        p_D = MatrixXd::Zero(p_outMT.cols(), totalNumOfClust);
    else
        p_D = MatrixXd::Zero(p_outMT.cols(), totalNumOfClust*3);

    QList<VectorXi> t_vertnos = this->src.get_vertno();

//    qDebug() << "Size: " << t_vertnos[0].size() << t_vertnos[1].size();
//    qDebug() << "this->sol->data.cols(): " << this->sol->data.cols();

    qint32 currentCluster = 0;
    for (qint32 h = 0; h < 2; ++h) {
        int hemiOffset = h == 0 ? 0 : t_vertnos[0].size();
        for(qint32 i = 0; i < t_qListMNEClusterInfo[h].clusterVertnos.size(); ++i) {
            VectorXi idx_sel;
            MNEMath::intersect(t_vertnos[h], t_qListMNEClusterInfo[h].clusterVertnos[i], idx_sel);

//            std::cout << "\nVertnos:\n" << t_vertnos[h] << std::endl;
//            std::cout << "clusterVertnos[i]:\n" << t_qListMNEClusterInfo[h].clusterVertnos[i] << std::endl;

            idx_sel.array() += hemiOffset;

//            std::cout << "idx_sel]:\n" << idx_sel << std::endl;

            double selectWeight = 1.0/idx_sel.size();
            if(this->isFixedOrient()) {
                for(qint32 j = 0; j < idx_sel.size(); ++j)
                    p_D.col(currentCluster)[idx_sel(j)] = selectWeight;
            } else {
                qint32 clustOffset = currentCluster*3;
                for(qint32 j = 0; j < idx_sel.size(); ++j) {
                    qint32 idx_sel_Offset = idx_sel(j)*3;
                    //x
                    p_D(idx_sel_Offset, clustOffset) = selectWeight;
                    //y
                    p_D(idx_sel_Offset+1, clustOffset+1) = selectWeight;
                    //z
                    p_D(idx_sel_Offset+2, clustOffset+2) = selectWeight;
                }
            }
            ++currentCluster;
        }
    }

//    std::cout << "D:\n" << D.row(0) << std::endl << D.row(1) << std::endl << D.row(2) << std::endl << D.row(3) << std::endl << D.row(4) << std::endl << D.row(5) << std::endl;

    //
    // Put it all together
    //
    p_outMT = t_MT_new;

    return p_outMT;
}