bool KMeans::onlineUpdate(MatrixXd& X, MatrixXd& C, VectorXi& idx) { // Initialize some cluster information prior to phase two MatrixXd Xmid1; MatrixXd Xmid2; if (m_sDistance.compare("cityblock") == 0) { Xmid1 = MatrixXd::Zero(k,p); Xmid2 = MatrixXd::Zero(k,p); for(qint32 i = 0; i < k; ++i) { if (m[i] > 0) { // Separate out sorted coords for points in i'th cluster, // and save values above and below median, component-wise MatrixXd Xsorted(m[i],p); qint32 c = 0; for(qint32 j = 0; j < idx.rows(); ++j) { if(idx[j] == i) { Xsorted.row(c) = X.row(j); ++c; } } for(qint32 j = 0; j < Xsorted.cols(); ++j) std::sort(Xsorted.col(j).data(),Xsorted.col(j).data()+Xsorted.rows()); qint32 nn = floor(0.5*m[i])-1; if ((m[i] % 2) == 0) { Xmid1.row(i) = Xsorted.row(nn); Xmid2.row(i) = Xsorted.row(nn+1); } else if (m[i] > 1) { Xmid1.row(i) = Xsorted.row(nn); Xmid2.row(i) = Xsorted.row(nn+2); } else { Xmid1.row(i) = Xsorted.row(0); Xmid2.row(i) = Xsorted.row(0); } } } } else if (m_sDistance.compare("hamming") == 0) { // Xsum = zeros(k,p); // for i = 1:k // if m(i) > 0 // % Sum coords for points in i'th cluster, component-wise // Xsum(i,:) = sum(X(idx==i,:), 1); // end // end } // // Begin phase two: single reassignments // VectorXi changed = VectorXi(m.rows()); qint32 count = 0; for(qint32 i = 0; i < m.rows(); ++i) { if(m[i] > 0) { changed[count] = i; ++count; } } changed.conservativeResize(count); qint32 lastmoved = 0; qint32 nummoved = 0; qint32 iter1 = iter; bool converged = false; while (iter < m_iMaxit) { // Calculate distances to each cluster from each point, and the // potential change in total sum of errors for adding or removing // each point from each cluster. Clusters that have not changed // membership need not be updated. // // Singleton clusters are a special case for the sum of dists // calculation. Removing their only point is never best, so the // reassignment criterion had better guarantee that a singleton // point will stay in its own cluster. 
Happily, we get // Del(i,idx(i)) == 0 automatically for them. if (m_sDistance.compare("sqeuclidean") == 0) { for(qint32 j = 0; j < changed.rows(); ++j) { qint32 i = changed[j]; VectorXi mbrs = VectorXi::Zero(idx.rows()); for(qint32 l = 0; l < idx.rows(); ++l) if(idx[l] == i) mbrs[l] = 1; VectorXi sgn = 1 - 2 * mbrs.array(); // -1 for members, 1 for nonmembers if (m[i] == 1) for(qint32 l = 0; l < mbrs.rows(); ++l) if(mbrs[l]) sgn[l] = 0; // prevent divide-by-zero for singleton mbrs Del.col(i) = ((double)m[i] / ((double)m[i] + sgn.cast<double>().array())); Del.col(i).array() *= (X - C.row(i).replicate(n,1)).array().pow(2).rowwise().sum().array(); } } else if (m_sDistance.compare("cityblock") == 0) { for(qint32 j = 0; j < changed.rows(); ++j) { qint32 i = changed[j]; if (m(i) % 2 == 0) // this will never catch singleton clusters { MatrixXd ldist = Xmid1.row(i).replicate(n,1) - X; MatrixXd rdist = X - Xmid2.row(i).replicate(n,1); VectorXd mbrs = VectorXd::Zero(idx.rows()); for(qint32 l = 0; l < idx.rows(); ++l) if(idx[l] == i) mbrs[l] = 1; MatrixXd sgn = ((-2*mbrs).array() + 1).replicate(1, p); // -1 for members, 1 for nonmembers rdist = sgn.array()*rdist.array(); ldist = sgn.array()*ldist.array(); for(qint32 l = 0; l < idx.rows(); ++l) { double sum = 0; for(qint32 h = 0; h < rdist.cols(); ++h) sum += rdist(l,h) > ldist(l,h) ? rdist(l,h) < 0 ? 0 : rdist(l,h) : ldist(l,h) < 0 ? 
0 : ldist(l,h); Del(l,i) = sum; } } else Del.col(i) = ((X - C.row(i).replicate(n,1)).array().abs()).rowwise().sum(); } } else if (m_sDistance.compare("cosine") == 0 || m_sDistance.compare("correlation") == 0) { // The points are normalized, centroids are not, so normalize them MatrixXd normC = C.array().pow(2).rowwise().sum().sqrt(); // if any(normC < eps(class(normC))) % small relative to unit-length data points // error('Zero cluster centroid created at iteration %d during replicate %d.',iter, rep); // end // This can be done without a loop, but the loop saves memory allocations MatrixXd XCi; qint32 i; for(qint32 j = 0; j < changed.rows(); ++j) { i = changed[j]; XCi = X * C.row(i).transpose(); VectorXi mbrs = VectorXi::Zero(idx.rows()); for(qint32 l = 0; l < idx.rows(); ++l) if(idx[l] == i) mbrs[l] = 1; VectorXi sgn = 1 - 2 * mbrs.array(); // -1 for members, 1 for nonmembers double A = (double)m[i] * normC(i,0); double B = pow(((double)m[i] * normC(i,0)),2); Del.col(i) = 1 + sgn.cast<double>().array()* (A - (B + 2 * sgn.cast<double>().array() * m[i] * XCi.array() + 1).sqrt()); std::cout << "Del.col(i)\n" << Del.col(i) << std::endl; // Del(:,i) = 1 + sgn .*... // (m(i).*normC(i) - sqrt((m(i).*normC(i)).^2 + 2.*sgn.*m(i).*XCi + 1)); } } else if (m_sDistance.compare("hamming") == 0) { // for i = changed // if mod(m(i),2) == 0 % this will never catch singleton clusters // % coords with an unequal number of 0s and 1s have a // % different contribution than coords with an equal // % number // unequal01 = find(2*Xsum(i,:) ~= m(i)); // numequal01 = p - length(unequal01); // mbrs = (idx == i); // Di = abs(X(:,unequal01) - C(repmat(i,n,1),unequal01)); // Del(:,i) = (sum(Di, 2) + mbrs*numequal01) / p; // else // Del(:,i) = sum(abs(X - C(repmat(i,n,1),:)), 2) / p; // end // end } // Determine best possible move, if any, for each point. Next we // will pick one from those that actually did move. 
previdx = idx; prevtotsumD = totsumD; VectorXi nidx = VectorXi::Zero(Del.rows()); VectorXd minDel = VectorXd::Zero(Del.rows()); for(qint32 i = 0; i < Del.rows(); ++i) minDel[i] = Del.row(i).minCoeff(&nidx[i]); VectorXi moved = VectorXi::Zero(previdx.rows()); qint32 count = 0; for(qint32 i = 0; i < moved.rows(); ++i) { if(previdx[i] != nidx[i]) { moved[count] = i; ++count; } } moved.conservativeResize(count); if (moved.sum() > 0) { // Resolve ties in favor of not moving VectorXi moved_new = VectorXi::Zero(moved.rows()); count = 0; for(qint32 i = 0; i < moved.rows(); ++i) { if ( Del.array()(previdx[moved(i)]*n + moved(i)) > minDel(moved(i))) { moved_new[count] = moved[i]; ++count; } } moved_new.conservativeResize(count); moved = moved_new; } if (moved.rows() <= 0) { // Count an iteration if phase 2 did nothing at all, or if we're // in the middle of a pass through all the points if ((iter == iter1) || nummoved > 0) { ++iter; // printf("%6d\t%6d\t%8d\t%12g\n",iter,2,nummoved,totsumD); } converged = true; break; } // Pick the next move in cyclic order VectorXi moved_new(moved.rows()); for(qint32 i = 0; i < moved.rows(); ++i) moved_new[i] = ((moved[i] - lastmoved) % n) + lastmoved; moved[0] = moved_new.minCoeff() % n;//+1 moved.conservativeResize(1); // If we've gone once through all the points, that's an iteration if (moved[0] <= lastmoved) { ++iter; // printf("%6d\t%6d\t%8d\t%12g\n",iter,2,nummoved,totsumD); if(iter >= m_iMaxit) break; nummoved = 0; } ++nummoved; lastmoved = moved[0]; qint32 oidx = idx(moved[0]); nidx[0] = nidx(moved[0]); nidx.conservativeResize(1); totsumD += Del(moved[0],nidx[0]) - Del(moved[0],oidx); // Update the cluster index vector, and the old and new cluster // counts and centroids idx[ moved[0] ] = nidx[0]; m( nidx[0] ) = m( nidx[0] ) + 1; m( oidx ) = m( oidx ) - 1; if (m_sDistance.compare("sqeuclidean") == 0) { C.row(nidx[0]) = C.row(nidx[0]).array() + (X.row(moved[0]) - C.row(nidx[0])).array() / m[nidx[0]]; C.row(oidx) = C.row(oidx).array() 
- (X.row(moved[0]) - C.row(oidx)).array() / m[oidx]; } else if (m_sDistance.compare("cityblock") == 0) { VectorXi onidx(2); onidx << oidx, nidx[0];//ToDo always right? qint32 i; for(qint32 h = 0; h < 2; ++h) { i = onidx[h]; // Separate out sorted coords for points in each cluster. // New centroid is the coord median, save values above and // below median. All done component-wise. MatrixXd Xsorted(m[i],p); qint32 c = 0; for(qint32 j = 0; j < idx.rows(); ++j) { if(idx[j] == i) { Xsorted.row(c) = X.row(j); ++c; } } for(qint32 j = 0; j < Xsorted.cols(); ++j) std::sort(Xsorted.col(j).data(),Xsorted.col(j).data()+Xsorted.rows()); qint32 nn = floor(0.5*m[i])-1; if ((m[i] % 2) == 0) { C.row(i) = 0.5 * (Xsorted.row(nn) + Xsorted.row(nn+1)); Xmid1.row(i) = Xsorted.row(nn); Xmid2.row(i) = Xsorted.row(nn+1); } else { C.row(i) = Xsorted.row(nn+1); if (m(i) > 1) { Xmid1.row(i) = Xsorted.row(nn); Xmid2.row(i) = Xsorted.row(nn+2); } else { Xmid1.row(i) = Xsorted.row(0); Xmid2.row(i) = Xsorted.row(0); } } } } else if (m_sDistance.compare("cosine") == 0 || m_sDistance.compare("correlation") == 0) { C.row(nidx[0]).array() += (X.row(moved[0]) - C.row(nidx[0])).array() / m[nidx[0]]; C.row(oidx).array() += (X.row(moved[0]) - C.row(oidx)).array() / m[oidx]; } else if (m_sDistance.compare("hamming") == 0) { // % Update summed coords for points in each cluster. New // % centroid is the coord median. All done component-wise. // Xsum(nidx,:) = Xsum(nidx,:) + X(moved,:); // Xsum(oidx,:) = Xsum(oidx,:) - X(moved,:); // C(nidx,:) = .5*sign(2*Xsum(nidx,:) - m(nidx)) + .5; // C(oidx,:) = .5*sign(2*Xsum(oidx,:) - m(oidx)) + .5; } VectorXi sorted_onidx(1+nidx.rows()); sorted_onidx << oidx, nidx; std::sort(sorted_onidx.data(), sorted_onidx.data()+sorted_onidx.rows()); changed = sorted_onidx; } // phase two return converged; } // nested function
/**
 * Accumulates depth-distortion training examples from one map / trajectory pair.
 *
 * Selects every increment_-th frame that has a valid pose in traj, projects the
 * map into each selected frame to get an expected depth image, and feeds the
 * (expected, measured) depth pair to the distortion model.
 *
 * @param sseq   recorded sensor stream to read measured frames from.
 * @param traj   camera trajectory; only indices with traj.exists(i) are used.
 * @param map    accumulated point-cloud map to synthesize expected depth from.
 * @param model  distortion model trained in place (accumulate is called from
 *               multiple OpenMP threads — assumed internally thread-safe;
 *               TODO confirm against DiscreteDepthDistortionModel).
 * @return total number of training examples accumulated over all frames.
 */
size_t SlamCalibrator::processMap(const StreamSequenceBase& sseq,
                                  const Trajectory& traj, const Cloud& map,
                                  DiscreteDepthDistortionModel* model) const
{
  // -- Select which frame indices from the sequence to use.
  //    Consider only those with a pose in the Trajectory,
  //    and apply downsampling based on increment_.
  //    NOTE(review): assumes increment_ > 0; zero would divide by zero here.
  vector<size_t> indices;
  indices.reserve(traj.numValid());
  int num = 0;
  for(size_t i = 0; i < traj.size(); ++i) {
    if(traj.exists(i)) {
      ++num;
      if(num % increment_ == 0)
        indices.push_back(i);
    }
  }

  // -- For all selected frames, accumulate training examples
  //    in the distortion model.
  //    counts[i] is written by exactly one loop iteration, so the parallel
  //    writes do not race with each other.
  VectorXi counts = VectorXi::Zero(indices.size());
  #pragma omp parallel for
  for(size_t i = 0; i < indices.size(); ++i) {
    size_t idx = indices[i];
    BOOST_ASSERT(traj.exists(idx));
    cout << "." << flush;  // progress indicator (interleaved across threads)

    // Measured frame straight from the sensor stream.
    Frame measurement;
    sseq.readFrame(idx, &measurement);

    // Expected depth: render the map from this frame's pose.
    Frame mapframe;
    mapframe.depth_ = DepthMatPtr(new DepthMat);
    sseq.proj_.estimateMapDepth(map, traj.get(idx).inverse().cast<float>(),
                                measurement,
                                mapframe.depth_.get());
    counts[i] = model->accumulate(*mapframe.depth_, *measurement.depth_);

    // cv::imshow("map", mapframe.depthImage());
    // cv::imshow("measurement", measurement.depthImage());
    // cv::waitKey();

    // -- Quick and dirty option for data inspection: if U and V are set in
    //    the environment, dump (expected, measured) depth in meters around
    //    that pixel to stderr.  The 0.001 factor converts millimeters to
    //    meters; 640x480 image size is hard-coded here — presumably VGA
    //    depth frames, verify against the sensor.
    if(getenv("U") && getenv("V")) {
      int u_center = atoi(getenv("U"));
      int v_center = atoi(getenv("V"));
      int radius = 1;
      for(int u = max(0, u_center - radius); u < min(640, u_center + radius + 1); ++u) {
        for(int v = max(0, v_center - radius); v < min(480, v_center + radius + 1); ++v) {
          // Zero depth means "no reading" in either image; skip those pixels.
          if(mapframe.depth_->coeffRef(v, u) == 0)
            continue;
          if(measurement.depth_->coeffRef(v, u) == 0)
            continue;
          cerr << mapframe.depth_->coeffRef(v, u) * 0.001 << " "
               << measurement.depth_->coeffRef(v, u) * 0.001 << endl;
        }
      }
    }
  }
  cout << endl;
  return counts.sum();
}
bool KMeans::batchUpdate(MatrixXd& X, MatrixXd& C, VectorXi& idx) { // Every point moved, every cluster will need an update qint32 i = 0; VectorXi moved(n); for(i = 0; i < n; ++i) moved[i] = i; VectorXi changed(k); for(i = 0; i < k; ++i) changed[i] = i; previdx = VectorXi::Zero(n); prevtotsumD = std::numeric_limits<double>::max();//max double MatrixXd D = MatrixXd::Zero(X.rows(), k); // // Begin phase one: batch reassignments // iter = 0; bool converged = false; while(true) { ++iter; // Calculate the new cluster centroids and counts, and update the // distance from every point to those new cluster centroids MatrixXd C_new; VectorXi m_new; KMeans::gcentroids(X, idx, changed, C_new, m_new); MatrixXd D_new = distfun(X, C_new, iter); for(qint32 i = 0; i < changed.rows(); ++i) { C.row(changed[i]) = C_new.row(i); D.col(changed[i]) = D_new.col(i); m[changed[i]] = m_new[i]; } // Deal with clusters that have just lost all their members VectorXi empties = VectorXi::Zero(changed.rows()); for(qint32 i = 0; i < changed.rows(); ++i) if(m(i) == 0) empties[i] = 1; if (empties.sum() > 0) { if (m_sEmptyact.compare("error") == 0) { throw 0; } else if (m_sEmptyact.compare("drop") == 0) { // // Remove the empty cluster from any further processing // D(:,empties) = NaN; // changed = changed(m(changed) > 0); // warning('Empty cluster created at iteration %d during replicate %d.',iter, rep,); } else if (m_sEmptyact.compare("singleton") == 0) { // warning('Empty cluster created at iteration %d during replicate %d.', iter, rep); // for i = empties // d = D((idx-1)*n + (1:n)'); // use newly updated distances // % Find the point furthest away from its current cluster. // % Take that point out of its cluster and use it to create // % a new singleton cluster to replace the empty one. // [dlarge, lonely] = max(d); // from = idx(lonely); % taking from this cluster // if m(from) < 2 // % In the very unusual event that the cluster had only // % one member, pick any other non-singleton point. 
// from = find(m>1,1,'first'); // lonely = find(idx==from,1,'first'); // end // C(i,:) = X(lonely,:); // m(i) = 1; // idx(lonely) = i; // D(:,i) = distfun(X, C(i,:), distance, iter); // % Update clusters from which points are taken // [C(from,:), m(from)] = gcentroids(X, idx, from, distance); // D(:,from) = distfun(X, C(from,:), distance, iter); // changed = unique([changed from]); // end } } // Compute the total sum of distances for the current configuration. totsumD = 0; for(qint32 i = 0; i < n; ++i) totsumD += D.array()(idx[i]*n+i);//Colum Major // Test for a cycle: if objective is not decreased, back out // the last step and move on to the single update phase if(prevtotsumD <= totsumD) { idx = previdx; MatrixXd C_new; VectorXi m_new; gcentroids(X, idx, changed, C_new, m_new); C.block(0,0,k,C.cols()) = C_new; m.block(0,0,k,1) = m_new; --iter; break; } // printf("%6d\t%6d\t%8d\t%12g\n",iter,1,moved.rows(),totsumD); if (iter >= m_iMaxit) break; // Determine closest cluster for each point and reassign points to clusters previdx = idx; prevtotsumD = totsumD; VectorXi nidx(D.rows()); for(qint32 i = 0; i < D.rows(); ++i) d[i] = D.row(i).minCoeff(&nidx[i]); // Determine which points moved VectorXi moved = VectorXi::Zero(nidx.rows()); qint32 count = 0; for(qint32 i = 0; i < nidx.rows(); ++i) { if(nidx[i] != previdx[i]) { moved[count] = i; ++count; } } moved.conservativeResize(count); if (moved.rows() > 0) { // Resolve ties in favor of not moving VectorXi moved_new = VectorXi::Zero(moved.rows()); count = 0; for(qint32 i = 0; i < moved.rows(); ++i) { if(D.array()(previdx[moved[i]] * n + moved[i]) > d[moved[i]]) { moved_new[count] = moved[i]; ++count; } } moved_new.conservativeResize(count); moved = moved_new; } if (moved.rows() == 0) { converged = true; break; } for(qint32 i = 0; i < moved.rows(); ++i) if(moved[i] >= 0) idx[ moved[i] ] = nidx[ moved[i] ]; // Find clusters that gained or lost members std::vector<int> tmp; for(qint32 i = 0; i < moved.rows(); ++i) 
tmp.push_back(idx[moved[i]]); for(qint32 i = 0; i < moved.rows(); ++i) tmp.push_back(previdx[moved[i]]); std::sort(tmp.begin(),tmp.end()); std::vector<int>::iterator it; it = std::unique(tmp.begin(),tmp.end()); tmp.resize( it - tmp.begin() ); changed.conservativeResize(tmp.size()); for(quint32 i = 0; i < tmp.size(); ++i) changed[i] = tmp[i]; } // phase one return converged; } // nested function
/**
 * Clusters the inverse-operator kernel by cortical atlas regions.
 *
 * For each labeled region in p_AnnotationSet (per hemisphere), the source
 * columns of the transposed kernel m_K are grouped, clustered concurrently
 * via RegionMT::cluster, and replaced by their cluster centroids.
 *
 * @param p_AnnotationSet  per-hemisphere atlas annotation (expects 2 hemispheres).
 * @param p_iClusterSize   target number of sources per cluster within a region.
 * @param p_D              out: cluster operator (sources x clusters*3 for free
 *                         orientation), built at the end from the cluster vertnos.
 * @return the clustered kernel transpose (or the unclustered transpose if the
 *         operator is fixed-orientation, which is not implemented).
 */
MatrixXd MNEInverseOperator::cluster_kernel(const AnnotationSet &p_AnnotationSet, qint32 p_iClusterSize, MatrixXd& p_D) const
{
    // Work on the kernel transpose: sensors x (sources*3).
    MatrixXd p_outMT = this->m_K.transpose();

    // One cluster-info record per hemisphere.
    QList<MNEClusterInfo> t_qListMNEClusterInfo;
    MNEClusterInfo t_MNEClusterInfo;
    t_qListMNEClusterInfo.append(t_MNEClusterInfo);
    t_qListMNEClusterInfo.append(t_MNEClusterInfo);

    //
    // Check consisty
    //
    if(this->isFixedOrient())
    {
        printf("Error: Fixed orientation not implemented jet!\n");
        return p_outMT;
    }

//    qDebug() << "p_outMT" << p_outMT.rows() << "x" << p_outMT.cols();

//    MatrixXd t_G_Whitened(0,0);
//    bool t_bUseWhitened = false;
//    //
//    //Whiten gain matrix before clustering -> cause diffenerent units Magnetometer, Gradiometer and EEG
//    //
//    if(!p_pNoise_cov.isEmpty() && !p_pInfo.isEmpty())
//    {
//        FiffInfo p_outFwdInfo;
//        FiffCov p_outNoiseCov;
//        MatrixXd p_outWhitener;
//        qint32 p_outNumNonZero;
//        //do whitening with noise cov
//        this->prepare_forward(p_pInfo, p_pNoise_cov, false, p_outFwdInfo, t_G_Whitened, p_outNoiseCov, p_outWhitener, p_outNumNonZero);
//        printf("\tWhitening the forward solution.\n");
//        t_G_Whitened = p_outWhitener*t_G_Whitened;
//        t_bUseWhitened = true;
//    }

    //
    // Assemble input data
    //
    qint32 count;
    qint32 offset;

    MatrixXd t_MT_new;

    // Loop over hemispheres (h == 0: left, h == 1: right).
    for(qint32 h = 0; h < this->src.size(); ++h )
    {
        count = 0;
        offset = 0;

        // Offset for continuous indexing; columns of p_outMT for hemisphere h
        // start after all sources of the previous hemispheres.
        if(h > 0)
            for(qint32 j = 0; j < h; ++j)
                offset += this->src[j].nuse;

        if(h == 0)
            printf("Cluster Left Hemisphere\n");
        else
            printf("Cluster Right Hemisphere\n");

        Colortable t_CurrentColorTable = p_AnnotationSet[h].getColortable();
        VectorXi label_ids = t_CurrentColorTable.getLabelIds();

        // Get label ids for every vertex
        VectorXi vertno_labeled = VectorXi::Zero(this->src[h].vertno.rows());

        //ToDo make this more universal -> using Label instead of annotations - obsolete when using Labels
        for(qint32 i = 0; i < vertno_labeled.rows(); ++i)
            vertno_labeled[i] = p_AnnotationSet[h].getLabelIds()[this->src[h].vertno[i]];

        //Qt Concurrent List
        QList<RegionMT> m_qListRegionMTIn;

        //
        // Generate cluster input data
        //
        for (qint32 i = 0; i < label_ids.rows(); ++i)
        {
            // label id 0 is unlabeled/unknown; skip it.
            if (label_ids[i] != 0)
            {
                QString curr_name = t_CurrentColorTable.struct_names[i];//obj.label2AtlasName(label(i));
                printf("\tCluster %d / %li %s...", i+1, label_ids.rows(), curr_name.toUtf8().constData());

                //
                // Get source space indeces
                //
                VectorXi idcs = VectorXi::Zero(vertno_labeled.rows());
                qint32 c = 0;

                //Select ROIs //change this use label info with a hash tabel
                for(qint32 j = 0; j < vertno_labeled.rows(); ++j)
                {
                    if(vertno_labeled[j] == label_ids[i])
                    {
                        idcs[c] = j;
                        ++c;
                    }
                }
                idcs.conservativeResize(c);

                //get selected MT: 3 columns (x,y,z orientation) per source.
                MatrixXd t_MT(p_outMT.rows(), idcs.rows()*3);
                for(qint32 j = 0; j < idcs.rows(); ++j)
                    t_MT.block(0, j*3, t_MT.rows(), 3) = p_outMT.block(0, (idcs[j]+offset)*3, t_MT.rows(), 3);

                qint32 nSens = t_MT.rows();
                qint32 nSources = t_MT.cols()/3;

                if (nSources > 0)
                {
                    RegionMT t_sensMT;

                    t_sensMT.idcs = idcs;
                    t_sensMT.iLabelIdxIn = i;
                    t_sensMT.nClusters = ceil((double)nSources/(double)p_iClusterSize);

                    t_sensMT.matRoiMTOrig = t_MT;

                    printf("%d Cluster(s)... ", t_sensMT.nClusters);

                    // Reshape Input data -> sources rows; sensors columns
                    t_sensMT.matRoiMT = MatrixXd(t_MT.cols()/3, 3*nSens);
                    for(qint32 j = 0; j < nSens; ++j)
                        for(qint32 k = 0; k < t_sensMT.matRoiMT.rows(); ++k)
                            t_sensMT.matRoiMT.block(k,j*3,1,3) = t_MT.block(j,k*3,1,3);

                    m_qListRegionMTIn.append(t_sensMT);

                    printf("[added]\n");
                }
                else
                {
                    printf("failed! Label contains no sources.\n");
                }
            }
        }

        //
        // Calculate clusters concurrently, one task per region.
        //
        printf("Clustering... ");
        QFuture< RegionMTOut > res;
        res = QtConcurrent::mapped(m_qListRegionMTIn, &RegionMT::cluster);
        res.waitForFinished();

        //
        // Assign results; res preserves the order of m_qListRegionMTIn,
        // so itIn is advanced in lockstep with itOut.
        //
        MatrixXd t_MT_partial;

        qint32 nClusters;
        qint32 nSens;
        QList<RegionMT>::const_iterator itIn;
        itIn = m_qListRegionMTIn.begin();
        QFuture<RegionMTOut>::const_iterator itOut;
        for (itOut = res.constBegin(); itOut != res.constEnd(); ++itOut)
        {
            nClusters = itOut->ctrs.rows();
            nSens = itOut->ctrs.cols()/3;
            t_MT_partial = MatrixXd::Zero(nSens, nClusters*3);

//            std::cout << "Number of Clusters: " << nClusters << " x " << nSens << std::endl;//itOut->iLabelIdcsOut << std::endl;

            //
            // Assign the centroid for each cluster to the partial G
            //
            //ToDo change this use indeces found with whitened data
            for(qint32 j = 0; j < nSens; ++j)
                for(qint32 k = 0; k < nClusters; ++k)
                    t_MT_partial.block(j, k*3, 1, 3) = itOut->ctrs.block(k,j*3,1,3);

            //
            // Get cluster indizes and its distances to the centroid
            //
            for(qint32 j = 0; j < nClusters; ++j)
            {
                VectorXi clusterIdcs = VectorXi::Zero(itOut->roiIdx.rows());
                VectorXd clusterDistance = VectorXd::Zero(itOut->roiIdx.rows());
                qint32 nClusterIdcs = 0;
                for(qint32 k = 0; k < itOut->roiIdx.rows(); ++k)
                {
                    if(itOut->roiIdx[k] == j)
                    {
                        clusterIdcs[nClusterIdcs] = itIn->idcs[k];
//                        qint32 offset = h == 0 ? 0 : this->src[0].nuse;
//                        Q_UNUSED(offset)
                        clusterDistance[nClusterIdcs] = itOut->D(k,j);
                        ++nClusterIdcs;
                    }
                }
                clusterIdcs.conservativeResize(nClusterIdcs);
                clusterDistance.conservativeResize(nClusterIdcs);
                // Map row indices back to source-space vertex numbers.
                VectorXi clusterVertnos = VectorXi::Zero(clusterIdcs.size());
                for(qint32 k = 0; k < clusterVertnos.size(); ++k)
                    clusterVertnos(k) = this->src[h].vertno[clusterIdcs(k)];
                t_qListMNEClusterInfo[h].clusterVertnos.append(clusterVertnos);
            }

            //
            // Assign partial G to new LeadField
            //
            if(t_MT_partial.rows() > 0 && t_MT_partial.cols() > 0)
            {
                t_MT_new.conservativeResize(t_MT_partial.rows(), t_MT_new.cols() + t_MT_partial.cols());
                t_MT_new.block(0, t_MT_new.cols() - t_MT_partial.cols(), t_MT_new.rows(), t_MT_partial.cols()) = t_MT_partial;

                // Map the centroids to the closest rr
                // (j_min is computed but currently only used by the
                // commented-out selection below).
                for(qint32 k = 0; k < nClusters; ++k)
                {
                    qint32 j = 0;

                    double sqec = sqrt((itIn->matRoiMTOrig.block(0, j*3, itIn->matRoiMTOrig.rows(), 3) - t_MT_partial.block(0, k*3, t_MT_partial.rows(), 3)).array().pow(2).sum());
                    double sqec_min = sqec;
                    qint32 j_min = 0;
//                    MatrixXd matGainDiff;
                    for(qint32 j = 1; j < itIn->idcs.rows(); ++j)
                    {
                        sqec = sqrt((itIn->matRoiMTOrig.block(0, j*3, itIn->matRoiMTOrig.rows(), 3) - t_MT_partial.block(0, k*3, t_MT_partial.rows(), 3)).array().pow(2).sum());

                        if(sqec < sqec_min)
                        {
                            sqec_min = sqec;
                            j_min = j;
//                            matGainDiff = itIn->matRoiGOrig.block(0, j*3, itIn->matRoiGOrig.rows(), 3) - t_G_partial.block(0, k*3, t_G_partial.rows(), 3);
                        }
                    }

//                    qListGainDist.append(matGainDiff);

                    // Take the closest coordinates
//                    qint32 sel_idx = itIn->idcs[j_min];
//                    Q_UNUSED(sel_idx)

//                    //vertices
//                    std::cout << this->src[h].vertno[sel_idx] << ", ";

                    ++count;
                }
            }

            ++itIn;
        }

        printf("[done]\n");
    }

    //
    // Cluster operator D (sources x clusters)
    //
    qint32 totalNumOfClust = 0;
    for (qint32 h = 0; h < 2; ++h)
        totalNumOfClust += t_qListMNEClusterInfo[h].clusterVertnos.size();

    if(this->isFixedOrient())
        p_D = MatrixXd::Zero(p_outMT.cols(), totalNumOfClust);
    else
        p_D = MatrixXd::Zero(p_outMT.cols(), totalNumOfClust*3);

    QList<VectorXi> t_vertnos = this->src.get_vertno();

//    qDebug() << "Size: " << t_vertnos[0].size() << t_vertnos[1].size();
//    qDebug() << "this->sol->data.cols(): " << this->sol->data.cols();

    qint32 currentCluster = 0;
    for (qint32 h = 0; h < 2; ++h)
    {
        // Right-hemisphere rows come after all left-hemisphere sources.
        int hemiOffset = h == 0 ? 0 : t_vertnos[0].size();
        for(qint32 i = 0; i < t_qListMNEClusterInfo[h].clusterVertnos.size(); ++i)
        {
            // Rows of p_D belonging to this cluster: positions of the cluster's
            // vertnos within the hemisphere's source vertices.
            VectorXi idx_sel;
            MNEMath::intersect(t_vertnos[h], t_qListMNEClusterInfo[h].clusterVertnos[i], idx_sel);

//            std::cout << "\nVertnos:\n" << t_vertnos[h] << std::endl;
//            std::cout << "clusterVertnos[i]:\n" << t_qListMNEClusterInfo[h].clusterVertnos[i] << std::endl;

            idx_sel.array() += hemiOffset;

//            std::cout << "idx_sel]:\n" << idx_sel << std::endl;

            // Each source in the cluster gets an equal weight.
            double selectWeight = 1.0/idx_sel.size();
            if(this->isFixedOrient())
            {
                for(qint32 j = 0; j < idx_sel.size(); ++j)
                    p_D.col(currentCluster)[idx_sel(j)] = selectWeight;
            }
            else
            {
                qint32 clustOffset = currentCluster*3;
                for(qint32 j = 0; j < idx_sel.size(); ++j)
                {
                    qint32 idx_sel_Offset = idx_sel(j)*3;
                    //x
                    p_D(idx_sel_Offset,clustOffset) = selectWeight;
                    //y
                    p_D(idx_sel_Offset+1, clustOffset+1) = selectWeight;
                    //z
                    p_D(idx_sel_Offset+2, clustOffset+2) = selectWeight;
                }
            }
            ++currentCluster;
        }
    }

//    std::cout << "D:\n" << D.row(0) << std::endl << D.row(1) << std::endl << D.row(2) << std::endl << D.row(3) << std::endl << D.row(4) << std::endl << D.row(5) << std::endl;

    //
    // Put it all together
    //
    p_outMT = t_MT_new;

    return p_outMT;
}