/**
 * Compute a distance metric between two columns of a matrix.
 * <b>Note that the indexes are *1* based (not 0) as that is Newmat's
 * convention.</b> Note that dist(M,i,j) must equal dist(M,j,i).
 *
 * @param M - Matrix whose columns represent individual items to be clustered.
 * @param col1Ix - Column index to be compared (1 based).
 * @param col2Ix - Column index to be compared (1 based).
 *
 * @return - "Distance" or "dissimilarity" metric between two columns of matrix.
 */
double GuassianRadial::dist(const Matrix &M, int col1Ix, int col2Ix) const {
  double dist = 0;
  if(col1Ix == col2Ix)
    return 0;
  ColumnVector V = M.Column(col1Ix) - M.Column(col2Ix);
  dist = V.SumSquare() / (2 * m_Sigma * m_Sigma);
  dist = exp(-1 * dist);
  return dist;
}
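// The value computed above is the Gaussian radial basis kernel
// exp(-||M.Column(i) - M.Column(j)||^2 / (2 * m_Sigma^2)). A minimal usage
// sketch follows; the GuassianRadial(double sigma) constructor is an
// assumption (only dist() appears above), and column indexes are 1-based.
bool radialIsSymmetric(const Matrix &M)
{
  GuassianRadial metric(2.0);              // hypothetical constructor setting m_Sigma
  return metric.dist(M, 1, 2) == metric.dist(M, 2, 1);
}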
ReturnMatrix Helmert(const Matrix& X, bool full)
{
   REPORT
   Tracer et("Helmert * Matrix");
   int m = X.nrows(); int n = X.ncols();
   if (m == 0) Throw(ProgramException("Matrix has 0 rows ", X));
   Matrix Y;
   if (full) Y.resize(m,n); else Y.resize(m-1, n);
   for (int j = 1; j <= n; ++j)
   {
      ColumnVector CV = X.Column(j);
      Y.Column(j) = Helmert(CV, full);
   }
   Y.release(); return Y.for_return();
}
std::vector<int> fiedler_reorder(const SymmetricMatrix& m)
{
  SymmetricMatrix absm = m;
  const int nrows = m.Nrows();
  for (int i = 0; i < nrows; ++i) {
    for (int j = 0; j <= i; ++j) {
      // absolute value
      absm.element(i,j) = std::fabs(absm.element(i,j));
    }
  }
  // graph Laplacian: diagonal of row sums minus the (absolute) adjacency
  SymmetricMatrix lap(nrows);
  lap = 0.;
  for (int i = 0; i < nrows; ++i)
    lap.element(i,i) = absm.Row(i+1).Sum();
  lap -= absm;
  DiagonalMatrix eigs;
  Matrix vecs;
  EigenValues(lap, eigs, vecs);
  // Newmat returns eigenvalues in ascending order, so column 2 holds the
  // eigenvector of the second-smallest eigenvalue: the Fiedler vector.
  ColumnVector fvec = vecs.Column(2);
  std::vector<double> fvec_stl(nrows);
  // copy fvec over to fvec_stl
  std::copy(&fvec.element(0), &fvec.element(0) + nrows, fvec_stl.begin());
  std::vector<int> findices;
  // sort the Fiedler vector entries in ascending order and return the permutation
  sort_data_to_indices(fvec_stl, findices);
  return findices;
  /* BLOCK works with findices */
}
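// sort_data_to_indices is called above but not shown. A minimal sketch of what
// such a helper could look like: the name and signature come from the call
// site, the body is an assumption. It fills `indices` with 0-based positions
// so that data[indices[0]] <= data[indices[1]] <= ...
#include <algorithm>
#include <numeric>
#include <vector>

void sort_data_to_indices(const std::vector<double>& data, std::vector<int>& indices)
{
  indices.resize(data.size());
  std::iota(indices.begin(), indices.end(), 0);
  std::sort(indices.begin(), indices.end(),
            [&data](int a, int b) { return data[a] < data[b]; });
}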
queue<vector<bool>> Filter_Indep_Col(queue<vector<bool>> q, Matrix A)
{
  queue<vector<bool>> indep_q;
  int nrow_A = A.Nrows();
  int ncol_A = A.Ncols();
  int rank_A = Rank(A);
  int size = q.size();
  while (size > 0) {
    Matrix B(nrow_A, rank_A);
    int k = 1;
    for (int j = 0; j < ncol_A; j++) {
      if (q.front().at(j) == true) {
        B.Column(k) = A.Column(j+1);
        k++;
      }
    }
    // see if the columns of B are independent
    if (Rank(B) == min(B.Ncols(), B.Nrows())) {
      indep_q.push(q.front());
    }
    q.pop();
    size--;
  }
  return indep_q;
}
Matrix Find_T_BFS(queue<vector<bool>> q, Matrix S_BFS)
{
  int rank_S = Rank(S_BFS);
  int nrow_S = S_BFS.Nrows();
  int ncol_S = S_BFS.Ncols();
  int size = q.size();
  Matrix T(nrow_S, rank_S);
  T = 0;
  while (size > 0) {
    int r = 1;
    // the selection vector is 0-based while Newmat columns are 1-based
    for (int i = 0; i < ncol_S; i++) {
      if (q.front().at(i) == true) {
        T.Column(r) = S_BFS.Column(i + 1);
        r++;
      }
    }
    q.pop();
    size--;
    if (Rank(T) == rank_S)
      break;
  }
  return T;
}
// Matrix A's first n columns are orthonormal
// so A.Columns(1,n).t() * A.Columns(1,n) is the identity matrix.
// Fill out the remaining columns of A to make them orthonormal
// so A.t() * A is the identity matrix
void extend_orthonormal(Matrix& A, int n)
{
   REPORT
   Tracer et("extend_orthonormal");
   int nr = A.nrows(); int nc = A.ncols();
   if (nc > nr) Throw(IncompatibleDimensionsException(A));
   if (n > nc) Throw(IncompatibleDimensionsException(A));
   ColumnVector SSR;
   { Matrix A1 = A.Columns(1,n); SSR = A1.sum_square_rows(); }
   for (int i = n; i < nc; ++i)
   {
      // pick row with smallest SSQ
      int k; SSR.minimum1(k);
      // orthogonalise column with 1 at element k, 0 elsewhere
      // next line is rather inefficient
      ColumnVector X = - A.Columns(1, i) * A.SubMatrix(k, k, 1, i).t();
      X(k) += 1.0;
      // normalise
      X /= sqrt(X.SumSquare());
      // update row sums of squares
      for (k = 1; k <= nr; ++k) SSR(k) += square(X(k));
      // load new column into matrix
      A.Column(i+1) = X;
   }
}
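// A minimal usage sketch, assuming the Newmat headers are included: start from
// a matrix whose first column is already a unit vector, let extend_orthonormal
// fill in the remaining columns, and check that A.t() * A is numerically the
// identity. This example is not part of the original code.
double extend_orthonormal_check()
{
   Matrix A(4, 3); A = 0.0;
   A(1,1) = 1.0;                                 // first column is already a unit vector
   extend_orthonormal(A, 1);                     // fill in columns 2 and 3
   IdentityMatrix I(3);
   return MaximumAbsoluteValue(A.t() * A - I);   // should be close to zero
}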
//Predict on a chunk of data.
ReturnMatrix SOGP::predictM(const Matrix& in, ColumnVector &sigconf, bool conf){
  //printf("SOGP::Predicting on %d points\n",in.Ncols());
  Matrix out(alpha.Ncols(), in.Ncols());
  sigconf.ReSize(in.Ncols());
  for(int c=1; c<=in.Ncols(); c++)
    out.Column(c) = predict(in.Column(c), sigconf(c), conf);
  out.Release();
  return out;
}
void CircularShift(const Matrix& X1, int first, int last)
{
   Matrix X; UpperTriangularMatrix U1, U2;
   int n = X1.Ncols();

   // Try right circular shift of columns
   X = X1; QRZ(X, U1);
   RightCircularUpdateCholesky(U1, first, last);
   X = X1.Columns(1,first-1) | X1.Column(last)
     | X1.Columns(first,last-1) | X1.Columns(last+1,n);
   QRZ(X, U2);
   X = U1 - U2; Clean(X, 0.000000001); Print(X);

   // Try left circular shift of columns
   X = X1; QRZ(X, U1);
   LeftCircularUpdateCholesky(U1, first, last);
   X = X1.Columns(1,first-1) | X1.Columns(first+1,last)
     | X1.Column(first) | X1.Columns(last+1,n);
   QRZ(X, U2);
   X = U1 - U2; Clean(X, 0.000000001); Print(X);
}
ReturnMatrix Helmert_transpose(const Matrix& Y, bool full)
{
   REPORT
   Tracer et("Helmert_transpose * Matrix ");
   int m = Y.nrows(); int n = Y.ncols(); if (!full) ++m;
   Matrix X(m, n);
   for (int j = 1; j <= n; ++j)
   {
      ColumnVector CV = Y.Column(j);
      X.Column(j) = Helmert_transpose(CV, full);
   }
   X.release(); return X.for_return();
}
Matrix Find_S_BFS(queue<vector<bool>> q, Matrix A, ColumnVector y)
{
  int nrow_A = A.Nrows();
  int ncol_A = A.Ncols();
  int rank_A = Rank(A);
  int size = q.size();
  Matrix S_BFS(ncol_A, size);
  S_BFS = 0;
  int S_j = 1; // column index for S
  while (size > 0) {
    int k = 1;
    Matrix B(nrow_A, rank_A);
    for (int j = 0; j < ncol_A; j++) {
      if (q.front().at(j) == true) {
        B.Column(k) = A.Column(j + 1);
        k++;
      }
    }
    // B has been assembled; solve B * s = y for the basic variables
    ColumnVector s(rank_A);
    s = Find_Solution(B, y);
    // Scatter s back into a full-length solution, inserting zeros for the
    // columns that were not selected, to build column S_j of S_BFS
    int kk = 1;
    for (int i = 0; i < ncol_A; i++) {
      if (q.front().at(i) == true) {
        S_BFS(i + 1, S_j) = s(kk);
        kk++;
      } else {
        S_BFS(i + 1, S_j) = 0;
      }
    }
    q.pop();
    size--;
    S_j++;
  }
  return S_BFS;
}
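// Find_Solution is not defined in this excerpt. Since Filter_Indep_Col above
// guarantees that B has full column rank, one plausible implementation is a
// normal-equations least-squares solve with Newmat. This is an assumption,
// not the original helper.
ColumnVector Find_Solution(const Matrix &B, const ColumnVector &y)
{
  // solve B * s = y in the least-squares sense; B.t()*B is invertible because
  // the columns of B are independent
  ColumnVector s = (B.t() * B).i() * (B.t() * y);
  return s;
}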
bool SpectClust::findNLargestSymEvals(const SymmetricMatrix &W, int numLamda,
                                      std::vector<Numeric> &eVals, Matrix &EVec) {
  bool converged = false;
  eVals.clear();
  eVals.reserve(numLamda);
  DiagonalMatrix D(W.Ncols());
  Matrix E;
  try {
    EigenValues(W, D, E);
    converged = true;
    EVec.ReSize(W.Ncols(), numLamda);
    int count = 0;
    // Newmat orders eigenvalues ascending, so walk down from the largest.
    for(int i = W.Ncols(); i > W.Ncols() - numLamda; i--) {
      eVals.push_back(D(i));
      EVec.Column(++count) << E.Column(i);
    }
  }
  catch(const Exception &e) {
    Err::errAbort("Exception: " + ToStr(e.what()));
  }
  catch(...) {
    Err::errAbort("Yikes couldn't calculate eigen vectors.");
  }
  return converged;
}
Matrix operator *(const Matrix& a, const Matrix& b)
{
  if (a.Column() != b.Row()) {
    throw "Exception : Invalid matrix size.\n";
  }
  Matrix re(a.Row(), b.Column());
  for (int i = 0; i < re.Row(); i++) {
    for (int j = 0; j < re.Column(); j++) {
      double f = 0.0;
      for (int k = 0; k < a.Column(); k++) {
        f += a(i, k) * b(k, j);
      }
      re(i, j) = f;
    }
  }
  return re;
}
// produces the Cholesky decomposition of EAE where A = chol.t() * chol
// and E produces a LEFT circular shift of the rows and columns from
// 1,...,k-1,k,k+1,...,l,l+1,...,p to
// 1,...,k-1,k+1,...,l,k,l+1,...,p
void left_circular_update_Cholesky(UpperTriangularMatrix &chol, int k, int l)
{
  int nRC = chol.Nrows();
  int i, j;

  // I. compute shift of column k to the lth position
  Matrix cholCopy = chol;
  // a. grab column k
  ColumnVector columnK = cholCopy.Column(k);
  // b. shift columns k+1,...,l to the LEFT
  for(j = k+1; j <= l; ++j)
    cholCopy.Column(j-1) = cholCopy.Column(j);
  // c. copy the elements of columnK into the lth column of cholCopy
  cholCopy.Column(l) = 0.0;
  for(i = 1; i <= k; ++i)
    cholCopy(i,l) = columnK(i);

  // II. apply and compute Givens rotations
  int nGivens = l-k;
  ColumnVector cGivens(nGivens); cGivens = 0.0;
  ColumnVector sGivens(nGivens); sGivens = 0.0;

  for(j = k; j <= nRC; ++j) {
    ColumnVector columnJ = cholCopy.Column(j);

    // apply the previous Givens rotations to columnJ
    int imax = j - k; if (imax > nGivens) imax = nGivens;
    for(int i = 1; i <= imax; ++i) {
      int gIndex = i;
      int topRowIndex = k + i - 1;
      GivensRotationR(cGivens(gIndex), sGivens(gIndex),
                      columnJ(topRowIndex), columnJ(topRowIndex+1));
    }

    // compute a new Givens rotation when j < l
    if(j < l) {
      int gIndex = j-k+1;
      columnJ(j) = pythag(columnJ(j), columnJ(j+1), cGivens(gIndex), sGivens(gIndex));
      columnJ(j+1) = 0.0;
    }
    cholCopy.Column(j) = columnJ;
  }
  chol << cholCopy;
}
// produces the Cholesky decomposition of EAE where A = chol.t() * chol
// and E produces a RIGHT circular shift of the rows and columns from
// 1,...,k-1,k,k+1,...,l,l+1,...,p to
// 1,...,k-1,l,k,k+1,...,l-1,l+1,...,p
void right_circular_update_Cholesky(UpperTriangularMatrix &chol, int k, int l)
{
  int nRC = chol.Nrows();
  int i, j;

  // I. compute shift of column l to the kth position
  Matrix cholCopy = chol;
  // a. grab column l
  ColumnVector columnL = cholCopy.Column(l);
  // b. shift columns k,...,l-1 to the RIGHT
  for(j = l-1; j >= k; --j)
    cholCopy.Column(j+1) = cholCopy.Column(j);
  // c. copy the top k-1 elements of columnL into the kth column of cholCopy
  cholCopy.Column(k) = 0.0;
  for(i = 1; i < k; ++i)
    cholCopy(i,k) = columnL(i);

  // II. determine the l-k Givens rotations
  int nGivens = l-k;
  ColumnVector cGivens(nGivens); cGivens = 0.0;
  ColumnVector sGivens(nGivens); sGivens = 0.0;

  for(i = l; i > k; i--) {
    int givensIndex = l-i+1;
    columnL(i-1) = pythag(columnL(i-1), columnL(i),
                          cGivens(givensIndex), sGivens(givensIndex));
    columnL(i) = 0.0;
  }
  // the kth entry of columnL is the new diagonal element in column k of cholCopy
  cholCopy(k,k) = columnL(k);

  // III. apply these Givens rotations to subsequent columns
  // for columns k+1,...,l-1 we only need to apply the last nGivens-(j-k) rotations
  for(j = k+1; j <= nRC; ++j) {
    ColumnVector columnJ = cholCopy.Column(j);
    int imin = nGivens - (j-k) + 1; if (imin < 1) imin = 1;
    for(int gIndex = imin; gIndex <= nGivens; ++gIndex) {
      // apply the gIndex-th Givens rotation
      int topRowIndex = k + nGivens - gIndex;
      GivensRotationR(cGivens(gIndex), sGivens(gIndex),
                      columnJ(topRowIndex), columnJ(topRowIndex+1));
    }
    cholCopy.Column(j) = columnJ;
  }
  chol << cholCopy;
}
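// A small check in the same spirit as the CircularShift test above, assuming
// the QRZ, Clean and Print helpers used there are available: permute the
// columns of a data matrix, recompute the Cholesky factor from scratch, and
// compare it with the factor produced by the update routine. Sketch only, not
// part of the original code.
void check_right_circular_update(const Matrix& X1, int k, int l)
{
  UpperTriangularMatrix U_updated, U_fresh;
  Matrix X = X1;
  QRZ(X, U_updated);                                  // factor of the original columns
  right_circular_update_Cholesky(U_updated, k, l);    // update in place
  int n = X1.Ncols();
  X = X1.Columns(1, k-1) | X1.Column(l)               // right circular shift of k..l
    | X1.Columns(k, l-1) | X1.Columns(l+1, n);
  QRZ(X, U_fresh);                                    // factor of the shifted columns
  Matrix D = U_updated - U_fresh;                     // should be ~0 up to rounding
  Clean(D, 1e-9); Print(D);
}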
bool GenSetBase::generateAll(Matrix& M, ColumnVector& X, double D){
  if (Size <= 0 || Vdim <= 0) {
    cerr << "***ERROR: GenSetBase::generateAll(Matrix,...) "
         << "called with size=" << Size << ", vdim=" << Vdim << endl;
    return false;
  }
  if (M.Ncols() != Size || M.Nrows() != Vdim) {
    cerr << "***ERROR: GenSetBase::generateAll(Matrix,...) "
         << "dimension of M expected to be " << Vdim << "-by-" << Size
         << " but is " << M.Nrows() << "-by-" << M.Ncols() << endl;
    return false;
  }
  ColumnVector xi(Vdim);
  for (int i = 1; i <= Size; i++) {
    generate(i, D, X, xi);
    M.Column(i) = xi;
  }
  return true;
}
bool SpectClust::findNLargestEvals(const Matrix &M, int numLamda, std::vector<Numeric> &eVals,
                                   Matrix &EVec, int maxIterations) {
  bool converged = true;
  EVec.ReSize(M.Ncols(), numLamda);
  eVals.clear();
  eVals.reserve(numLamda);
  Matrix W = M;
  for(int i = 1; i <= numLamda; i++) {
    ColumnVector maxVec;
    double maxVal;
    /* Get the maximum eigen vector. */
    converged = MaxEigen(W, maxVal, maxVec, maxIterations) && converged;
    EVec.Column(i) << maxVec;
    eVals.push_back(maxVal);
    /* Deflate: subtract off the largest eigenvalue's component so the next
       iteration finds the next largest. */
    Matrix ToSub = maxVal * (maxVec * maxVec.t());
    W = W - ToSub;
  }
  return converged;
}
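// MaxEigen is not shown in this excerpt. A minimal power-iteration sketch of
// what such a routine might look like, with the same call signature as the
// call above; the tolerance and the all-ones starting vector are assumptions,
// not the original SpectClust implementation.
static bool MaxEigen(const Matrix &W, double &maxVal, ColumnVector &maxVec, int maxIterations) {
  ColumnVector v(W.Ncols());
  v = 1.0;
  v /= sqrt(v.SumSquare());
  maxVal = 0.0;
  for (int iter = 0; iter < maxIterations; iter++) {
    ColumnVector w = W * v;                        // one power-iteration step
    double norm = sqrt(w.SumSquare());
    if (norm == 0.0) break;                        // W annihilated v; give up
    w /= norm;
    double newVal = (w.t() * W * w).AsScalar();    // Rayleigh quotient estimate
    bool done = fabs(newVal - maxVal) < 1e-10 * fabs(newVal);
    maxVal = newVal;
    v = w;
    if (done) { maxVec = v; return true; }
  }
  maxVec = v;
  return false;
}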
/**
 * Set the matrix. This precalculates the norms for each column so
 * they are only calculated once.
 * @param M - Matrix to be used.
 */
void AngleMetric::setMatrix(const Matrix &M) {
  m_Norms.resize(M.Ncols(), 0);
  for(int i = 1; i <= M.Ncols(); i++) {
    m_Norms[i-1] = sqrt(M.Column(i).SumSquare());
  }
}
void SpectClustTest::testAngleDist() {
  Matrix M, Gold;
  SymmetricMatrix Dist;
  RFileToMatrix(M, "data/spike-in.b12.txt");
  RFileToMatrix(Gold, "data/spike-in.norm.angleDist.b12.0-2.txt");
  SpectClust::rowMedianDivide(M);
  M = M.t();
  AngleMetric metric(M);
  SpectClust::fillInDistance(Dist, M, metric, false);
  Matrix Diff = Gold - Dist;
  double maxDiff = MaximumAbsoluteValue(Diff);
  if(maxDiff > .00001) {
    CPPUNIT_ASSERT(false);
  }
  Matrix GoldVec;
  RFileToMatrix(GoldVec, "data/spike-in.norm.angleDist.eVec.b12.0-2.txt");
  SpectClust::normalizeSum(Dist);
  bool converged = false;
  vector<double> eVals;
  Matrix EVec;
  converged = SpectClust::findNLargestEvals(Dist, 2, eVals, EVec, 200);
  // just so happens that our eigen vector finder gives a different
  // sign than R's.
  GoldVec.Column(1) = GoldVec.Column(1) * -1;
  // GoldVec.Column(2) = GoldVec.Column(2) * -1;
  Diff = EVec - GoldVec;
  maxDiff = MaximumAbsoluteValue(Diff);
  if(maxDiff > .00001) {
    CPPUNIT_ASSERT(false);
  }
  vector<int> clusters;
  SpectClust::partitionClusters(Dist, EVec, eVals, 2, clusters, 4, 0, 1);
  vector<int> pos, neg;
  for(int i = 0; i < clusters.size(); i++) {
    if(clusters[i] == 0) {
      neg.push_back(i);
    }
    else if(clusters[i] == 1) {
      pos.push_back(i);
    }
    else {
      Err::errAbort("Only expecting 0 or 1");
    }
  }
  Matrix Pos(pos.size(), 1), Neg(neg.size(), 1);
  for(int i = 0; i < pos.size(); i++) {
    Pos.element(i,0) = pos[i] + 1;
  }
  for(int i = 0; i < neg.size(); i++) {
    Neg.element(i,0) = neg[i] + 1;
  }
  Matrix GoldPos, GoldNeg;
  RFileToMatrix(GoldPos, "data/spike-in.norm.angleDist.pos.b12.txt");
  RFileToMatrix(GoldNeg, "data/spike-in.norm.angleDist.neg.b12.txt");
  Diff = Pos - GoldPos;
  maxDiff = MaximumAbsoluteValue(Diff);
  if(maxDiff > .00001) {
    CPPUNIT_ASSERT(false);
  }
  Diff = Neg - GoldNeg;
  maxDiff = MaximumAbsoluteValue(Diff);
  if(maxDiff > .00001) {
    CPPUNIT_ASSERT(false);
  }
  vector<double> clusterVals;
  SpectClust::orderIntraClusterCorr(M, clusters, 2, clusterVals);
}
inline ColumnVector col(Matrix const& m, size_t n) { return m.Column(n); }
int unsupervised::estimate(int k_max, const Matrix& obs){
  if(FLAG) return 0;
  Dim = obs.Nrows();
  best_k_nz = k_max;
  int n = obs.Ncols();
  int k_min = 1;
  int k_nz = k_max;
  int t = 0;
  int tmp_int = 0;
  int N = Dim + Dim*(Dim+1)/2;
  double L_min = L_MAX;
  double tmp = 0.0;
  double tmp2 = 0.0;
  double criterion = 0.0;
  double previous = 0.0;
  Double2D u = AllocDouble_2D(n, k_max);
  Double2D w = AllocDouble_2D(n, k_max);
  Double1D alpha = AllocDouble_1D(k_max);
  best_alpha = AllocDouble_1D(k_max);
  bool flag = false;
  bool first_flag = true;
  mu = new ColumnVector [k_max];
  sigma = new Matrix [k_max];
  for(int i=0;i<k_max;i++){
    mu[i].ReSize(Dim);
    sigma[i].ReSize(Dim,Dim);
  }

  /* Initialization */
  Double2D tmp4 = AllocDouble_2D(Dim,n);
  Double2D tmp_vectors = AllocDouble_2D(n,Dim);
  Int1D tmp5 = AllocInt_1D(n);
  for(int j=0;j<n;j++){
    for(int i=0;i<Dim;i++){
      tmp4[i][j] = obs.element(i,j);
      tmp_vectors[j][i] = 0.0;
    }
    tmp5[j] = 0;
  }

  // initial estimate; re-estimate in case a cluster is empty, and reduce the
  // maximum number of clusters
  k_Mean(k_max,tmp4,tmp5,Dim,n);

  int r,roop;
  /* Initialization of mean and covariance */
  for(int j=0;j<k_max;j++){
    r = 0;
    mu[j] = 0.0;
    sigma[j] = 0.0;
    for(int k=0;k<n;k++){
      if(tmp5[k] == j){
        for(int l=0;l<Dim;l++){
          mu[j].element(l) += tmp4[l][k];
          tmp_vectors[k][l] = tmp4[l][k];
        }
        r++;
      }
    }
    /* Routine to avoid a non-positive definite matrix when a cluster has
       fewer data points than dimensions */
    roop = 0;
    while(r <= Dim){
      r++;
      for(int check=1; roop<n || check; roop++){
        if(tmp5[roop]!=j){
          for(int l=0;l<Dim;l++){
            mu[j].element(l) += tmp4[l][roop];
            tmp_vectors[r][l] = tmp4[l][roop];
            check = 0;
          }
        }
      }
    }
    best_alpha[j] = alpha[j] = (double) r / n;
    //std::cout<<"j "<<j<<" "<<alpha[j]<<std::endl;
    if (r) mu[j] /= r;
    for(int k=0;k<Dim;k++){
      for(int l=0;l<Dim;l++){
        for(int m=0;m<r;m++)
          sigma[j].element(k,l) += (tmp_vectors[m][k] - mu[j].element(k))
                                 * (tmp_vectors[m][l] - mu[j].element(l));
        if (r) sigma[j].element(k,l) /= r;
        sigma[j].element(k,l) *= 2;
      }
    }
    /* the routine above is not a perfect scheme, so finally check that the
       covariance is positive definite */
    if(sigma[j].Determinant()<=0){
      for(int k=0;k<Dim;k++){
        for(int l=0;l<Dim;l++){
          if(k==l) sigma[j].element(k,l) = 1.0;
          else sigma[j].element(k,l) = 0.0;
        }
      }
    }
  }
  FreeDouble_2D(tmp4,Dim,n);
  FreeInt_1D(tmp5);
  FreeDouble_2D(tmp_vectors,n,Dim);

  ColumnVector *mix_mu = new ColumnVector [k_max];
  Matrix *mix_sigma = new Matrix [k_max];
  for(int m=0;m<k_max;m++){
    mix_mu[m].ReSize(Dim);
    mix_sigma[m].ReSize(Dim,Dim);
    for(int d=0;d<Dim;d++){
      mix_mu[m].element(d) = mu[m].element(d);
      for(int dd=0;dd<Dim;dd++){
        mix_sigma[m].element(d,dd) = sigma[m].element(d,dd);
      }
    }
  }
  for(int i=0;i<n;i++){
    for(int m=0;m<k_max;m++){
      u[i][m] = gaussian(m, obs.Column(i+1));
      if(u[i][m]<UNDER_PROB) u[i][m] = UNDER_PROB;
      w[i][m] = 0.0;
    }
  }

#if 1
  do{
    if(!t) criterion = L_MAX;
    first_flag = true;
    do{
      if(!first_flag) previous = criterion;
      else{ previous = L_MAX; first_flag = false; }
      flag = false;
      t += 1;
      for(int m=0;m<k_max;m++){
        //std::cout<<"m "<<m<<" "<<alpha[m]<<std::endl;
        if(!alpha[m]) flag = true;
        for(int i=0;i<n;i++){
          tmp = 0.0;
          for(int j=0;j<k_max;j++) {
            tmp += alpha[j]*u[i][j];
            //std::cout<<j<<" "<<tmp<<" "<<alpha[j]<<" "<<u[i][j]<<std::endl;
          }
          if(tmp) {
            //std::cout<<"tmp2 "<<tmp<<" "<< alpha[m] * u[i][m] / tmp<<std::endl;
            w[i][m] = alpha[m] * u[i][m] / tmp;
          }else{
            w[i][m] = 0;
          }
        }
        if(!flag){
          alpha[m] = 0.0;
          for(int i=0;i<n;i++) {
            alpha[m] += w[i][m];
            //std::cout<<i<<" "<<alpha[m]<<" "<<w[i][m]<<std::endl;
          }
          alpha[m] = max(0.0, alpha[m] - (double) N / 2.0);
          tmp = 0.0;
          for(int j=0;j<k_max;j++){
            tmp2 = 0.0;
            for(int i=0;i<n;i++) tmp2 += w[i][j];
            //std::cout<<j<<" "<<tmp2<<" "<<N<<std::endl;
            tmp += max(0.0, tmp2 - (double) N / 2.0);
          }
          //std::cout<<"tmp "<<tmp<<" "<<alpha[m]<<std::endl;
          if(tmp) alpha[m] = alpha[m] / tmp;
          else alpha[m] = 0.0;
        }
        tmp = 0.0;
        for(int l=0;l<k_max;l++) tmp += alpha[l];
        for(int l=0;l<k_max;l++) alpha[l] /= tmp;
        if(alpha[m]){
          mix_mu[m] = 0.0;
          mix_sigma[m] = 0.0;
          tmp = 0.0;
          for(int i=0;i<n;i++){
            tmp += w[i][m];
            mix_mu[m] += w[i][m] * obs.Column(i+1);
          }
          if(tmp) mix_mu[m] /= tmp;
          for(int i=0;i<n;i++)
            mix_sigma[m] += w[i][m] * (obs.Column(i+1)-mix_mu[m])*(obs.Column(i+1)-mix_mu[m]).t();
          if(tmp) mix_sigma[m] /= tmp;
          for(int i=0;i<n;i++){
            u[i][m] = gaussian(obs.Column(i+1), mix_mu[m], mix_sigma[m]);
            if(u[i][m]<UNDER_PROB) u[i][m] = UNDER_PROB;
          }
        }
        else{
          if(!flag) k_nz--;
        }
      }
      tmp = 0.0;
      flag = false;
      criterion = (double) k_nz*log((double) n/12.0) / 2.0 + (double) k_nz*(N+1)/2.0;
      for(int m=0;m<k_max;m++){
        //std::cout<<tmp<<" "<<alpha[m]<<" "<<n<<" "<<log((double) n*alpha[m] / 12.0) << std::endl;
        if(alpha[m]) tmp += log((double) n*alpha[m] / 12.0);
      }
      //std::cout<<tmp<<" "<<criterion<<std::endl;
      criterion += (double) N * tmp / 2.0;
      tmp = 0.0;
      for(int i=0;i<n;i++){
        tmp2 = 0.0;
        for(int m=0;m<k_max;m++) tmp2 += alpha[m]*u[i][m];
        if(tmp2>UNDER_PROB) tmp += log(tmp2);
        else flag = true;
      }
      if(flag) criterion = previous;
      else criterion -= tmp;
      /* output the progression
      if(criterion==L_MAX) fprintf(stderr,"%d L_MAX(%d)\n",t, best_k_nz);
      else if(criterion<L_min) fprintf(stderr,"%d %lf(%d)\n",t, criterion, best_k_nz);
      else fprintf(stderr,"%d %lf(%d)\n",t, L_min, best_k_nz);
      */
      //fprintf(stderr,"%d %lf(%d)\n",t, criterion, k_nz);
    }while((previous - criterion > THRESHOLD*fabs(previous)));

    if(criterion < L_min){
      L_min = criterion;
      for(int k=0;k<best_k_nz;k++){
        mu[k].CleanUp();
        sigma[k].CleanUp();
      }
      delete [] mu;
      delete [] sigma;
      FreeDouble_1D(best_alpha);
      best_alpha = AllocDouble_1D(k_nz);
      mu = new ColumnVector [k_nz];
      sigma = new Matrix [k_nz];
      for(int k=0, l=0;k<k_max;k++){
        if(alpha[k]){
          best_alpha[l] = alpha[k];
          mu[l].ReSize(Dim);
          sigma[l].ReSize(Dim,Dim);
          mu[l] = mix_mu[k];
          sigma[l] = mix_sigma[k];
          l++;
        }
      }
      best_k_nz = k_nz;
    }
    tmp = 1e306;
    tmp2 = 0.0;
    tmp_int = 0;
    for(int m=0;m<k_max;m++){
      if(alpha[m]){
        tmp2 += alpha[m];
        if(tmp>alpha[m]){
          tmp = alpha[m];
          tmp_int = m;
        }
      }
    }
    alpha[tmp_int] = 0.0;
    k_nz = 0;
    tmp2 -= tmp;
    if(tmp2){
      for(int m=0;m<k_max;m++){
        if(alpha[m]){
          alpha[m] /= tmp2;
          k_nz++;
        }
      }
    }
  }while(k_nz>=k_min);

  FreeDouble_2D(u, n, k_max);
  FreeDouble_2D(w, n, k_max);
  FreeDouble_1D(alpha);
  for(int m=0;m<k_max;m++){
    mix_mu[m].CleanUp();
    mix_sigma[m].CleanUp();
  }
  delete [] mix_mu;
  delete [] mix_sigma;
#endif
  FLAG = true; // estimation is done.
  return best_k_nz;
}
// Add a chunk of data
void SOGP::addM(const Matrix& in, const Matrix& out){
  for(int i=1; i<=in.Ncols(); i++)
    add(in.Column(i), out.Column(i));
}
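// A usage sketch tying addM and predictM (shown above) together: train on one
// chunk of examples and predict on another, one column per example. The SOGP
// object is passed in because its constructor is not shown in this excerpt;
// this example is not part of the original code.
void sogpChunkExample(SOGP &gp, const Matrix &trainIn, const Matrix &trainOut,
                      const Matrix &testIn)
{
  gp.addM(trainIn, trainOut);                        // one column per training example
  ColumnVector sigconf;
  Matrix pred = gp.predictM(testIn, sigconf, true);  // one prediction column per test input
}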
bool Matrix::EqualsType(const Matrix& matrix) const { return Row() == matrix.Row() && Column() == matrix.Column(); }
Matrix::Matrix(const Matrix& matrix) : Matrix(matrix.Row(), matrix.Column(), matrix.Values(), matrix.Size()) { }
double DistanceMetric::dist(const Matrix &M, int col1Ix, int col2Ix) const {
  Numeric dist = DotProduct(M.Column(col1Ix), M.Column(col2Ix));
  return dist;
}