/* Compute tilting angle --------------------------------------------------- */ void TiltPairAligner::computeGamma() { #define TRIANGLE_NO 15000 #define MIN_AREA 15 #define MAX_AREA 250000 gamma = 0; Matrix1D<int> iju(2), iku(2), ijt(2), ikt(2); // From i to j in untilted // From i to k in untilted // From i to j in tilted // From i to k in tilted int triang = 0; // Number of triangles considered int i, j, k, counter1; counter1 = 0; randomize_random_generator(); long noCombinations; noCombinations = Nu * (Nu - 1) * (Nu - 2) / 6; while (triang < TRIANGLE_NO && counter1 < noCombinations) { counter1++; i = (int)round(rnd_unif(0, Nu - 1)); j = (int)round(rnd_unif(0, Nu - 1)); k = (int)round(rnd_unif(0, Nu - 1)); // Compute area of triangle in untilted micrograph VECTOR_R2(iju, coordU[j] - coordU[i], coordU[j+1] - coordU[i+1]); VECTOR_R2(iku, coordU[k] - coordU[i], coordU[k+1] - coordU[i+1]); double untilted_area = fabs(dotProduct(iju, iku)/*/2*/); if (untilted_area < MIN_AREA ) continue; // For numerical stability // Compute area of the same triangle in the tilted micrograph VECTOR_R2(ijt, coordT[j] - coordT[i], coordT[j+1] - coordT[i+1]); VECTOR_R2(ikt, coordT[k] - coordT[i], coordT[k+1] - coordT[i+1]); double tilted_area = fabs(dotProduct(ijt, ikt)/*/2*/); if (tilted_area < MIN_AREA ) continue; // For numerical stability if (tilted_area > MAX_AREA ) continue; // micrograph are not perfect // sheets so avoid // very far away particles // Now we know that tilted_area=untilted_area*cos(gamma) if (tilted_area > untilted_area) continue; // There are user errors // In the point selection gamma += acos(tilted_area / untilted_area); triang++; } gamma /= triang; gamma = RAD2DEG(gamma); if (triang < 100) std::cout << "Not many particles, tilt angle may not be accurate" << std::endl; }
// Init random ............................................................. void FileName::initRandom(int length) { randomize_random_generator(); *this = ""; for (int i = 0; i < length; i++) *this += 'a' + FLOOR(rnd_unif(0, 26)); }
// Randomize the phases of all Fourier components at radius >= index,
// keeping their amplitudes, then transform back into v in place.
void randomizePhasesBeyond(MultidimArray<double>& v, int index)
{
    MultidimArray< Complex > FT;
    FourierTransformer transformer;
    transformer.FourierTransform(v, FT, false);

    int minRadius2 = index * index; // compare squared radii, avoids sqrt
    FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT)
    {
        if (kp * kp + ip * ip + jp * jp >= minRadius2)
        {
            // Keep the amplitude, draw a fresh uniform phase in [0, 2*PI)
            double amplitude = abs(DIRECT_A3D_ELEM(FT, k, i, j));
            double phase = rnd_unif(0., 2.*PI);
            DIRECT_A3D_ELEM(FT, k, i, j) =
                Complex(amplitude * cos(phase), amplitude * sin(phase));
        }
    }

    // Back to real space
    transformer.inverseFourierTransform();
}
// Build a bagging ensemble of Naive Bayes classifiers.
// Each of the numberOfClassifiers members is trained on a random subset of
// the features (a samplingFeatures fraction) and, per class, a bootstrap
// sample of the individuals (a samplingIndividuals fraction).
// @param features   one feature matrix per class (rows=individuals, cols=features)
// @param priorProbs class prior probabilities
// @param discreteLevels number of discretization levels for each classifier
// @param numberOfClassifiers ensemble size
// @param samplingFeatures    fraction of features per subclassifier
// @param samplingIndividuals fraction of individuals per class per subclassifier
// @param newJudgeCombination voting/combination scheme stored for later judging
EnsembleNaiveBayes::EnsembleNaiveBayes(
    const std::vector < MultidimArray<double> > &features,
    const Matrix1D<double> &priorProbs,
    int discreteLevels, int numberOfClassifiers,
    double samplingFeatures, double samplingIndividuals,
    const std::string &newJudgeCombination)
{
    int NFeatures=XSIZE(features[0]);
    int NsubFeatures=CEIL(NFeatures*samplingFeatures);
    K=features.size();
    judgeCombination=newJudgeCombination;

#ifdef WEIGHTED_SAMPLING
    // Measure the classification power of each variable
    NaiveBayes *nb_weights=new NaiveBayes(features, priorProbs, discreteLevels);
    MultidimArray<double> weights=nb_weights->__weights;
    delete nb_weights;
    double sumWeights=weights.sum();
#endif

    for (int n=0; n<numberOfClassifiers; n++)
    {
        // Produce the set of features for this subclassifier
        MultidimArray<int> subFeatures(NsubFeatures);
        FOR_ALL_ELEMENTS_IN_ARRAY1D(subFeatures)
        {
#ifdef WEIGHTED_SAMPLING
            // Roulette-wheel selection: pick feature j with probability
            // proportional to its weight
            double random_sum_weight=rnd_unif(0,sumWeights);
            int j=0;
            do
            {
                double wj=DIRECT_A1D_ELEM(weights,j);
                if (wj<random_sum_weight)
                {
                    random_sum_weight-=wj;
                    j++;
                    if (j==NFeatures)
                    {
                        j=NFeatures-1;
                        break;
                    }
                }
                else
                    break;
            }
            while (true);
            DIRECT_A1D_ELEM(subFeatures,i)=j;
#else
            // Uniform sampling (with replacement) over all features
            DIRECT_A1D_ELEM(subFeatures,i)=round(rnd_unif(0,NFeatures-1));
#endif
        }

        // Container for the new training sample
        std::vector< MultidimArray<double> > newFeatures;

        // Produce the data set for each class
        for (int k=0; k<K; k++)
        {
            int NIndividuals=YSIZE(features[k]);
            int NsubIndividuals=CEIL(NIndividuals*samplingIndividuals);
            MultidimArray<int> subIndividuals(NsubIndividuals);
            FOR_ALL_ELEMENTS_IN_ARRAY1D(subIndividuals)
                // FIX: bootstrap from the WHOLE class (NIndividuals rows);
                // the original drew from [0, NsubIndividuals-1], silently
                // restricting the sample to the first rows of each class.
                subIndividuals(i)=ROUND(rnd_unif(0,NIndividuals-1));

            // Gather the sampled rows/columns into the training matrix
            MultidimArray<double> newFeaturesK;
            newFeaturesK.initZeros(NsubIndividuals,NsubFeatures);
            const MultidimArray<double>& features_k=features[k];
            FOR_ALL_ELEMENTS_IN_ARRAY2D(newFeaturesK)
                DIRECT_A2D_ELEM(newFeaturesK,i,j)=DIRECT_A2D_ELEM(features_k,
                    DIRECT_A1D_ELEM(subIndividuals,i),
                    DIRECT_A1D_ELEM(subFeatures,j));

            newFeatures.push_back(newFeaturesK);
        }

        // Create a Naive Bayes classifier with this data
        NaiveBayes *nb=new NaiveBayes(newFeatures, priorProbs, discreteLevels);
        ensemble.push_back(nb);
        ensembleFeatures.push_back(subFeatures);
    }
}
void ProbabilisticPCA::reduceDimensionality() { size_t N=MAT_YSIZE(*X); // N= number of rows of X size_t D=MAT_XSIZE(*X); // D= number of columns of X bool converged=false; size_t iter=0; double sigma2=rnd_unif()*2; double Q=MAXDOUBLE, oldQ; Matrix2D<double> S, W, inW, invM, Ez, WtX, Wp1, Wp2, invWp2, WinvM, WinvMWt, WtSDIW, invCS; // Compute variance and row energy subtractColumnMeans(*X); matrixOperation_AtA(*X,S); S/=(double)N; Matrix1D<double> normX; X->rowEnergySum(normX); W.initRandom(D,outputDim,0,2,RND_UNIFORM); matrixOperation_AtA(W,inW); MultidimArray <double> Ezz(N,outputDim,outputDim); while (!converged && iter<=Niters) { ++iter; // Perform E-step // Ez=(W^t*W)^-1*W^t*X^t for (size_t i=0; i<outputDim; ++i) MAT_ELEM(inW,i,i)+=sigma2; inW.inv(invM); matrixOperation_AtBt(W,*X,WtX); matrixOperation_AB(invM,WtX,Ez); for (size_t k=0; k<N; ++k) FOR_ALL_ELEMENTS_IN_MATRIX2D(invM) DIRECT_A3D_ELEM(Ezz,k,i,j)=MAT_ELEM(invM,i,j)*sigma2+MAT_ELEM(Ez,i,k)*MAT_ELEM(Ez,j,k); // Perform M-step (maximize mapping W) Wp1.initZeros(D,outputDim); Wp2.initZeros(outputDim,outputDim); for (size_t k=0; k<N; ++k) { FOR_ALL_ELEMENTS_IN_MATRIX2D(Wp1) MAT_ELEM(Wp1,i,j)+=MAT_ELEM(*X,k,i)*MAT_ELEM(Ez,j,k); FOR_ALL_ELEMENTS_IN_MATRIX2D(Wp2) MAT_ELEM(Wp2,i,j)+=DIRECT_A3D_ELEM(Ezz,k,i,j); } Wp2.inv(invWp2); matrixOperation_AB(Wp1,invWp2,W); matrixOperation_AtA(W,inW); // Update sigma2 double sigma2_new=0; for (size_t k=0; k<N; ++k){ double EzWtX=0; FOR_ALL_ELEMENTS_IN_MATRIX2D(W) EzWtX+=MAT_ELEM(*X,k,i)*MAT_ELEM(W,i,j)*MAT_ELEM(Ez,j,k); double t=0; for (size_t i = 0; i < outputDim; ++i) { double aux=0.; for (size_t kk = 0; kk < outputDim; ++kk) aux += DIRECT_A3D_ELEM(Ezz,k,i,kk) * MAT_ELEM(inW, kk, i); t+=aux; } sigma2_new += VEC_ELEM(normX,k) - 2 * EzWtX + t; } sigma2_new/=(double) N * (double) D; //Compute likelihood of new model oldQ = Q; if (iter > 1) { matrixOperation_AB(W,invM,WinvM); matrixOperation_ABt(WinvM,W,WinvMWt); matrixOperation_IminusA(WinvMWt); WinvMWt*=1/sigma2_new; 
matrixOperation_AtA(W,WtSDIW); WtSDIW*=1/sigma2_new; matrixOperation_IplusA(WtSDIW); double detC = pow(sigma2_new,D)* WtSDIW.det(); matrixOperation_AB(WinvMWt,S,invCS); Q = (N*(-0.5)) * (D * log (2*PI) + log(detC) + invCS.trace()); } // Stop condition to detect convergence // Must not apply to the first iteration, because then it will end inmediately if (iter>2 && abs(oldQ-Q) < 0.001) converged=true; sigma2=sigma2_new; } //mapping.M = (inW \ W')'; matrixOperation_ABt(W,inW.inv(),A); matrixOperation_AB(*X,A,Y); if (fnMapping!="") A.write(fnMapping); }