Example #1
void Postprocessing::applyFscWeighting(MultidimArray<Complex > &FT, MultidimArray<DOUBLE> my_fsc)
{
	// Find resolution where fsc_true drops below zero for the first time
	// Set all weights to zero beyond that resolution
	int ires_max = 0 ;
	FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(my_fsc)
	{
		if (DIRECT_A1D_ELEM(my_fsc, i) < 1e-10)
			break;
		ires_max = i;
	}
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT)
	{
    	int ires = ROUND(sqrt((DOUBLE)kp * kp + ip * ip + jp * jp));
		if (ires <= ires_max)
		{
	        DOUBLE fsc = DIRECT_A1D_ELEM(my_fsc, ires);
	        if (fsc < 1e-10)
	        	REPORT_ERROR("Postprocessing::applyFscWeighting BUG: fsc <= 0");
	        DIRECT_A3D_ELEM(FT, k, i, j) *= sqrt((2 * fsc) / (1 + fsc));
		}
		else
		{
			DIRECT_A3D_ELEM(FT, k, i, j) = 0.;
		}
	}

}
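
The per-shell factor applied above is sqrt(2*FSC/(1+FSC)), the usual figure-of-merit weighting derived from the FSC curve. Below is a minimal, self-contained sketch of just that arithmetic; the helper name fscWeight is made up for illustration and is not part of the RELION code.

#include <cmath>
#include <cstdio>
#include <initializer_list>

// Hypothetical helper mirroring the weight used in applyFscWeighting():
// sqrt(2*FSC / (1+FSC)), with non-positive FSC mapped to a zero weight.
static double fscWeight(double fsc)
{
    if (fsc < 1e-10)
        return 0.;
    return std::sqrt((2. * fsc) / (1. + fsc));
}

int main()
{
    for (double fsc : {1.0, 0.9, 0.5, 0.143, 0.0})
        std::printf("FSC = %5.3f  ->  weight = %5.3f\n", fsc, fscWeight(fsc));
    return 0;
}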
Example #2
/* ------------------------------------------------------------------------- */
NaiveBayes::NaiveBayes(
    const std::vector< MultidimArray<double> > &features,
    const Matrix1D<double> &priorProbs,
    int discreteLevels)
{
    K = features.size();
    Nfeatures=XSIZE(features[0]);
    __priorProbsLog10.initZeros(K);
    FOR_ALL_ELEMENTS_IN_MATRIX1D(__priorProbsLog10)
    VEC_ELEM(__priorProbsLog10,i)=log10(VEC_ELEM(priorProbs,i));

    // Create a dummy leaf for features that cannot classify
    std::vector < MultidimArray<double> > aux(K);
    dummyLeaf=new LeafNode(aux,0);

    // Build a leafnode for each feature and assign a weight
    __weights.initZeros(Nfeatures);
    for (int f=0; f<Nfeatures; f++)
    {
        for (int k=0; k<K; k++)
            features[k].getCol(f, aux[k]);
        LeafNode *leaf=new LeafNode(aux,discreteLevels);
        if (leaf->__discreteLevels>0)
        {
            __leafs.push_back(leaf);
            DIRECT_A1D_ELEM(__weights,f)=__leafs[f]->computeWeight();
        }
        else
        {
            __leafs.push_back(dummyLeaf);
            DIRECT_A1D_ELEM(__weights,f)=0;
            delete leaf;
        }
#ifdef DEBUG_WEIGHTS

        if(debugging == true)
        {
            std::cout << "Node " << f << std::endl;
            std::cout << *(__leafs[f]) << std::endl;
            //char c;
            //std::cin >> c;
        }
#endif

    }
    double norm=__weights.computeMax();
    if (norm>0)
    	__weights *= 1.0/norm;

    // Set default cost matrix
    __cost.resizeNoCopy(K,K);
    __cost.initConstant(1);
    for (int i=0; i<K; i++)
        MAT_ELEM(__cost,i,i)=0;
}
Example #3
void Postprocessing::makeGuinierPlot(MultidimArray<Complex > &FT, std::vector<fit_point2D> &guinier)
{

	MultidimArray<int> radial_count(XSIZE(FT));
	MultidimArray<DOUBLE> lnF(XSIZE(FT));
	fit_point2D      onepoint;

	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT)
	{
    	int r2 = kp * kp + ip * ip + jp * jp;
    	int ires = ROUND(sqrt((DOUBLE)r2));
		if (ires < XSIZE(radial_count))
		{

	        lnF(ires) += abs(DIRECT_A3D_ELEM(FT, k, i, j));
	        radial_count(ires)++;
		}
	}

	DOUBLE xsize = XSIZE(I1());
	guinier.clear();
	FOR_ALL_ELEMENTS_IN_ARRAY1D(radial_count)
	{

		DOUBLE res = (xsize * angpix)/(DOUBLE)i; // resolution in Angstrom
		if (res >= angpix * 2.) // Apply B-factor sharpening until Nyquist, then low-pass filter later on (with a soft edge)
        {
            onepoint.x = 1. / (res * res);
            if (DIRECT_A1D_ELEM(lnF, i) > 0.)
            {
                onepoint.y = log ( DIRECT_A1D_ELEM(lnF, i) / DIRECT_A1D_ELEM(radial_count, i) );
                if (res <= fit_minres && res >= fit_maxres)
                {
                    onepoint.w = 1.;
                }
                else
                {
                    onepoint.w = 0.;
                }
            }
            else
            {
                onepoint.y = -99.;
                onepoint.w = 0.;
            }
            //std::cerr << " onepoint.x= " << onepoint.x << " onepoint.y= " << onepoint.y << " onepoint.w= " << onepoint.w << std::endl;
            guinier.push_back(onepoint);
        }
	}

}
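
Each Guinier point stores x = 1/d^2 and y = ln of the radially averaged amplitude for one shell. The following standalone sketch shows that conversion for a single shell; the box size, pixel size and shell values are made-up numbers, not taken from the code above.

#include <cmath>
#include <cstdio>

int main()
{
    double xsize  = 200.;    // hypothetical box size in pixels
    double angpix = 1.2;     // hypothetical pixel size in Angstrom
    int    i      = 25;      // shell index
    double sumF   = 1234.5;  // accumulated |F| in this shell (lnF above)
    int    count  = 380;     // number of voxels in this shell (radial_count above)

    double res = xsize * angpix / (double)i;   // resolution in Angstrom
    double x   = 1. / (res * res);             // Guinier abscissa, 1/d^2
    double y   = std::log(sumF / count);       // ln of the mean amplitude
    std::printf("res = %.2f A   x = %.5f   y = %.3f\n", res, x, y);
    return 0;
}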
Example #4
// Value access
double TabFtBlob::operator()(double val) const
{
    int idx = (int)( ABS(val) / sampling);
    if (idx >= XSIZE(tabulatedValues))
        return 0.;
    else
        return DIRECT_A1D_ELEM(tabulatedValues, idx);
}
Example #5
    void run()
    {
        // Get angles ==============================================================
        MetaData angles;
        angles.read(fnIn);
        size_t AngleNo = angles.size();
        if (AngleNo == 0 || !angles.containsLabel(MDL_ANGLE_ROT))
            REPORT_ERROR(ERR_MD_BADLABEL, "Input file doesn't contain angular information");

        double maxWeight = -99.e99;
        MultidimArray<double> weight;
        weight.initZeros(AngleNo);
        if (angles.containsLabel(MDL_WEIGHT))
        {
            // Find maximum weight
            int i=0;
            FOR_ALL_OBJECTS_IN_METADATA(angles)
            {
                double w;
                angles.getValue(MDL_WEIGHT,w,__iter.objId);
                DIRECT_A1D_ELEM(weight,i++)=w;
                maxWeight=XMIPP_MAX(w,maxWeight);
            }
        }
Example #6
void Steerable::singleFilter(const MultidimArray<double>& Vin,
    MultidimArray<double> &hx1, MultidimArray<double> &hy1, MultidimArray<double> &hz1,
    MultidimArray<double> &Vout)
{
    MultidimArray< std::complex<double> > H, Aux;
    Vout.initZeros(Vin);

    // Filter in X
    #define MINUS_ONE_POWER(n) (((n)%2==0)? 1:-1)
    FourierTransformer transformer;
    transformer.FourierTransform(hx1,H);

    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
        H(i)*= MINUS_ONE_POWER(i);

    FourierTransformer transformer2;

    MultidimArray<double> aux(XSIZE(Vin));

    transformer2.setReal(aux);

    for (size_t k=0; k<ZSIZE(Vin); k++)
        for (size_t i=0; i<YSIZE(Vin); i++)
        {
            for (size_t j=0; j<XSIZE(Vin); j++)
                DIRECT_A1D_ELEM(aux,j)=DIRECT_A3D_ELEM(Vin,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t j=0; j<XSIZE(Vin); j++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,j);
        }

    // Filter in Y
    transformer.FourierTransform(hy1,H);

    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
        H(i)*= MINUS_ONE_POWER(i);

    aux.initZeros(YSIZE(Vin));
    transformer2.setReal(aux);

    for (size_t k=0; k<ZSIZE(Vin); k++)
        for (size_t j=0; j<XSIZE(Vin); j++)
        {
            for (size_t i=0; i<YSIZE(Vin); i++)
                DIRECT_A1D_ELEM(aux,i)=DIRECT_A3D_ELEM(Vout,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t i=0; i<YSIZE(Vin); i++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,i);
        }

    // Filter in Z
    transformer.FourierTransform(hz1,H);

    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
        H(i)*= MINUS_ONE_POWER(i);

    aux.initZeros(ZSIZE(Vin));
    transformer2.setReal(aux);

    for (size_t i=0; i<YSIZE(Vin); i++)
        for (size_t j=0; j<XSIZE(Vin); j++)
        {
            for (size_t k=0; k<ZSIZE(Vin); k++)
                DIRECT_A1D_ELEM(aux,k)=DIRECT_A3D_ELEM(Vout,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t k=0; k<ZSIZE(Vin); k++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,k);
        }

    // If Missing wedge
    if (MW!=NULL)
        MW->removeWedge(Vout);
}
Example #7
File: fftw.cpp  Project: shy3u/GeRelion
/** Kullback-Leibler divergence */
double getKullbackLeibnerDivergence(MultidimArray<Complex >& Fimg,
                                    MultidimArray<Complex >& Fref, MultidimArray<double>& sigma2,
                                    MultidimArray<double>& p_i, MultidimArray<double>& q_i, int highshell, int lowshell)
{
	// First check dimensions are OK
	if (!Fimg.sameShape(Fref))
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: Fimg and Fref are not of the same shape.");
	}

	if (highshell < 0)
	{
		highshell = XSIZE(Fimg) - 1;
	}
	if (lowshell < 0)
	{
		lowshell = 0;
	}

	if (highshell > XSIZE(sigma2))
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: highshell is larger than size of sigma2 array.");
	}

	if (highshell < lowshell)
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: highshell is smaller than lowshell.");
	}

	// Initialize the histogram
	MultidimArray<int> histogram;
	int histogram_size = 101;
	int histogram_origin = histogram_size / 2;
	double sigma_max = 10.;
	double histogram_factor = histogram_origin / sigma_max;
	histogram.initZeros(histogram_size);

	// This way this will work in both 2D and 3D
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(Fimg)
	{
		int ires = ROUND(sqrt(kp * kp + ip * ip + jp * jp));
		if (ires >= lowshell && ires <= highshell)
		{
			// Use FT of masked image for noise estimation!
			double diff_real = (DIRECT_A3D_ELEM(Fref, k, i, j)).real - (DIRECT_A3D_ELEM(Fimg, k, i, j)).real;
			double diff_imag = (DIRECT_A3D_ELEM(Fref, k, i, j)).imag - (DIRECT_A3D_ELEM(Fimg, k, i, j)).imag;
			double sigma = sqrt(DIRECT_A1D_ELEM(sigma2, ires));

			// Divide by standard deviation to normalise all the difference
			diff_real /= sigma;
			diff_imag /= sigma;

			// Histogram runs from -10 sigma to +10 sigma
			diff_real += sigma_max;
			diff_imag += sigma_max;

			// Make histogram on-the-fly;
			// Real part
			int ihis = ROUND(diff_real * histogram_factor);
			if (ihis < 0)
			{
				ihis = 0;
			}
			else if (ihis >= histogram_size)
			{
				ihis = histogram_size - 1;
			}
			histogram(ihis)++;
			// Imaginary part
			ihis = ROUND(diff_imag * histogram_factor);
			if (ihis < 0)
			{
				ihis = 0;
			}
			else if (ihis >= histogram_size)
			{
				ihis = histogram_size - 1;
			}
			histogram(ihis)++;

		}
	}

	// Normalise the histogram and the discretised analytical Gaussian
	double norm = (double)histogram.sum();
	double gaussnorm = 0.;
	for (int i = 0; i < histogram_size; i++)
	{
		double x = (double)i / histogram_factor;
		gaussnorm += gaussian1D(x - sigma_max, 1. , 0.);
	}

	// Now calculate the actual Kullback-Leibler divergence
	double kl_divergence = 0.;
	p_i.resize(histogram_size);
	q_i.resize(histogram_size);
	for (int i = 0; i < histogram_size; i++)
	{
		// Data distribution
		p_i(i) = (double)histogram(i) / norm;
		// Theoretical distribution
		double x = (double)i / histogram_factor;
		q_i(i) = gaussian1D(x - sigma_max, 1. , 0.) / gaussnorm;

		if (p_i(i) > 0.)
		{
			kl_divergence += p_i(i) * log(p_i(i) / q_i(i));
		}
	}
	kl_divergence /= (double)histogram_size;

	return kl_divergence;

}
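
The final loop above is a discrete Kullback-Leibler sum over histogram bins, D = sum_i p_i * log(p_i/q_i), which the function then divides by the number of bins. Here is a standalone sketch of the core sum, assuming two already-normalised distributions with invented values:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<double> p = {0.10, 0.40, 0.40, 0.10};   // observed distribution
    std::vector<double> q = {0.25, 0.25, 0.25, 0.25};   // reference distribution

    double kl = 0.;
    for (size_t i = 0; i < p.size(); i++)
        if (p[i] > 0.)                      // empty bins contribute nothing
            kl += p[i] * std::log(p[i] / q[i]);

    std::printf("KL divergence = %g\n", kl);
    return 0;
}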
Example #8
File: fftw.cpp  Project: shy3u/GeRelion
// Shift an image through phase-shifts in its Fourier Transform (without pretabulated sine and cosine)
void shiftImageInFourierTransform(MultidimArray<Complex >& in,
                                  MultidimArray<Complex >& out,
                                  double oridim, Matrix1D<double> shift)
{
	out.resize(in);
	shift /= -oridim;
	double dotp, a, b, c, d, ac, bd, ab_cd, x, y, z, xshift, yshift, zshift;
	switch (in.getDim())
	{
	case 1:
		xshift = XX(shift);
		if (ABS(xshift) < XMIPP_EQUAL_ACCURACY)
		{
			out = in;
			return;
		}
		for (long int j = 0; j < XSIZE(in); j++)
		{
			x = j;
			dotp = 2 * PI * (x * xshift);
			a = cos(dotp);
			b = sin(dotp);
			c = DIRECT_A1D_ELEM(in, j).real;
			d = DIRECT_A1D_ELEM(in, j).imag;
			ac = a * c;
			bd = b * d;
			ab_cd = (a + b) * (c + d); // (ab_cd-ac-bd = ad+bc : but needs 4 multiplications)
			DIRECT_A1D_ELEM(out, j) = Complex(ac - bd, ab_cd - ac - bd);
		}
		break;
	case 2:
		xshift = XX(shift);
		yshift = YY(shift);
		if (ABS(xshift) < XMIPP_EQUAL_ACCURACY && ABS(yshift) < XMIPP_EQUAL_ACCURACY)
		{
			out = in;
			return;
		}
		for (long int i = 0; i < XSIZE(in); i++)
			for (long int j = 0; j < XSIZE(in); j++)
			{
				x = j;
				y = i;
				dotp = 2 * PI * (x * xshift + y * yshift);
				a = cos(dotp);
				b = sin(dotp);
				c = DIRECT_A2D_ELEM(in, i, j).real;
				d = DIRECT_A2D_ELEM(in, i, j).imag;
				ac = a * c;
				bd = b * d;
				ab_cd = (a + b) * (c + d);
				DIRECT_A2D_ELEM(out, i, j) =  Complex(ac - bd, ab_cd - ac - bd);
			}
		for (long int i = YSIZE(in) - 1; i >= XSIZE(in); i--)
		{
			y = i - YSIZE(in);
			for (long int j = 0; j < XSIZE(in); j++)
			{
				x = j;
				dotp = 2 * PI * (x * xshift + y * yshift);
				a = cos(dotp);
				b = sin(dotp);
				c = DIRECT_A2D_ELEM(in, i, j).real;
				d = DIRECT_A2D_ELEM(in, i, j).imag;
				ac = a * c;
				bd = b * d;
				ab_cd = (a + b) * (c + d);
				DIRECT_A2D_ELEM(out, i, j) = Complex(ac - bd, ab_cd - ac - bd);
			}
		}
		break;
	case 3:
		xshift = XX(shift);
		yshift = YY(shift);
		zshift = ZZ(shift);
		if (ABS(xshift) < XMIPP_EQUAL_ACCURACY && ABS(yshift) < XMIPP_EQUAL_ACCURACY && ABS(zshift) < XMIPP_EQUAL_ACCURACY)
		{
			out = in;
			return;
		}
		for (long int k = 0; k < ZSIZE(in); k++)
		{
			z = (k < XSIZE(in)) ? k : k - ZSIZE(in);
			for (long int i = 0; i < YSIZE(in); i++)
			{
				y = (i < XSIZE(in)) ? i : i - YSIZE(in);
				for (long int j = 0; j < XSIZE(in); j++)
				{
					x = j;
					dotp = 2 * PI * (x * xshift + y * yshift + z * zshift);
					a = cos(dotp);
					b = sin(dotp);
					c = DIRECT_A3D_ELEM(in, k, i, j).real;
					d = DIRECT_A3D_ELEM(in, k, i, j).imag;
					ac = a * c;
					bd = b * d;
					ab_cd = (a + b) * (c + d);
					DIRECT_A3D_ELEM(out, k, i, j) = Complex(ac - bd, ab_cd - ac - bd);
				}
			}
		}
		break;
	default:
		REPORT_ERROR("shiftImageInFourierTransform ERROR: dimension should be 1, 2 or 3!");
	}
}
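
The inner update above multiplies each Fourier component (c + i*d) by the phase factor exp(i*dotp) = (a + i*b) using only three real multiplications, via ab_cd = (a+b)(c+d). Below is a standalone sketch that checks the trick against std::complex; all numeric values are made up for illustration.

#include <cmath>
#include <complex>
#include <cstdio>

int main()
{
    const double PI = 3.14159265358979323846;
    double dotp = 2 * PI * 0.1;           // example phase
    double a = std::cos(dotp), b = std::sin(dotp);
    double c = 1.5, d = -0.75;            // example Fourier component

    double ac = a * c;
    double bd = b * d;
    double ab_cd = (a + b) * (c + d);     // ab_cd - ac - bd == ad + bc
    std::complex<double> fast(ac - bd, ab_cd - ac - bd);

    std::complex<double> ref = std::complex<double>(c, d) * std::polar(1.0, dotp);
    std::printf("fast = (%g, %g)   reference = (%g, %g)\n",
                fast.real(), fast.imag(), ref.real(), ref.imag());
    return 0;
}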
Example #9
void ProgSortByStatistics::processInputPrepare(MetaData &SF)
{
    PCAMahalanobisAnalyzer tempPcaAnalyzer;
    tempPcaAnalyzer.clear();

    Image<double> img;
    MultidimArray<double> img2;
    MultidimArray<int> radial_count;
    MultidimArray<double> radial_avg;
    Matrix1D<int> center(2);
    center.initZeros();

    if (verbose>0)
        std::cout << " Processing training set ..." << std::endl;

    int nr_imgs = SF.size();
    if (verbose>0)
        init_progress_bar(nr_imgs);
    int c = XMIPP_MAX(1, nr_imgs / 60);
    int imgno = 0, imgnoPCA=0;
    MultidimArray<float> v;
    MultidimArray<int> distance;
    int dim;

    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    bool first=true;
    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        if (thereIsEnable)
        {
            int enabled;
            SF.getValue(MDL_ENABLED,enabled,__iter.objId);
            if (enabled==-1)
                continue;
        }
        img.readApplyGeo(SF,__iter.objId);
        if (targetXdim!=-1 && targetXdim!=XSIZE(img()))
        	selfScaleToSize(LINEAR,img(),targetXdim,targetXdim,1);
        MultidimArray<double> &mI=img();
        mI.setXmippOrigin();
        mI.statisticsAdjust(0,1);

        // Overall statistics
        Histogram1D hist;
        compute_hist(mI,hist,-4,4,31);

        // Radial profile
        img2.resizeNoCopy(mI);
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(img2)
        {
            double val=DIRECT_MULTIDIM_ELEM(mI,n);
            DIRECT_MULTIDIM_ELEM(img2,n)=val*val;
        }
        if (first)
        {
            radialAveragePrecomputeDistance(img2, center, distance, dim);
            first=false;
        }
        fastRadialAverage(img2, distance, dim, radial_avg, radial_count);

        // Build vector
        v.initZeros(XSIZE(hist)+XSIZE(img2)/2);
        int idx=0;
        FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(hist)
        v(idx++)=(float)DIRECT_A1D_ELEM(hist,i);
        for (size_t i=0; i<XSIZE(img2)/2; i++)
            v(idx++)=(float)DIRECT_A1D_ELEM(radial_avg,i);

        tempPcaAnalyzer.addVector(v);

        if (imgno % c == 0 && verbose>0)
            progress_bar(imgno);
        imgno++;
        imgnoPCA++;
    }
    if (verbose>0)
        progress_bar(nr_imgs);

    MultidimArray<double> vavg,vstddev;
    tempPcaAnalyzer.computeStatistics(vavg,vstddev);
    tempPcaAnalyzer.evaluateZScore(2,20,false);
    pcaAnalyzer.insert(pcaAnalyzer.begin(), tempPcaAnalyzer);
}
Example #10
// Split several histograms within the indexes l0 and lF so that
// the entropy after division is maximized
int splitHistogramsUsingEntropy(const std::vector<Histogram1D> &hist,
                                size_t l0, size_t lF)
{
    // Number of classes
    int K = hist.size();

    // Set everything outside l0 and lF to zero, and make it a PDF
    std::vector<Histogram1D> histNorm;
    for (int k = 0; k < K; k++)
    {
        Histogram1D histaux = hist[k];
        for (size_t l = 0; l < XSIZE(histaux); l++)
            if (l < l0 || l > lF)
                DIRECT_A1D_ELEM(histaux,l) = 0;
        histaux *= 1.0/histaux.sum();
        histNorm.push_back(histaux);
    }

    // Compute for each class the probability of being l<=l0 and l>l0
    MultidimArray<double> p(K, 2);
    for (int k = 0; k < K; k++)
    {
        const Histogram1D& histogram=histNorm[k];
        DIRECT_A2D_ELEM(p,k, 0) = DIRECT_A1D_ELEM(histogram,l0);
        DIRECT_A2D_ELEM(p,k, 1) = 0;
        for (size_t l = l0 + 1; l <= lF; l++)
            DIRECT_A2D_ELEM(p,k, 1) += DIRECT_A1D_ELEM(histogram,l);
    }

    // Compute the splitting l giving maximum entropy
    double maxEntropy = 0;
    int lmaxEntropy = -1;
    size_t l = l0;
    while (l < lF)
    {
        // Compute the entropy of the classes if we split by l
        double entropy = 0;
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(p)
        {
            double aux=DIRECT_MULTIDIM_ELEM(p,n);
            if (aux != 0)
                entropy -= aux * log10(aux);
        }

#ifdef DEBUG_SPLITTING_USING_ENTROPY
        std::cout << "Splitting at "  << l << " entropy=" << entropy
        << std::endl;
#endif

        // Check if this is the maximum
        if (entropy > maxEntropy)
        {
            maxEntropy = entropy;
            lmaxEntropy = l;
        }

        // Move to next split point
        ++l;

        // Update probabilities of being l<=l0 and l>l0
        for (int k = 0; k < K; k++)
        {
            const Histogram1D& histogram=histNorm[k];
            double aux=DIRECT_A1D_ELEM(histogram,l);
            DIRECT_A2D_ELEM(p,k, 0) += aux;
            DIRECT_A2D_ELEM(p,k, 1) -= aux;
        }
    }

#ifdef DEBUG_SPLITTING_USING_ENTROPY
    std::cout << "Finally in l=[" << l0 << "," << lF
    << " Max Entropy:" << maxEntropy
    << " lmax=" << lmaxEntropy << std::endl;
#endif

    // If the point giving the maximum entropy is too much on the extreme,
    // substitute it by the middle point
    if (lmaxEntropy<=2 || lmaxEntropy>=(int)lF-2)
        lmaxEntropy = (int)ceil((lF + l0)/2.0);

    return lmaxEntropy;
}
Example #11
void Postprocessing::writeOutput()
{

	if (verb > 0)
	{
		std::cout <<"== Writing out put files ..." <<std::endl;
	}
	// Write all relevant output information
	FileName fn_tmp;
	fn_tmp = fn_out + ".mrc";

	DOUBLE avg, stddev, minval, maxval;
    I1().computeStats(avg, stddev, minval, maxval);
    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_MIN, minval);
    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_MAX, maxval);
    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_AVG, avg);
    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_STDDEV, stddev);
    I1.MDMainHeader.setValue(EMDL_IMAGE_SAMPLINGRATE_X, angpix);
    I1.MDMainHeader.setValue(EMDL_IMAGE_SAMPLINGRATE_Y, angpix);
    I1.MDMainHeader.setValue(EMDL_IMAGE_SAMPLINGRATE_Z, angpix);
	I1.write(fn_tmp);
	if (verb > 0)
	{
		std::cout.width(35); std::cout << std::left   <<"  + Processed map: "; std::cout << fn_tmp<< std::endl;
	}

	// Also write the masked postprocessed map
	if (do_auto_mask || fn_mask != "")
	{
		fn_tmp = fn_out + "_masked.mrc";
		I1() *= Im();
	    I1().computeStats(avg, stddev, minval, maxval);
	    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_MIN, minval);
	    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_MAX, maxval);
	    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_AVG, avg);
	    I1.MDMainHeader.setValue(EMDL_IMAGE_STATS_STDDEV, stddev);
		I1.write(fn_tmp);
		if (verb > 0)
		{
			std::cout.width(35); std::cout << std::left   <<"  + Processed masked map: "; std::cout << fn_tmp<< std::endl;
		}
	}

	// Also write mask
	if (do_auto_mask)
	{
		fn_tmp = fn_out + "_automask.mrc";
		Im().computeStats(avg, stddev, minval, maxval);
		Im.MDMainHeader.setValue(EMDL_IMAGE_STATS_MIN, minval);
		Im.MDMainHeader.setValue(EMDL_IMAGE_STATS_MAX, maxval);
		Im.MDMainHeader.setValue(EMDL_IMAGE_STATS_AVG, avg);
		Im.MDMainHeader.setValue(EMDL_IMAGE_STATS_STDDEV, stddev);
		Im.write(fn_tmp);
		if (verb > 0)
		{
			std::cout.width(35); std::cout << std::left   <<"  + Auto-mask: "; std::cout << fn_tmp<< std::endl;
		}
	}

	// Write an output STAR file with FSC curves, Guinier plots etc
	std::ofstream  fh;
	fn_tmp = fn_out + ".star";
	if (verb > 0)
	{
		std::cout.width(35); std::cout << std::left <<"  + Metadata file: "; std::cout << fn_tmp<< std::endl;
	}

	fh.open((fn_tmp).c_str(), std::ios::out);
	if (!fh)
		REPORT_ERROR( (std::string)"MlOptimiser::write: Cannot write file: " + fn_tmp);

	// Write the command line as a comment in the header
	fh << "# RELION postprocess" << std::endl;
	fh << "# ";
	parser.writeCommandLine(fh);

	MetaDataTable MDlist, MDfsc, MDguinier;

	MDlist.setIsList(true);
	MDlist.setName("general");
	MDlist.addObject();
	MDlist.setValue(EMDL_POSTPROCESS_FINAL_RESOLUTION, global_resol);
	MDlist.setValue(EMDL_POSTPROCESS_BFACTOR, global_bfactor );
	if (do_auto_bfac)
	{
		MDlist.setValue(EMDL_POSTPROCESS_GUINIER_FIT_SLOPE, global_slope);
		MDlist.setValue(EMDL_POSTPROCESS_GUINIER_FIT_INTERCEPT, global_intercept);
		MDlist.setValue(EMDL_POSTPROCESS_GUINIER_FIT_CORRELATION, global_corr_coeff);
	}
	MDlist.write(fh);

	MDfsc.setName("fsc");
	FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(fsc_true)
	{
		MDfsc.addObject();
		DOUBLE res = (i > 0) ? (XSIZE(I1()) * angpix / (DOUBLE)i) : 999.;
		MDfsc.setValue(EMDL_SPECTRAL_IDX, (int)i);
		MDfsc.setValue(EMDL_RESOLUTION, 1./res);
		MDfsc.setValue(EMDL_RESOLUTION_ANGSTROM, res);
		if (do_mask)
		{
			MDfsc.setValue(EMDL_POSTPROCESS_FSC_TRUE, DIRECT_A1D_ELEM(fsc_true, i) );
			MDfsc.setValue(EMDL_POSTPROCESS_FSC_UNMASKED, DIRECT_A1D_ELEM(fsc_unmasked, i) );
			MDfsc.setValue(EMDL_POSTPROCESS_FSC_MASKED, DIRECT_A1D_ELEM(fsc_masked, i) );
			MDfsc.setValue(EMDL_POSTPROCESS_FSC_RANDOM_MASKED, DIRECT_A1D_ELEM(fsc_random_masked, i) );
		}
		else
		{
			MDfsc.setValue(EMDL_POSTPROCESS_FSC_UNMASKED, DIRECT_A1D_ELEM(fsc_true, i) );
		}
	}
	MDfsc.write(fh);

	// Also write XML file with FSC_true curve for EMDB submission
	writeFscXml(MDfsc);

	MDguinier.setName("guinier");
	for (int i = 0; i < guinierin.size(); i++)
	{
		MDguinier.addObject();
		MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_RESOL_SQUARED, guinierin[i].x);
		MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_VALUE_IN, guinierin[i].y);
		if (fn_mtf != "")
			MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_VALUE_INVMTF, guinierinvmtf[i].y);
		if (do_fsc_weighting)
			MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_VALUE_WEIGHTED, guinierweighted[i].y);
		if (do_auto_bfac || ABS(adhoc_bfac) > 0.)
			MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_VALUE_SHARPENED, guiniersharpen[i].y);
		if (do_auto_bfac)
			MDguinier.setValue(EMDL_POSTPROCESS_GUINIER_VALUE_INTERCEPT, global_intercept);
	}
	MDguinier.write(fh);

	fh.close();

	if (verb > 0)
	{
		std::cout.width(35); std::cout << std::left   <<"  + FINAL RESOLUTION: "; std::cout << global_resol<< std::endl;
	}

}
Example #12
void Postprocessing::divideByMtf(MultidimArray<Complex > &FT)
{

	if (fn_mtf != "")
	{
		if (verb > 0)
		{
			std::cout << "== Dividing map by the MTF of the detector ..." << std::endl;
			std::cout.width(35); std::cout << std::left <<"  + mtf STAR-file: "; std::cout << fn_mtf << std::endl;
		}

		MetaDataTable MDmtf;

		if (!fn_mtf.isStarFile())
			REPORT_ERROR("Postprocessing::divideByMtf ERROR: input MTF file is not a STAR file.");

		MDmtf.read(fn_mtf);
		MultidimArray<DOUBLE> mtf_resol, mtf_value;
		mtf_resol.resize(MDmtf.numberOfObjects());
		mtf_value.resize(mtf_resol);

		int i =0;
		FOR_ALL_OBJECTS_IN_METADATA_TABLE(MDmtf)
		{
			MDmtf.getValue(EMDL_RESOLUTION_INVPIXEL, DIRECT_A1D_ELEM(mtf_resol, i) ); // resolution needs to be given in 1/pix
			MDmtf.getValue(EMDL_POSTPROCESS_MTF_VALUE, DIRECT_A1D_ELEM(mtf_value, i) );
			if (DIRECT_A1D_ELEM(mtf_value, i) < 1e-10)
			{
				std::cerr << " i= " << i <<  " mtf_value[i]= " << DIRECT_A1D_ELEM(mtf_value, i) << std::endl;
				REPORT_ERROR("Postprocessing::sharpenMap ERROR: zero or negative values encountered in MTF curve!");
			}
			i++;
		}

	    DOUBLE xsize = (DOUBLE)XSIZE(I1());
	    FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT)
	    {
	    	int r2 = kp * kp + ip * ip + jp * jp;
	    	DOUBLE res = sqrt((DOUBLE)r2)/xsize; // get resolution in 1/pixel
			if (res < 0.5 )
			{

				// Find the suitable MTF value
				int i_0 = 0;
				for (int ii = 0; ii < XSIZE(mtf_resol); ii++)
				{
					if (DIRECT_A1D_ELEM(mtf_resol, ii) > res)
						break;
					i_0 = ii;
				}
				// linear interpolation: y = y_0 + (y_1 - y_0)*(x - x_0)/(x_1 - x_0)
				DOUBLE mtf;
				DOUBLE x_0 = DIRECT_A1D_ELEM(mtf_resol, i_0);
				if (i_0 == MULTIDIM_SIZE(mtf_resol) - 1 || i_0 == 0) // check boundaries of the array
					mtf = DIRECT_A1D_ELEM(mtf_value, i_0);
				else
				{
					DOUBLE x_1 = DIRECT_A1D_ELEM(mtf_resol, i_0 + 1);
					DOUBLE y_0 = DIRECT_A1D_ELEM(mtf_value, i_0);
					DOUBLE y_1 = DIRECT_A1D_ELEM(mtf_value, i_0 + 1);
					mtf = y_0 + (y_1 - y_0)*(res - x_0)/(x_1 - x_0);
				}

				// Divide Fourier component by the MTF
				DIRECT_A3D_ELEM(FT, k, i, j) /= mtf;
			}
	    }

	}


}
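
The MTF lookup above walks the tabulated (resolution, value) pairs, finds the last entry at or below the requested frequency and linearly interpolates to the next one, falling back to the nearest tabulated value at the array boundaries. A standalone sketch of that lookup follows; interpolateMtf is a hypothetical helper, not a RELION function, and the table values are invented.

#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the interpolation in divideByMtf(): find the
// last tabulated point with resol[i_0] <= res, then interpolate to the next
// point; at the boundaries just return the tabulated value.
static double interpolateMtf(const std::vector<double> &resol,
                             const std::vector<double> &value, double res)
{
    size_t i_0 = 0;
    for (size_t ii = 0; ii < resol.size(); ii++)
    {
        if (resol[ii] > res)
            break;
        i_0 = ii;
    }
    if (i_0 == 0 || i_0 == resol.size() - 1)
        return value[i_0];
    // y = y_0 + (y_1 - y_0)*(x - x_0)/(x_1 - x_0)
    return value[i_0] + (value[i_0 + 1] - value[i_0]) *
           (res - resol[i_0]) / (resol[i_0 + 1] - resol[i_0]);
}

int main()
{
    std::vector<double> resol = {0.0, 0.1, 0.2, 0.3, 0.4, 0.5};  // 1/pixel
    std::vector<double> value = {1.0, 0.9, 0.7, 0.5, 0.3, 0.2};  // invented MTF
    std::printf("MTF at 0.25/pix ~ %g\n", interpolateMtf(resol, value, 0.25));
    return 0;
}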
Example #13
// majorAxis and minorAxis are the estimated particle sizes in px
void ProgSortByStatistics::processInprocessInputPrepareSPTH(MetaData &SF, bool trained)
{
    //#define DEBUG
    PCAMahalanobisAnalyzer tempPcaAnalyzer0;
    PCAMahalanobisAnalyzer tempPcaAnalyzer1;
    PCAMahalanobisAnalyzer tempPcaAnalyzer2;
    PCAMahalanobisAnalyzer tempPcaAnalyzer3;
    PCAMahalanobisAnalyzer tempPcaAnalyzer4;

    //Morphology
    tempPcaAnalyzer0.clear();
    //Signal to noise ratio
    tempPcaAnalyzer1.clear();
    tempPcaAnalyzer2.clear();
    tempPcaAnalyzer3.clear();
    //Histogram analysis, to detect black points and saturated parts
    tempPcaAnalyzer4.clear();

    double sign = 1;//;-1;
    int numNorm = 3;
    int numDescriptors0=numNorm;
    int numDescriptors2=4;
    int numDescriptors3=11;
    int numDescriptors4 = 10;

    MultidimArray<float> v0(numDescriptors0);
    MultidimArray<float> v2(numDescriptors2);
    MultidimArray<float> v3(numDescriptors3);
    MultidimArray<float> v4(numDescriptors4);

    if (verbose>0)
    {
        std::cout << " Sorting particle set by new xmipp method..." << std::endl;
    }

    int nr_imgs = SF.size();
    if (verbose>0)
        init_progress_bar(nr_imgs);

    int c = XMIPP_MAX(1, nr_imgs / 60);
    int imgno = 0, imgnoPCA=0;

    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    bool first=true;

    // We assume that at least there is one particle
    size_t Xdim, Ydim, Zdim, Ndim;
    getImageSize(SF,Xdim,Ydim,Zdim,Ndim);

    //Initialization:
    MultidimArray<double> nI, modI, tempI, tempM, ROI;
    MultidimArray<bool> mask;
    nI.resizeNoCopy(Ydim,Xdim);
    modI.resizeNoCopy(Ydim,Xdim);
    tempI.resizeNoCopy(Ydim,Xdim);
    tempM.resizeNoCopy(Ydim,Xdim);
    mask.resizeNoCopy(Ydim,Xdim);
    mask.initConstant(true);

    MultidimArray<double> autoCorr(2*Ydim,2*Xdim);
    MultidimArray<double> smallAutoCorr;

    Histogram1D hist;
    Matrix2D<double> U,V,temp;
    Matrix1D<double> D;

    MultidimArray<int> radial_count;
    MultidimArray<double> radial_avg;
    Matrix1D<int> center(2);
    MultidimArray<int> distance;
    int dim;
    center.initZeros();

    v0.initZeros(numDescriptors0);
    v2.initZeros(numDescriptors2);
    v3.initZeros(numDescriptors3);
    v4.initZeros(numDescriptors4);

    ROI.resizeNoCopy(Ydim,Xdim);
    ROI.setXmippOrigin();
    FOR_ALL_ELEMENTS_IN_ARRAY2D(ROI)
    {
        double temp = std::sqrt(i*i+j*j);
        if ( temp < (Xdim/2))
            A2D_ELEM(ROI,i,j)= 1;
        else
            A2D_ELEM(ROI,i,j)= 0;
    }

    Image<double> img;
    FourierTransformer transformer(FFTW_BACKWARD);

    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        if (thereIsEnable)
        {
            int enabled;
            SF.getValue(MDL_ENABLED,enabled,__iter.objId);
            if ( (enabled==-1)  )
            {
                imgno++;
                continue;
            }
        }

        img.readApplyGeo(SF,__iter.objId);
        if (targetXdim!=-1 && targetXdim!=XSIZE(img()))
        	selfScaleToSize(LINEAR,img(),targetXdim,targetXdim,1);

        MultidimArray<double> &mI=img();
        mI.setXmippOrigin();
        mI.statisticsAdjust(0,1);
        mask.setXmippOrigin();
        //The size of v1 depends on the image size and must be declared here
        int numDescriptors1 = XSIZE(mI)/2; //=100;
        MultidimArray<float> v1(numDescriptors1);
        v1.initZeros(numDescriptors1);

        double var = 1;
        normalize(transformer,mI,tempI,modI,0,var,mask);
        modI.setXmippOrigin();
        tempI.setXmippOrigin();
        nI = sign*tempI*(modI*modI);
        tempM = (modI*modI);

        A1D_ELEM(v0,0) = (tempM*ROI).sum();
        int index = 1;
        var+=2;
        while (index < numNorm)
        {
            normalize(transformer,mI,tempI,modI,0,var,mask);
            modI.setXmippOrigin();
            tempI.setXmippOrigin();
            nI += sign*tempI*(modI*modI);
            tempM += (modI*modI);
            A1D_ELEM(v0,index) = (tempM*ROI).sum();
            index++;
            var+=2;
        }

        nI /= tempM;
        tempPcaAnalyzer0.addVector(v0);
        nI=(nI*ROI);

        auto_correlation_matrix(mI,autoCorr);
        if (first)
        {
            radialAveragePrecomputeDistance(autoCorr, center, distance, dim);
            first=false;
        }
        fastRadialAverage(autoCorr, distance, dim, radial_avg, radial_count);

        for (int n = 0; n < numDescriptors1; ++n)
            A1D_ELEM(v1,n)=(float)DIRECT_A1D_ELEM(radial_avg,n);

        tempPcaAnalyzer1.addVector(v1);

#ifdef DEBUG

        //String name = "000005@Images/Extracted/run_002/extra/BPV_1386.stk";
        String name = "000010@Images/Extracted/run_001/extra/KLH_Dataset_I_Training_0028.stk";
        //String name = "001160@Images/Extracted/run_001/DefaultFamily5";

        std::cout << img.name() << std::endl;

        if (img.name()==name2)
        {
            FileName fpName    = "test_1.txt";
            mI.write(fpName);
            fpName    = "test_2.txt";
            nI.write(fpName);
            fpName    = "test_3.txt";
            tempM.write(fpName);
            fpName    = "test_4.txt";
            ROI.write(fpName);
            //exit(1);
        }
#endif
        nI.binarize(0);
        int im = labelImage2D(nI,nI,8);
        compute_hist(nI, hist, 0, im, im+1);
        size_t l;
        int k,i,j;
        hist.maxIndex(l,k,i,j);
        A1D_ELEM(hist,j)=0;
        hist.maxIndex(l,k,i,j);
        nI.binarizeRange(j-1,j+1);

        double x0=0,y0=0,majorAxis=0,minorAxis=0,ellipAng=0;
        size_t area=0;
        fitEllipse(nI,x0,y0,majorAxis,minorAxis,ellipAng,area);

        A1D_ELEM(v2,0)=majorAxis/((img().xdim) );
        A1D_ELEM(v2,1)=minorAxis/((img().xdim) );
        A1D_ELEM(v2,2)= (fabs((img().xdim)/2-x0)+fabs((img().ydim)/2-y0))/((img().xdim)/2);
        A1D_ELEM(v2,3)=area/( (double)((img().xdim)/2)*((img().ydim)/2) );

        for (int n=0 ; n < numDescriptors2 ; n++)
        {
            if ( std::isnan(std::abs(A1D_ELEM(v2,n))))
                A1D_ELEM(v2,n)=0;
        }

        tempPcaAnalyzer2.addVector(v2);

        //mI.setXmippOrigin();
        //auto_correlation_matrix(mI*ROI,autoCorr);
        //auto_correlation_matrix(nI,autoCorr);
        autoCorr.window(smallAutoCorr,-5,-5, 5, 5);
        smallAutoCorr.copy(temp);
        svdcmp(temp,U,D,V);

        for (int n = 0; n < numDescriptors3; ++n)
            A1D_ELEM(v3,n)=(float)VEC_ELEM(D,n); //A1D_ELEM(v3,n)=(float)VEC_ELEM(D,n)/VEC_ELEM(D,0);

        tempPcaAnalyzer3.addVector(v3);


        double minVal=0.;
        double maxVal=0.;
        mI.computeDoubleMinMax(minVal,maxVal);
        compute_hist(mI, hist, minVal, maxVal, 100);

        for (int n=0 ; n <= numDescriptors4-1 ; n++)
        {
            A1D_ELEM(v4,n)= (hist.percentil((n+1)*10));
        }
        tempPcaAnalyzer4.addVector(v4);

#ifdef DEBUG

        if (img.name()==name1)
        {
            FileName fpName    = "test.txt";
            mI.write(fpName);
            fpName    = "test3.txt";
            nI.write(fpName);
        }
#endif
        imgno++;
        imgnoPCA++;

        if (imgno % c == 0 && verbose>0)
            progress_bar(imgno);
    }

    tempPcaAnalyzer0.evaluateZScore(2,20,trained);
    tempPcaAnalyzer1.evaluateZScore(2,20,trained);
    tempPcaAnalyzer2.evaluateZScore(2,20,trained);
    tempPcaAnalyzer3.evaluateZScore(2,20,trained);
    tempPcaAnalyzer4.evaluateZScore(2,20,trained);

    pcaAnalyzer.push_back(tempPcaAnalyzer0);
    pcaAnalyzer.push_back(tempPcaAnalyzer1);
    pcaAnalyzer.push_back(tempPcaAnalyzer2);
    pcaAnalyzer.push_back(tempPcaAnalyzer3);
    pcaAnalyzer.push_back(tempPcaAnalyzer4);

}
Example #14
void ProgSortByStatistics::run()
{
    // Process input selfile ..............................................
    SF.read(fn);
    SF.removeDisabled();
    MetaData SF2 = SF;
    SF = SF2;

    bool trained = false;

    if (fn_train != "")
    {
        SFtrain.read(fn_train);
        processInprocessInputPrepareSPTH(SFtrain,trained);
        trained = true;
        processInprocessInputPrepareSPTH(SF,trained);
    }
    else
        processInprocessInputPrepareSPTH(SF,trained);

    int imgno = 0;
    int numPCAs = pcaAnalyzer.size();

    MultidimArray<double> finalZscore(SF.size());
    MultidimArray<double> ZscoreShape1(SF.size()), sortedZscoreShape1;
    MultidimArray<double> ZscoreShape2(SF.size()), sortedZscoreShape2;
    MultidimArray<double> ZscoreSNR1(SF.size()), sortedZscoreSNR1;
    MultidimArray<double> ZscoreSNR2(SF.size()), sortedZscoreSNR2;
    MultidimArray<double> ZscoreHist(SF.size()), sortedZscoreHist;


    finalZscore.initConstant(0);
    ZscoreShape1.resizeNoCopy(finalZscore);
    ZscoreShape2.resizeNoCopy(finalZscore);
    ZscoreSNR1.resizeNoCopy(finalZscore);
    ZscoreSNR2.resizeNoCopy(finalZscore);
    ZscoreHist.resizeNoCopy(finalZscore);
    sortedZscoreShape1.resizeNoCopy(finalZscore);
    sortedZscoreShape2.resizeNoCopy(finalZscore);
    sortedZscoreSNR1.resizeNoCopy(finalZscore);
    sortedZscoreSNR2.resizeNoCopy(finalZscore);
    sortedZscoreHist.resizeNoCopy(finalZscore);

    double zScore=0;
    int enabled;

    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        SF.getValue(MDL_ENABLED,enabled,__iter.objId);
        if ( (enabled==-1)  )
        {
            A1D_ELEM(finalZscore,imgno) = 1e3;
            A1D_ELEM(ZscoreShape1,imgno) = 1e3;
            A1D_ELEM(ZscoreShape2,imgno) = 1e3;
            A1D_ELEM(ZscoreSNR1,imgno) = 1e3;
            A1D_ELEM(ZscoreSNR2,imgno) = 1e3;
            A1D_ELEM(ZscoreHist,imgno) = 1e3;
            imgno++;
            enabled = 0;
        }
        else
        {
            for (int num = 0; num < numPCAs; ++num)
            {
                if (num == 0)
                {
                    A1D_ELEM(ZscoreSNR1,imgno) = pcaAnalyzer[num].getZscore(imgno);
                }
                else if (num == 1)
                {
                    A1D_ELEM(ZscoreShape2,imgno) = pcaAnalyzer[num].getZscore(imgno);
                }
                else if (num == 2)
                {
                    A1D_ELEM(ZscoreShape1,imgno) = pcaAnalyzer[num].getZscore(imgno);
                }
                else if (num == 3)
                {
                    A1D_ELEM(ZscoreSNR2,imgno) = pcaAnalyzer[num].getZscore(imgno);
                }
                else
                {
                    A1D_ELEM(ZscoreHist,imgno) = pcaAnalyzer[num].getZscore(imgno);
                }

                if(zScore < pcaAnalyzer[num].getZscore(imgno))
                    zScore = pcaAnalyzer[num].getZscore(imgno);
            }

            A1D_ELEM(finalZscore,imgno)=zScore;
            imgno++;
            zScore = 0;
        }
    }
    pcaAnalyzer.clear();

    // Produce output .....................................................
    MetaData SFout;
    std::ofstream fh_zind;

    if (verbose==2 && !fn_out.empty())
        fh_zind.open((fn_out.withoutExtension() + "_vectors.xmd").c_str(), std::ios::out);

    MultidimArray<int> sorted;
    finalZscore.indexSort(sorted);

    int nr_imgs = SF.size();
    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    MDRow row;

    for (int imgno = 0; imgno < nr_imgs; imgno++)
    {
        int isort_1 = DIRECT_A1D_ELEM(sorted,imgno);
        int isort = isort_1 - 1;
        SF.getRow(row, isort_1);

        if (thereIsEnable)
        {
            int enabled;
            row.getValue(MDL_ENABLED, enabled);
            if (enabled==-1)
                continue;
        }

        double zscore=DIRECT_A1D_ELEM(finalZscore,isort);
        double zscoreShape1=DIRECT_A1D_ELEM(ZscoreShape1,isort);
        double zscoreShape2=DIRECT_A1D_ELEM(ZscoreShape2,isort);
        double zscoreSNR1=DIRECT_A1D_ELEM(ZscoreSNR1,isort);
        double zscoreSNR2=DIRECT_A1D_ELEM(ZscoreSNR2,isort);
        double zscoreHist=DIRECT_A1D_ELEM(ZscoreHist,isort);

        DIRECT_A1D_ELEM(sortedZscoreShape1,imgno)=DIRECT_A1D_ELEM(ZscoreShape1,isort);
        DIRECT_A1D_ELEM(sortedZscoreShape2,imgno)=DIRECT_A1D_ELEM(ZscoreShape2,isort);
        DIRECT_A1D_ELEM(sortedZscoreSNR1,imgno)=DIRECT_A1D_ELEM(ZscoreSNR1,isort);
        DIRECT_A1D_ELEM(sortedZscoreSNR2,imgno)=DIRECT_A1D_ELEM(ZscoreSNR2,isort);
        DIRECT_A1D_ELEM(sortedZscoreHist,imgno)=DIRECT_A1D_ELEM(ZscoreHist,isort);

        if (zscore>cutoff && cutoff>0)
        {
            row.setValue(MDL_ENABLED,-1);
            if (addToInput)
                SF.setValue(MDL_ENABLED,-1,isort_1);
        }
        else
        {
            row.setValue(MDL_ENABLED,1);
            if (addToInput)
                SF.setValue(MDL_ENABLED,1,isort_1);
        }

        row.setValue(MDL_ZSCORE,zscore);
        row.setValue(MDL_ZSCORE_SHAPE1,zscoreShape1);
        row.setValue(MDL_ZSCORE_SHAPE2,zscoreShape2);
        row.setValue(MDL_ZSCORE_SNR1,zscoreSNR1);
        row.setValue(MDL_ZSCORE_SNR2,zscoreSNR2);
        row.setValue(MDL_ZSCORE_HISTOGRAM,zscoreHist);

        if (addToInput)
        {
            SF.setValue(MDL_ZSCORE,zscore,isort_1);
            SF.setValue(MDL_ZSCORE_SHAPE1,zscoreShape1,isort_1);
            SF.setValue(MDL_ZSCORE_SHAPE2,zscoreShape2,isort_1);
            SF.setValue(MDL_ZSCORE_SNR1,zscoreSNR1,isort_1);
            SF.setValue(MDL_ZSCORE_SNR2,zscoreSNR2,isort_1);
            SF.setValue(MDL_ZSCORE_HISTOGRAM,zscoreHist,isort_1);
        }

        SFout.addRow(row);
    }

    //Sorting taking into account a given percentage
    if (per > 0)
    {
        MultidimArray<int> sortedShape1,sortedShape2,sortedSNR1,sortedSNR2,sortedHist,
        sortedShapeSF1,sortedShapeSF2,sortedSNR1SF,sortedSNR2SF,sortedHistSF;

        sortedZscoreShape1.indexSort(sortedShape1);
        sortedZscoreShape2.indexSort(sortedShape2);
        sortedZscoreSNR1.indexSort(sortedSNR1);
        sortedZscoreSNR2.indexSort(sortedSNR2);
        sortedZscoreHist.indexSort(sortedHist);
        size_t numPartReject = (size_t)std::floor((per/100)*SF.size());

        for (size_t numPar = SF.size()-1; numPar > (SF.size()-numPartReject); --numPar)
        {
            int isort_1 = DIRECT_A1D_ELEM(sortedShape1,numPar);
            SFout.getRow(row, isort_1);
            row.setValue(MDL_ENABLED,-1);
            SFout.setRow(row,isort_1);

            isort_1 = DIRECT_A1D_ELEM(sortedShape2,numPar);
            SFout.getRow(row, isort_1);
            row.setValue(MDL_ENABLED,-1);
            SFout.setRow(row,isort_1);

            isort_1 = DIRECT_A1D_ELEM(sortedSNR1,numPar);
            SFout.getRow(row, isort_1);
            row.setValue(MDL_ENABLED,-1);
            SFout.setRow(row,isort_1);

            isort_1 = DIRECT_A1D_ELEM(sortedSNR2,numPar);
            SFout.getRow(row, isort_1);
            row.setValue(MDL_ENABLED,-1);
            SFout.setRow(row,isort_1);

            isort_1 = DIRECT_A1D_ELEM(sortedHist,numPar);
            SFout.getRow(row, isort_1);
            row.setValue(MDL_ENABLED,-1);
            SFout.setRow(row,isort_1);

            if (addToInput)
            {
                ZscoreShape1.indexSort(sortedShapeSF1);
                ZscoreShape2.indexSort(sortedShapeSF2);
                ZscoreSNR1.indexSort(sortedSNR1SF);
                ZscoreSNR2.indexSort(sortedSNR2SF);
                ZscoreHist.indexSort(sortedHistSF);

                isort_1 = DIRECT_A1D_ELEM(sortedShapeSF1,numPar);
                SF.getRow(row, isort_1);
                row.setValue(MDL_ENABLED,-1);
                SF.setRow(row,isort_1);

                isort_1 = DIRECT_A1D_ELEM(sortedShapeSF2,numPar);
                SF.getRow(row, isort_1);
                row.setValue(MDL_ENABLED,-1);
                SF.setRow(row,isort_1);

                isort_1 = DIRECT_A1D_ELEM(sortedSNR1SF,numPar);
                SF.getRow(row, isort_1);
                row.setValue(MDL_ENABLED,-1);
                SF.setRow(row,isort_1);

                isort_1 = DIRECT_A1D_ELEM(sortedSNR2SF,numPar);
                SF.getRow(row, isort_1);
                row.setValue(MDL_ENABLED,-1);
                SF.setRow(row,isort_1);

                isort_1 = DIRECT_A1D_ELEM(sortedHistSF,numPar);
                SF.getRow(row, isort_1);
                row.setValue(MDL_ENABLED,-1);
                SF.setRow(row,isort_1);
            }
        }
    }

    if (verbose==2)
        fh_zind.close();
    if (!fn_out.empty())
    {
        MetaData SFsorted;
        SFsorted.sort(SFout,MDL_ZSCORE);
        SFout.write(fn_out,MD_OVERWRITE);
    }
    if (addToInput)
    {
        MetaData SFsorted;
        SFsorted.sort(SF,MDL_ZSCORE);
        SFsorted.write(fn,MD_APPEND);
    }
}
Example #15
void ParticlePolisherMpi::calculateAllSingleFrameReconstructionsAndBfactors()
{

	FileName fn_star = fn_in.withoutExtension() + "_" + fn_out + "_bfactors.star";
	if (!do_start_all_over && readStarFileBfactors(fn_star))
	{
		if (verb > 0)
			std::cout << " + " << fn_star << " already exists: skipping calculation average of per-frame B-factors." <<std::endl;
		return;
	}

	DOUBLE bfactor, offset, corr_coeff;

	int total_nr_frames = last_frame - first_frame + 1;
	long int my_first_frame, my_last_frame, my_nr_frames;

	// Loop over all frames (two halves for each frame!) to be included in the reconstruction
	// Each node does part of the work
	divide_equally(2*total_nr_frames, node->size, node->rank, my_first_frame, my_last_frame);
	my_nr_frames = my_last_frame - my_first_frame + 1;

	if (verb > 0)
	{
		std::cout << " + Calculating per-frame reconstructions ... " << std::endl;
		init_progress_bar(my_nr_frames);
	}

	for (long int i = my_first_frame; i <= my_last_frame; i++)
	{

		int iframe = (i >= total_nr_frames) ? i - total_nr_frames : i;
		iframe += first_frame;
		int ihalf = (i >= total_nr_frames) ? 2 : 1;

		calculateSingleFrameReconstruction(iframe, ihalf);

    	if (verb > 0)
    		progress_bar(i - my_first_frame + 1);
	}

	if (verb > 0)
	{
		progress_bar(my_nr_frames);
	}

	MPI_Barrier(MPI_COMM_WORLD);

	// Also calculate the average of all single-frames for both halves
    if (node->rank == 0)
    	calculateAverageAllSingleFrameReconstructions(1);
    else if (node->rank == 1)
    	calculateAverageAllSingleFrameReconstructions(2);

	// Wait until all reconstructions have been done, and calculate the B-factors per-frame
	MPI_Barrier(MPI_COMM_WORLD);

	calculateBfactorSingleFrameReconstruction(-1, bfactor, offset, corr_coeff); // FSC between the two averages, also reads mask

	MPI_Barrier(MPI_COMM_WORLD);

	// Loop over all frames (two halves for each frame!) to be included in the reconstruction
	// Each node does part of the work
	divide_equally(total_nr_frames, node->size, node->rank, my_first_frame, my_last_frame);
	my_nr_frames = my_last_frame - my_first_frame + 1;

	if (verb > 0)
	{
		std::cout << " + Calculating per-frame B-factors ... " << std::endl;
		init_progress_bar(my_nr_frames);
	}

	for (long int i = first_frame+my_first_frame; i <= first_frame+my_last_frame; i++)
	{

		calculateBfactorSingleFrameReconstruction(i, bfactor, offset, corr_coeff);
		int iframe = i - first_frame;
		DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 0) = bfactor;
       	DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 1) = offset;
       	DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 2) = corr_coeff;

    	if (verb > 0)
    		progress_bar(i - first_frame - my_first_frame + 1);
	}

	// Combine results from all nodes
	MultidimArray<DOUBLE> allnodes_perframe_bfactors;
	allnodes_perframe_bfactors.resize(perframe_bfactors);
	MPI_Allreduce(MULTIDIM_ARRAY(perframe_bfactors), MULTIDIM_ARRAY(allnodes_perframe_bfactors), MULTIDIM_SIZE(perframe_bfactors), MY_MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	perframe_bfactors = allnodes_perframe_bfactors;

	if (verb > 0)
	{
		progress_bar(my_nr_frames);
		writeStarFileBfactors(fn_star);

	    // Also write a STAR file with the relative contributions of each frame to all frequencies
	    fn_star = fn_in.withoutExtension() + "_" + fn_out + "_relweights.star";
	    writeStarFileRelativeWeights(fn_star);
	}


}
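
The divide_equally() call above hands each MPI rank a contiguous, inclusive [my_first_frame, my_last_frame] range so that the 2*total_nr_frames reconstructions are spread as evenly as possible over the nodes. Below is a standalone sketch of such a split; divideEqually is a hypothetical stand-in, not RELION's actual implementation.

#include <cstdio>

// Hypothetical sketch of splitting n_jobs as evenly as possible over n_ranks;
// first/last are inclusive, in the spirit of the divide_equally() call above.
static void divideEqually(long n_jobs, int n_ranks, int rank,
                          long &my_first, long &my_last)
{
    long base = n_jobs / n_ranks;
    long rest = n_jobs % n_ranks;
    my_first = rank * base + (rank < rest ? rank : rest);
    my_last  = my_first + base + (rank < rest ? 1 : 0) - 1;
}

int main()
{
    long first, last;
    for (int rank = 0; rank < 3; rank++)
    {
        divideEqually(10, 3, rank, first, last);   // e.g. 10 frame-halves, 3 ranks
        std::printf("rank %d: jobs %ld..%ld\n", rank, first, last);
    }
    return 0;
}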
Example #16
/* Assign probability ------------------------------------------------------ */
double LeafNode::assignProbability(double value, int k) const
{
    const IrregularHistogram1D& hist=__leafPDF[k];
    int index = hist.val2Index(value);
    return DIRECT_A1D_ELEM(hist.__hist,index);
}
Example #17
void Postprocessing::run()
{

	// Read input maps and perform some checks
	initialise();

	// Calculate FSC of the unmasked maps
	getFSC(I1(), I2(), fsc_unmasked);

	// Check whether we'll do masking
	do_mask = getMask();
	if (do_mask)
	{
		if (verb > 0)
		{
			std::cout <<"== Masking input maps ..." <<std::endl;
		}
		// Mask I1 and I2 and calculate fsc_masked
		I1() *= Im();
		I2() *= Im();
		getFSC(I1(), I2(), fsc_masked);

		// To save memory re-read the same input maps again and randomize phases before masking
		I1.read(fn_I1);
		I2.read(fn_I2);
		I1().setXmippOrigin();
		I2().setXmippOrigin();

		// Check at which resolution shell the FSC drops below randomize_fsc_at
		int randomize_at = -1;
		FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(fsc_unmasked)
		{
			if (i > 0 && DIRECT_A1D_ELEM(fsc_unmasked, i) < randomize_fsc_at)
			{
				randomize_at = i;
				break;
			}
		}
		if (verb > 0)
		{
			std::cout.width(35); std::cout << std::left << "  + randomize phases beyond: "; std::cout << XSIZE(I1())* angpix / randomize_at << " Angstroms" << std::endl;
		}
		if (randomize_at > 0)
		{
			randomizePhasesBeyond(I1(), randomize_at);
			randomizePhasesBeyond(I2(), randomize_at);
			// Mask the phase-randomized maps and calculate fsc_random_masked
			I1() *= Im();
			I2() *= Im();
			getFSC(I1(), I2(), fsc_random_masked);

		}
		else
			REPORT_ERROR("Postprocessing::run ERROR: FSC curve never drops below randomize_fsc_at.  You may want to check your mask.");

		// Now that we have fsc_masked and fsc_random_masked, calculate fsc_true according to Richard's formula
		// FSC_true = (FSC_t - FSC_n) / (1 - FSC_n)
		fsc_true.resize(fsc_masked);
		FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(fsc_true)
		{
                    // 29jan2015: let's move this 2 shells upwards, because of small artefacts near the resolution of randomisation!
			if (i < randomize_at + 2)
			{
				DIRECT_A1D_ELEM(fsc_true, i) = DIRECT_A1D_ELEM(fsc_masked, i);
			}
			else
			{
				DOUBLE fsct = DIRECT_A1D_ELEM(fsc_masked, i);
				DOUBLE fscn = DIRECT_A1D_ELEM(fsc_random_masked, i);
				if (fscn > fsct)
					DIRECT_A1D_ELEM(fsc_true, i) = 0.;
				else
					DIRECT_A1D_ELEM(fsc_true, i) = (fsct - fscn) / (1. - fscn);
			}
		}

		// Now re-read the original maps yet again into memory
		I1.read(fn_I1);
		I2.read(fn_I2);
		I1().setXmippOrigin();
		I2().setXmippOrigin();

	}
	else
	{
Example #18
/* Do inference ------------------------------------------------------------ */
int NaiveBayes::doInference(const MultidimArray<double> &newFeatures, double &cost,
                            Matrix1D<double> &classesProbs, Matrix1D<double> &allCosts)
{
    classesProbs=__priorProbsLog10;
    for(int f=0; f<Nfeatures; f++)
    {
        const LeafNode &leaf_f=*(__leafs[f]);
        double newFeatures_f=DIRECT_A1D_ELEM(newFeatures,f);
        for (int k=0; k<K; k++)
        {
            double p = leaf_f.assignProbability(newFeatures_f, k);

            if (fabs(p) < 1e-2)
                VEC_ELEM(classesProbs,k) += -2*DIRECT_A1D_ELEM(__weights,f);
            else
                VEC_ELEM(classesProbs,k) += DIRECT_A1D_ELEM(__weights,f)*std::log10(p);

#ifdef DEBUG_FINE_CLASSIFICATION

            if(debugging == true)
            {
                std::cout << "Feature " << f
                << " Probability for class " << k << " = "
                << classesProbs(k) << " increase= " << p
                << std::endl;
                char c;
                // COSS                    std::cin >> c;
                //                    if (c=='q') debugging = false;
            }
#endif

        }
    }

    classesProbs-=classesProbs.computeMax();
    //    std::cout << "classesProbs " << classesProbs.transpose() << std::endl;

    for (int k=0; k<K; k++)
        VEC_ELEM(classesProbs,k)=pow(10.0,VEC_ELEM(classesProbs,k));
    classesProbs*=1.0/classesProbs.sum();
    //    std::cout << "classesProbs norm " << classesProbs.transpose() << std::endl;

    allCosts=__cost*classesProbs;
    //    std::cout << "allCosts " << allCosts.transpose() << std::endl;

    int bestk=0;
    cost=VEC_ELEM(allCosts,0)=std::log10(VEC_ELEM(allCosts,0));
    for (int k=1; k<K; k++)
    {
        VEC_ELEM(allCosts,k)=std::log10(VEC_ELEM(allCosts,k));
        if (VEC_ELEM(allCosts,k)<cost)
        {
            cost=VEC_ELEM(allCosts,k);
            bestk=k;
        }
    }

#ifdef DEBUG_CLASSIFICATION
    if(debugging == true)
    {
        for (int k=0; k<K; k++)
            classesProbs(k)=log10(classesProbs(k));
        std::cout << "Class probababilities=" << classesProbs.transpose()
        << "\n  costs=" << allCosts.transpose()
        << "  best class=" << bestk << " cost=" << cost << std::endl;
        char c;
        // COSS std::cin >> c;
        // if (c=='q') debugging = false;
    }
#endif
    return bestk;
}
Example #19
// Fill data array with oversampled Fourier transform, and calculate its power spectrum
void Projector::computeFourierTransformMap(MultidimArray<DOUBLE> &vol_in, MultidimArray<DOUBLE> &power_spectrum, int current_size, int nr_threads, bool do_gridding)
{

	MultidimArray<DOUBLE> Mpad;
	MultidimArray<Complex > Faux;
    FourierTransformer transformer;
    // DEBUGGING: multi-threaded FFTWs are giving me a headache?
	// For a long while: switch them off!
	//transformer.setThreadsNumber(nr_threads);
    DOUBLE normfft;

	// Size of padded real-space volume
	int padoridim = padding_factor * ori_size;

	// Initialize data array of the oversampled transform
	ref_dim = vol_in.getDim();

	// Make Mpad
	switch (ref_dim)
	{
	case 2:
	   Mpad.initZeros(padoridim, padoridim);
	   normfft = (DOUBLE)(padding_factor * padding_factor);
	   break;
	case 3:
	   Mpad.initZeros(padoridim, padoridim, padoridim);
	   if (data_dim ==3)
		   normfft = (DOUBLE)(padding_factor * padding_factor * padding_factor);
	   else
		   normfft = (DOUBLE)(padding_factor * padding_factor * padding_factor * ori_size);
	   break;
	default:
	   REPORT_ERROR("Projector::computeFourierTransformMap%%ERROR: Dimension of the data array should be 2 or 3");
	}

	// First do a gridding pre-correction on the real-space map:
	// Divide by the inverse Fourier transform of the interpolator in Fourier-space
	// 10feb11: at least in 2D case, this seems to be the wrong thing to do!!!
	// TODO: check what is best for subtomo!
	if (do_gridding)// && data_dim != 3)
		griddingCorrect(vol_in);

	// Pad translated map with zeros
	vol_in.setXmippOrigin();
	Mpad.setXmippOrigin();
	FOR_ALL_ELEMENTS_IN_ARRAY3D(vol_in) // This will also work for 2D
		A3D_ELEM(Mpad, k, i, j) = A3D_ELEM(vol_in, k, i, j);

	// Translate padded map to put origin of FT in the center
	CenterFFT(Mpad, true);

	// Calculate the oversampled Fourier transform
	transformer.FourierTransform(Mpad, Faux, false);

	// Free memory: Mpad no longer needed
	Mpad.clear();

	// Resize data array to the right size and initialise to zero
	initZeros(current_size);

	// Fill data only for those points with distance to origin less than max_r
	// (other points will be zero because of the initZeros() call above)
	// Also calculate radial power spectrum
	power_spectrum.initZeros(ori_size / 2 + 1);
	MultidimArray<DOUBLE> counter(power_spectrum);
	counter.initZeros();

	int max_r2 = r_max * r_max * padding_factor * padding_factor;
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(Faux) // This will also work for 2D
	{
		int r2 = kp*kp + ip*ip + jp*jp;
		// The Fourier Transforms are all "normalised" for 2D transforms of size = ori_size x ori_size
		if (r2 <= max_r2)
		{
			// Set data array
			A3D_ELEM(data, kp, ip, jp) = DIRECT_A3D_ELEM(Faux, k, i, j) * normfft;

			// Calculate power spectrum
			int ires = ROUND( sqrt((DOUBLE)r2) / padding_factor );
			// Factor two because of two-dimensionality of the complex plane
			DIRECT_A1D_ELEM(power_spectrum, ires) += norm(A3D_ELEM(data, kp, ip, jp)) / 2.;
			DIRECT_A1D_ELEM(counter, ires) += 1.;
		}
	}

	// Calculate radial average of power spectrum
	FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(power_spectrum)
	{
		if (DIRECT_A1D_ELEM(counter, i) < 1.)
			DIRECT_A1D_ELEM(power_spectrum, i) = 0.;
		else
			DIRECT_A1D_ELEM(power_spectrum, i) /= DIRECT_A1D_ELEM(counter, i);
	}

	transformer.cleanup();

}
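
The power spectrum above is built by accumulating a per-shell sum and a per-shell counter and then dividing; shells that receive no contributions are set to zero. The following standalone sketch shows that radial-averaging pattern with invented per-voxel samples in place of the norm()/2 terms.

#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    // Invented (shell index, contribution) pairs standing in for the norm()/2 terms
    std::vector< std::pair<int, double> > samples =
        { {0, 4.0}, {1, 2.0}, {1, 3.0}, {2, 1.0}, {2, 1.5}, {2, 0.5} };

    std::vector<double> power(4, 0.), counter(4, 0.);
    for (size_t n = 0; n < samples.size(); n++)
    {
        power[samples[n].first]   += samples[n].second;
        counter[samples[n].first] += 1.;
    }

    // Radial average: divide by the counter, leave never-visited shells at zero
    for (size_t i = 0; i < power.size(); i++)
        power[i] = (counter[i] < 1.) ? 0. : power[i] / counter[i];

    for (size_t i = 0; i < power.size(); i++)
        std::printf("shell %zu: %g\n", i, power[i]);
    return 0;
}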
Example #20
EnsembleNaiveBayes::EnsembleNaiveBayes(
    const std::vector < MultidimArray<double> >  &features,
    const Matrix1D<double> &priorProbs,
    int discreteLevels, int numberOfClassifiers,
    double samplingFeatures, double samplingIndividuals,
    const std::string &newJudgeCombination)
{
    int NFeatures=XSIZE(features[0]);
    int NsubFeatures=CEIL(NFeatures*samplingFeatures);
    K=features.size();
    judgeCombination=newJudgeCombination;

#ifdef WEIGHTED_SAMPLING
    // Measure the classification power of each variable
    NaiveBayes *nb_weights=new NaiveBayes(features, priorProbs, discreteLevels);
    MultidimArray<double> weights=nb_weights->__weights;
    delete nb_weights;
    double sumWeights=weights.sum();
#endif

    for (int n=0; n<numberOfClassifiers; n++)
    {
        // Produce the set of features for this subclassifier
        MultidimArray<int> subFeatures(NsubFeatures);
        FOR_ALL_ELEMENTS_IN_ARRAY1D(subFeatures)
        {
#ifdef WEIGHTED_SAMPLING
            double random_sum_weight=rnd_unif(0,sumWeights);
            int j=0;
            do
            {
                double wj=DIRECT_A1D_ELEM(weights,j);
                if (wj<random_sum_weight)
                {
                    random_sum_weight-=wj;
                    j++;
                    if (j==NFeatures)
                    {
                        j=NFeatures-1;
                        break;
                    }
                }
                else
                    break;
            }
            while (true);
            DIRECT_A1D_ELEM(subFeatures,i)=j;
#else

            DIRECT_A1D_ELEM(subFeatures,i)=round(rnd_unif(0,NFeatures-1));
#endif

        }

        // Container for the new training sample
        std::vector< MultidimArray<double> >  newFeatures;

        // Produce the data set for each class
        for (int k=0; k<K; k++)
        {
            int NIndividuals=YSIZE(features[k]);
            int NsubIndividuals=CEIL(NIndividuals*samplingIndividuals);
            MultidimArray<int> subIndividuals(NsubIndividuals);
            FOR_ALL_ELEMENTS_IN_ARRAY1D(subIndividuals)
            subIndividuals(i)=ROUND(rnd_unif(0,NsubIndividuals-1));

            MultidimArray<double> newFeaturesK;
            newFeaturesK.initZeros(NsubIndividuals,NsubFeatures);
            const MultidimArray<double>& features_k=features[k];
            FOR_ALL_ELEMENTS_IN_ARRAY2D(newFeaturesK)
            DIRECT_A2D_ELEM(newFeaturesK,i,j)=DIRECT_A2D_ELEM(features_k,
                                              DIRECT_A1D_ELEM(subIndividuals,i),
                                              DIRECT_A1D_ELEM(subFeatures,j));

            newFeatures.push_back(newFeaturesK);
        }

        // Create a Naive Bayes classifier with this data
        NaiveBayes *nb=new NaiveBayes(newFeatures, priorProbs, discreteLevels);
        ensemble.push_back(nb);
        ensembleFeatures.push_back(subFeatures);
    }
}
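
In the WEIGHTED_SAMPLING branch above, features are drawn with probability proportional to their classification weight (roulette-wheel selection). Here is a standalone sketch of that selection scheme, using std::mt19937 in place of Xmipp's rnd_unif and a hypothetical pickWeighted helper with invented weights.

#include <cstdio>
#include <random>
#include <vector>

// Hypothetical helper for roulette-wheel selection: draw a uniform number in
// [0, sum(weights)) and walk the weights until it is used up.
static int pickWeighted(const std::vector<double> &weights, std::mt19937 &gen)
{
    double sum = 0.;
    for (size_t j = 0; j < weights.size(); j++)
        sum += weights[j];
    std::uniform_real_distribution<double> unif(0., sum);
    double r = unif(gen);
    for (size_t j = 0; j < weights.size(); j++)
    {
        if (r < weights[j])
            return (int)j;
        r -= weights[j];
    }
    return (int)weights.size() - 1;   // guard against rounding at the far end
}

int main()
{
    std::vector<double> weights = {0.1, 0.6, 0.3};   // invented feature weights
    std::mt19937 gen(42);
    std::vector<int> counts(weights.size(), 0);
    for (int n = 0; n < 10000; n++)
        counts[pickWeighted(weights, gen)]++;
    for (size_t j = 0; j < counts.size(); j++)
        std::printf("feature %zu picked %d times\n", j, counts[j]);
    return 0;
}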
Example #21
File: fftw.cpp  Project: shy3u/GeRelion
void correctMapForMTF(MultidimArray<Complex >& FT, int ori_size, FileName& fn_mtf)
{

	MetaDataTable MDmtf;

	if (!fn_mtf.isStarFile())
	{
		REPORT_ERROR("correctMapForMTF ERROR: input MTF file is not a STAR file.");
	}

	MDmtf.read(fn_mtf);
	MultidimArray<double> mtf_resol, mtf_value;
	mtf_resol.resize(MDmtf.numberOfObjects());
	mtf_value.resize(mtf_resol);

	int i = 0;
	FOR_ALL_OBJECTS_IN_METADATA_TABLE(MDmtf)
	{
		MDmtf.getValue(EMDL_RESOLUTION_INVPIXEL, DIRECT_A1D_ELEM(mtf_resol, i));  // resolution needs to be given in 1/pix
		MDmtf.getValue(EMDL_POSTPROCESS_MTF_VALUE, DIRECT_A1D_ELEM(mtf_value, i));
		if (DIRECT_A1D_ELEM(mtf_value, i) < 1e-10)
		{
			std::cerr << " i= " << i <<  " mtf_value[i]= " << DIRECT_A1D_ELEM(mtf_value, i) << std::endl;
			REPORT_ERROR("Postprocessing::sharpenMap ERROR: zero or negative values encountered in MTF curve!");
		}
		i++;
	}

	double xsize = (double)ori_size;
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT)
	{
		int r2 = kp * kp + ip * ip + jp * jp;
		double res = sqrt((double)r2) / xsize; // get resolution in 1/pixel
		if (res < 0.5)
		{

			// Find the suitable MTF value
			int i_0 = 0;
			for (int ii = 0; ii < XSIZE(mtf_resol); ii++)
			{
				if (DIRECT_A1D_ELEM(mtf_resol, ii) > res)
				{
					break;
				}
				i_0 = ii;
			}
			// linear interpolation: y = y_0 + (y_1 - y_0)*(x - x_0)/(x_1 - x_0)
			double mtf;
			double x_0 = DIRECT_A1D_ELEM(mtf_resol, i_0);
			if (i_0 == MULTIDIM_SIZE(mtf_resol) - 1 || i_0 == 0) // check boundaries of the array
			{
				mtf = DIRECT_A1D_ELEM(mtf_value, i_0);
			}
			else
			{
				double x_1 = DIRECT_A1D_ELEM(mtf_resol, i_0 + 1);
				double y_0 = DIRECT_A1D_ELEM(mtf_value, i_0);
				double y_1 = DIRECT_A1D_ELEM(mtf_value, i_0 + 1);
				mtf = y_0 + (y_1 - y_0) * (res - x_0) / (x_1 - x_0);
			}

			// Divide Fourier component by the MTF
			DIRECT_A3D_ELEM(FT, k, i, j) /= mtf;
		}
	}



}