Example #1
/* Filter generation ------------------------------------------------------- */
void Steerable::generate1DFilters(double sigma,
    const MultidimArray<double> &Vtomograph,
    std::vector< MultidimArray<double> > &hx1,
    std::vector< MultidimArray<double> > &hy1,
    std::vector< MultidimArray<double> > &hz1){

    // Initialization 
    MultidimArray<double> aux;
    aux.initZeros(XSIZE(Vtomograph));
    aux.setXmippOrigin();
    for (int i=0; i<6; i++) hx1.push_back(aux);
    
    aux.initZeros(YSIZE(Vtomograph));
    aux.setXmippOrigin();
    for (int i=0; i<6; i++) hy1.push_back(aux);

    aux.initZeros(ZSIZE(Vtomograph));
    aux.setXmippOrigin();
    for (int i=0; i<6; i++) hz1.push_back(aux);

    double sigma2=sigma*sigma;       
    double k1 =  1.0/pow((2.0*PI*sigma),(3.0/2.0));
    double k2 = -1.0/(sigma2);
    
    FOR_ALL_ELEMENTS_IN_ARRAY1D(hx1[0])
    {        
        double i2=i*i;
        double g = -exp(-i2/(2.0*sigma2));
        hx1[0](i) = k1*k2*g*(1.0-(i2/sigma2));
        hx1[1](i) = k1*k2*g;
        hx1[2](i) = k1*k2*g;
        hx1[3](i) = k1*k2*k2*g*i;
        hx1[4](i) = k1*k2*k2*g*i;
        hx1[5](i) = k1*k2*k2*g;
    }    
    FOR_ALL_ELEMENTS_IN_ARRAY1D(hy1[0])
    {
        double i2=i*i;
        double g = -exp(-i2/(2.0*sigma2));
        hy1[0](i) = g;
        hy1[1](i) = g*(1.0-(i2/sigma2));
        hy1[2](i) = g;
        hy1[3](i) = g*i;
        hy1[4](i) = g;
        hy1[5](i) = g*i;
    }
    FOR_ALL_ELEMENTS_IN_ARRAY1D(hz1[0])
    {
        double i2=i*i;
        double g = -exp(-i2/(2.0*sigma2));
        hz1[0](i) = g;
        hz1[1](i) = g;
        hz1[2](i) = g*(1.0-(i2/sigma2));
        hz1[3](i) = g;
        hz1[4](i) = g*i;
        hz1[5](i) = g*i;
    }
}
Example #2
    // Computes the average of a number of frames in movies
    void computeAvg(const FileName &movieFile, int begin, int end, MultidimArray<double> &avgImg)
    {
        ImageGeneric movieStack;
        MultidimArray<double> frameImage, shiftedFrame;
        Matrix1D<double> shiftMatrix(2);
        int N=end-begin+1;

        for (int i=begin; i<=end; i++)
        {
            movieStack.readMapped(movieFile,i);
            movieStack().getImage(frameImage);
            if (i==begin)
                avgImg.initZeros(YSIZE(frameImage), XSIZE(frameImage));
            if (darkImageCorr)
                frameImage-=darkImage;
            if (gainImageCorr)
                frameImage/=gainImage;
            if (globalShiftCorr)
            {
                XX(shiftMatrix)=XX(shiftVector[i-1]);
                YY(shiftMatrix)=YY(shiftVector[i-1]);
                translate(LINEAR, shiftedFrame, frameImage, shiftMatrix, WRAP);
                avgImg+=shiftedFrame;
            }
            else
                avgImg+=frameImage;
        }
        avgImg/=double(N);
        frameImage.clear();
        movieStack.clear();
    }
Example #3
File: fftw.cpp  Project: shy3u/GeRelion
void getSpectrum(MultidimArray<double>& Min,
                 MultidimArray<double>& spectrum,
                 int spectrum_type)
{

	MultidimArray<Complex > Faux;
	int xsize = XSIZE(Min);
	Matrix1D<double> f(3);
	MultidimArray<double> count(xsize);
	FourierTransformer transformer;

	spectrum.initZeros(xsize);
	count.initZeros();
	transformer.FourierTransform(Min, Faux, false);
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(Faux)
	{
		long int idx = ROUND(sqrt(kp * kp + ip * ip + jp * jp));
		if (spectrum_type == AMPLITUDE_SPECTRUM)
		{
			spectrum(idx) += abs(dAkij(Faux, k, i, j));
		}
		else
		{
			spectrum(idx) += norm(dAkij(Faux, k, i, j));
		}
		count(idx) += 1.;
	}

	for (long int i = 0; i < xsize; i++)
		if (count(i) > 0.)
		{
			spectrum(i) /= count(i);
		}

}
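A minimal call sketch for getSpectrum() (hedged: the input file name is illustrative, and the surrounding project headers are assumed to be included):

    Image<double> I;
    I.read("volume.mrc");                         // hypothetical input map
    MultidimArray<double> spectrum;
    getSpectrum(I(), spectrum, AMPLITUDE_SPECTRUM);
    // spectrum(i) now holds the average amplitude of Fourier shell i
    for (long int i = 0; i < XSIZE(spectrum); i++)
        std::cout << i << " " << spectrum(i) << std::endl;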
Example #4
 // Converts an OpenCV matrix to an Xmipp MultidimArray
 void opencv2Xmipp(const cv::Mat &opencvMat, MultidimArray<double> &xmippArray)
 {
     int h = opencvMat.rows;
     int w = opencvMat.cols;
     xmippArray.initZeros(h, w);
     FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY2D(xmippArray)
     DIRECT_A2D_ELEM(xmippArray,i,j) = opencvMat.at<float>(i,j);
 }
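A short round-trip sketch (hedged): the image size is illustrative, and xmipp2Opencv is the converse helper used later in this listing, with its argument order inferred from that call.

    cv::Mat cvImg = cv::Mat::ones(128, 128, CV_32FC1);   // hypothetical 128x128 float image
    MultidimArray<double> xmippImg;
    opencv2Xmipp(cvImg, xmippImg);      // OpenCV -> Xmipp
    cv::Mat cvBack;
    xmipp2Opencv(xmippImg, cvBack);     // Xmipp -> OpenCV (argument order inferred)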
Example #5
void Steerable::generate3DFilter(MultidimArray<double>& h3D,
    std::vector< MultidimArray<double> > &hx1,
    std::vector< MultidimArray<double> > &hy1,
    std::vector< MultidimArray<double> > &hz1)
{
    h3D.initZeros(XSIZE(hz1[0]),XSIZE(hy1[0]),XSIZE(hx1[0]));
    h3D.setXmippOrigin();
    FOR_ALL_ELEMENTS_IN_ARRAY3D(h3D)
        for (int n=0; n<6; n++)
            h3D(k,i,j)+=(hz1[n](k)*hy1[n](i)*hx1[n](j));    
}
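A hedged sketch of how Example #1 and this routine work together: the six separable 1D profiles are generated first and then combined into a single 3D kernel. Here `steer` stands for an already-constructed Steerable object and V for the tomogram, both assumed to exist; the sigma value is illustrative.

    std::vector< MultidimArray<double> > hx1, hy1, hz1;
    MultidimArray<double> h3D;
    steer.generate1DFilters(2.0, V, hx1, hy1, hz1);   // sigma = 2.0 voxels (illustrative)
    steer.generate3DFilter(h3D, hx1, hy1, hz1);       // h3D(k,i,j) = sum_n hz1[n](k)*hy1[n](i)*hx1[n](j)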
Example #6
/* Do inference ------------------------------------------------------------ */
int EnsembleNaiveBayes::doInference(const Matrix1D<double> &newFeatures,
                                    double &cost, MultidimArray<int> &votes,
                                    Matrix1D<double> &classesProbs, Matrix1D<double> &allCosts)
{
    int nmax=ensemble.size();
    MultidimArray<double> minCost, maxCost;
    votes.initZeros(K);
    minCost.initZeros(K);
    minCost.initConstant(1);
    maxCost.initZeros(K);
    maxCost.initConstant(1);
    double bestMinCost=0;
    int bestClass=0;
    MultidimArray<double> newFeaturesn;
    for (int n=0; n<nmax; n++)
    {
        double costn;
        newFeaturesn.initZeros(XSIZE(ensembleFeatures[n]));
        FOR_ALL_ELEMENTS_IN_ARRAY1D(newFeaturesn)
        newFeaturesn(i)=newFeatures(ensembleFeatures[n](i));
        int k=ensemble[n]->doInference(newFeaturesn, costn, classesProbs, allCosts);
        votes(k)++;
        if (minCost(k)>0 || minCost(k)>costn)
            minCost(k)=costn;
        if (maxCost(k)>0 || maxCost(k)<costn)
            maxCost(k)=costn;
        if (minCost(k)<bestMinCost)
        {
            bestMinCost=minCost(k);
            bestClass=k;
        }
    }
    if      (judgeCombination[bestClass]=='m')
        cost=minCost(bestClass);
    else if (judgeCombination[bestClass]=='M')
        cost=maxCost(bestClass);
    else
        cost=minCost(bestClass);
    return bestClass;
}
Example #7
void MlModel::initialise()
{

	// Auxiliary vector with relevant size in Fourier space
	MultidimArray<double > aux;
	aux.initZeros(ori_size / 2 + 1);

	// Now resize all relevant vectors
	Iref.resize(nr_classes);
	pdf_class.resize(nr_classes, 1. / (double)nr_classes);
	pdf_direction.resize(nr_classes);
	group_names.resize(nr_groups, "");
	sigma2_noise.resize(nr_groups, aux);
	nr_particles_group.resize(nr_groups);
	tau2_class.resize(nr_classes, aux);
	fsc_halves_class.resize(nr_classes, aux);
	sigma2_class.resize(nr_classes, aux);
	data_vs_prior_class.resize(nr_classes, aux);
	// TODO handle these two correctly.
	bfactor_correction.resize(nr_groups, 0.);
	scale_correction.resize(nr_groups, 1.);

	acc_rot.resize(nr_classes, 0);
	acc_trans.resize(nr_classes, 0);

	if (ref_dim == 2)
	{
		Matrix1D<double> empty(2);
		prior_offset_class.resize(nr_classes, empty);
	}
	// These arrays will be resized when they are filled
	orientability_contrib.resize(nr_classes);

	Projector ref(ori_size, interpolator, padding_factor, r_min_nn);
	PPref.clear();
	// Now fill the entire vector with instances of "ref"
	PPref.resize(nr_classes, ref);

}
Example #8
File: fftw.cpp  Project: shy3u/GeRelion
// Fourier ring correlation -----------------------------------------------
// from precalculated Fourier Transforms, and without sampling rate etc.
void getFSC(MultidimArray< Complex >& FT1,
            MultidimArray< Complex >& FT2,
            MultidimArray< double >& fsc)
{
	if (!FT1.sameShape(FT2))
	{
		REPORT_ERROR("fourierShellCorrelation ERROR: MultidimArrays have different shapes!");
	}

	MultidimArray< int > radial_count(XSIZE(FT1));
	MultidimArray<double> num, den1, den2;
	Matrix1D<double> f(3);
	num.initZeros(radial_count);
	den1.initZeros(radial_count);
	den2.initZeros(radial_count);
	fsc.initZeros(radial_count);
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(FT1)
	{
		int idx = ROUND(sqrt(kp * kp + ip * ip + jp * jp));
		if (idx >= XSIZE(FT1))
		{
			continue;
		}
		Complex z1 = DIRECT_A3D_ELEM(FT1, k, i, j);
		Complex z2 = DIRECT_A3D_ELEM(FT2, k, i, j);
		double absz1 = abs(z1);
		double absz2 = abs(z2);
		num(idx) += (conj(z1) * z2).real;
		den1(idx) += absz1 * absz1;
		den2(idx) += absz2 * absz2;
		radial_count(idx)++;
	}

	FOR_ALL_ELEMENTS_IN_ARRAY1D(fsc)
	{
		fsc(i) = num(i) / sqrt(den1(i) * den2(i));
	}

}
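A minimal sketch of feeding getFSC() with precalculated transforms, reusing the FourierTransform() call pattern from Example #3; the half-map file names are illustrative:

    Image<double> half1, half2;
    half1.read("half1.mrc");                  // hypothetical half-map 1
    half2.read("half2.mrc");                  // hypothetical half-map 2
    MultidimArray<Complex> FT1, FT2;
    FourierTransformer t1, t2;
    t1.FourierTransform(half1(), FT1, false);
    t2.FourierTransform(half2(), FT2, false);
    MultidimArray<double> fsc;
    getFSC(FT1, FT2, fsc);                    // fsc(i) = correlation of Fourier shell i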
Example #9
    void run()
    {
        // Get angles ==============================================================
        MetaData angles;
        angles.read(fnIn);
        size_t AngleNo = angles.size();
        if (AngleNo == 0 || !angles.containsLabel(MDL_ANGLE_ROT))
            REPORT_ERROR(ERR_MD_BADLABEL, "Input file doesn't contain angular information");

        double maxWeight = -99.e99;
        MultidimArray<double> weight;
        weight.initZeros(AngleNo);
        if (angles.containsLabel(MDL_WEIGHT))
        {
            // Find maximum weight
            int i=0;
            FOR_ALL_OBJECTS_IN_METADATA(angles)
            {
                double w;
                angles.getValue(MDL_WEIGHT,w,__iter.objId);
                DIRECT_A1D_ELEM(weight,i++)=w;
                maxWeight=XMIPP_MAX(w,maxWeight);
            }
        }
Example #10
void ProgVolumePCA::run()
{
    show();
    produce_side_info();

    const MultidimArray<int> &imask=mask.imask;
    size_t Nvoxels=imask.sum();
    MultidimArray<float> v;
    v.initZeros(Nvoxels);

    // Add all volumes to the analyzer
    FileName fnVol;
    FOR_ALL_OBJECTS_IN_METADATA(mdVols)
    {
        mdVols.getValue(MDL_IMAGE,fnVol,__iter.objId);
        V.read(fnVol);

        // Construct vector
        const MultidimArray<double> &mV=V();
        size_t idx=0;
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mV)
        {
            if (DIRECT_MULTIDIM_ELEM(imask,n))
                DIRECT_MULTIDIM_ELEM(v,idx++)=DIRECT_MULTIDIM_ELEM(mV,n);
        }

        analyzer.addVector(v);
    }

    // Construct PCA basis
    analyzer.subtractAvg();
    analyzer.learnPCABasis(NPCA,100);

    // Project onto the PCA basis
    Matrix2D<double> proj;
    analyzer.projectOnPCABasis(proj);
    std::vector<double> dimredProj;
    dimredProj.resize(NPCA);
    int i=0;
    FOR_ALL_OBJECTS_IN_METADATA(mdVols)
    {
        memcpy(&dimredProj[0],&MAT_ELEM(proj,i,0),NPCA*sizeof(double));
        mdVols.setValue(MDL_DIMRED,dimredProj,__iter.objId);
        i++;
    }
    if (fnVolsOut!="")
        mdVols.write(fnVolsOut);
    else
        mdVols.write(fnVols);

    // Save the basis
    const MultidimArray<double> &mV=V();
    for (int i=NPCA-1; i>=0; --i)
    {
        V().initZeros();
        size_t idx=0;
        const MultidimArray<double> &mPCA=analyzer.PCAbasis[i];
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mV)
        {
            if (DIRECT_MULTIDIM_ELEM(imask,n))
                DIRECT_MULTIDIM_ELEM(mV,n)=DIRECT_MULTIDIM_ELEM(mPCA,idx++);
        }
        if (fnBasis!="")
            V.write(fnBasis,i+1,true,WRITE_OVERWRITE);
    }

    // Generate the PCA volumes
    if (listOfPercentiles.size()>0 && fnOutStack!="" && fnAvgVol!="")
    {
        Image<double> Vavg;
        if (fnAvgVol!="")
            Vavg.read(fnAvgVol);
        else
            Vavg().initZeros(V());

        Matrix1D<double> p;
        proj.toVector(p);
        Matrix1D<double> psorted=p.sort();

        Image<double> Vpca;
        Vpca()=Vavg();
        createEmptyFile(fnOutStack,(int)XSIZE(Vavg()),(int)YSIZE(Vavg()),(int)ZSIZE(Vavg()),listOfPercentiles.size());
        std::cout << "listOfPercentiles.size()=" << listOfPercentiles.size() << std::endl;
        for (size_t i=0; i<listOfPercentiles.size(); i++)
        {
            int idx=(int)round(textToFloat(listOfPercentiles[i].c_str())/100.0*VEC_XSIZE(p));
            std::cout << "Percentile " << listOfPercentiles[i] << " -> idx=" << idx << " p(idx)=" << psorted(idx) << std::endl;
            Vpca()+=psorted(idx)*V();
            Vpca.write(fnOutStack,i+1,true,WRITE_REPLACE);
        }
    }
}
Example #11
EnsembleNaiveBayes::EnsembleNaiveBayes(
    const std::vector < MultidimArray<double> >  &features,
    const Matrix1D<double> &priorProbs,
    int discreteLevels, int numberOfClassifiers,
    double samplingFeatures, double samplingIndividuals,
    const std::string &newJudgeCombination)
{
    int NFeatures=XSIZE(features[0]);
    int NsubFeatures=CEIL(NFeatures*samplingFeatures);
    K=features.size();
    judgeCombination=newJudgeCombination;

#ifdef WEIGHTED_SAMPLING
    // Measure the classification power of each variable
    NaiveBayes *nb_weights=new NaiveBayes(features, priorProbs, discreteLevels);
    MultidimArray<double> weights=nb_weights->__weights;
    delete nb_weights;
    double sumWeights=weights.sum();
#endif

    for (int n=0; n<numberOfClassifiers; n++)
    {
        // Produce the set of features for this subclassifier
        MultidimArray<int> subFeatures(NsubFeatures);
        FOR_ALL_ELEMENTS_IN_ARRAY1D(subFeatures)
        {
#ifdef WEIGHTED_SAMPLING
            double random_sum_weight=rnd_unif(0,sumWeights);
            int j=0;
            do
            {
                double wj=DIRECT_A1D_ELEM(weights,j);
                if (wj<random_sum_weight)
                {
                    random_sum_weight-=wj;
                    j++;
                    if (j==NFeatures)
                    {
                        j=NFeatures-1;
                        break;
                    }
                }
                else
                    break;
            }
            while (true);
            DIRECT_A1D_ELEM(subFeatures,i)=j;
#else

            DIRECT_A1D_ELEM(subFeatures,i)=round(rnd_unif(0,NFeatures-1));
#endif

        }

        // Container for the new training sample
        std::vector< MultidimArray<double> >  newFeatures;

        // Produce the data set for each class
        for (int k=0; k<K; k++)
        {
            int NIndividuals=YSIZE(features[k]);
            int NsubIndividuals=CEIL(NIndividuals*samplingIndividuals);
            MultidimArray<int> subIndividuals(NsubIndividuals);
            FOR_ALL_ELEMENTS_IN_ARRAY1D(subIndividuals)
            subIndividuals(i)=ROUND(rnd_unif(0,NsubIndividuals-1));

            MultidimArray<double> newFeaturesK;
            newFeaturesK.initZeros(NsubIndividuals,NsubFeatures);
            const MultidimArray<double>& features_k=features[k];
            FOR_ALL_ELEMENTS_IN_ARRAY2D(newFeaturesK)
            DIRECT_A2D_ELEM(newFeaturesK,i,j)=DIRECT_A2D_ELEM(features_k,
                                              DIRECT_A1D_ELEM(subIndividuals,i),
                                              DIRECT_A1D_ELEM(subFeatures,j));

            newFeatures.push_back(newFeaturesK);
        }

        // Create a Naive Bayes classifier with this data
        NaiveBayes *nb=new NaiveBayes(newFeatures, priorProbs, discreteLevels);
        ensemble.push_back(nb);
        ensembleFeatures.push_back(subFeatures);
    }
}
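A hypothetical end-to-end sketch connecting this constructor with the doInference() method of Example #6; the class count, priors, sampling rates and judge string are illustrative, and the feature matrices are assumed to be filled elsewhere.

    std::vector< MultidimArray<double> > features(2);  // one (individuals x features) matrix per class, filled elsewhere
    Matrix1D<double> priors(2);
    priors(0)=priors(1)=0.5;                           // two equiprobable classes
    EnsembleNaiveBayes classifier(features, priors, 8, 10, 0.8, 0.8, "mm");

    // Classify one new individual
    Matrix1D<double> newFeatures;                      // raw feature vector, filled elsewhere
    MultidimArray<int> votes;
    Matrix1D<double> classesProbs, allCosts;
    double cost;
    int bestClass = classifier.doInference(newFeatures, cost, votes, classesProbs, allCosts);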
Example #12
//#define DEBUG
void normalize_tomography(MultidimArray<double> &I, double tilt, double &mui,
                          double &sigmai, bool tiltMask, bool tomography0,
                          double mu0, double sigma0)
{
    // Tilt series are always 2D
    I.checkDimension(2);

    const int L=2;

    // Build a mask using the tilt angle
    I.setXmippOrigin();
    MultidimArray<int> mask;
    mask.initZeros(I);
    int Xdimtilt=(int)XMIPP_MIN(FLOOR(0.5*(XSIZE(I)*cos(DEG2RAD(tilt)))),
                           0.5*(XSIZE(I)-(2*L+1)));
    int N=0;
    for (int i=STARTINGY(I); i<=FINISHINGY(I); i++)
        for (int j=-Xdimtilt; j<=Xdimtilt;j++)
        {
            A2D_ELEM(mask,i,j)=1;
            N++;
        }

    // Estimate the local variance
    MultidimArray<double> localVariance;
    localVariance.initZeros(I);
    double meanVariance=0;
    FOR_ALL_ELEMENTS_IN_ARRAY2D(I)
    {
        // Center a mask of size 5x5 and estimate the variance within the mask
        double meanPiece=0, variancePiece=0;
        double Npiece=0;
        for (int ii=i-L; ii<=i+L; ii++)
        {
            if (INSIDEY(I,ii))
                for (int jj=j-L; jj<=j+L; jj++)
                {
                    if (INSIDEX(I,jj))
                    {
                        double pixval=A2D_ELEM(I,ii,jj);
                        meanPiece+=pixval;
                        variancePiece+=pixval*pixval;
                        ++Npiece;
                    }
                }
        }
        meanPiece/=Npiece;
        variancePiece=variancePiece/(Npiece-1)-
                      Npiece/(Npiece-1)*meanPiece*meanPiece;
        A2D_ELEM(localVariance,i,j)=variancePiece;
        meanVariance+=variancePiece;
    }
    meanVariance*=1.0/N;

    // Test the hypothesis that the variance in this piece is
    // the same as the variance in the whole image
    double iFu=1/icdf_FSnedecor(4*L*L+4*L,N-1,0.975);
    double iFl=1/icdf_FSnedecor(4*L*L+4*L,N-1,0.025);
    double iMeanVariance=1.0/meanVariance;
    FOR_ALL_ELEMENTS_IN_ARRAY2D(localVariance)
    {
        if (A2D_ELEM(localVariance,i,j)==0)
            A2D_ELEM(mask,i,j)=-2;
        if (A2D_ELEM(mask,i,j)==1)
        {
            double ratio=A2D_ELEM(localVariance,i,j)*iMeanVariance;
            double thl=ratio*iFu;
            double thu=ratio*iFl;
            if (thl>1 || thu<1)
                A2D_ELEM(mask,i,j)=-1;
        }
    }
#ifdef DEBUG
    Image<double> save;
    save()=I;
    save.write("PPP.xmp");
    save()=localVariance;
    save.write("PPPLocalVariance.xmp");
    Image<int> savemask;
    savemask()=mask;
    savemask.write("PPPmask.xmp");
#endif

    // Compute the statistics again in the reduced mask
    double avg, stddev, min, max;
    computeStats_within_binary_mask(mask, I, min, max, avg, stddev);
    double cosTilt=cos(DEG2RAD(tilt));
    double iCosTilt=1.0/cosTilt;
    if (tomography0)
    {
        double iadjustedStddev=1.0/(sigma0*cosTilt);
        FOR_ALL_ELEMENTS_IN_ARRAY2D(I)
        switch (A2D_ELEM(mask,i,j))
        {
        case -2:
            A2D_ELEM(I,i,j)=0;
            break;
        case 0:
            if (tiltMask)
                A2D_ELEM(I,i,j)=0;
            else
                A2D_ELEM(I,i,j)=(A2D_ELEM(I,i,j)*iCosTilt-mu0)*iadjustedStddev;
            break;
        case -1:
        case 1:
            A2D_ELEM(I,i,j)=(A2D_ELEM(I,i,j)*iCosTilt-mu0)*iadjustedStddev;
            break;
        }
    }
    else
    {
Example #13
    int main2()
    {

        MultidimArray<double> preImg, avgCurr, mappedImg;
        MultidimArray<double> outputMovie;
        Matrix1D<double> meanStdev;
        ImageGeneric movieStack;
        Image<double> II;
        MetaData MD; // To save plot information
        FileName motionInfFile, flowFileName, flowXFileName, flowYFileName;
        ArrayDim aDim;

        // For measuring times (both for whole process and for each level of the pyramid)
        clock_t tStart, tStart2;

#ifdef GPU
        // Matrix that we required in GPU part
        GpuMat d_flowx, d_flowy, d_dest;
        GpuMat d_avgcurr, d_preimg;
#endif

        // Matrix required by Opencv
        cv::Mat flow, dest, flowx, flowy;
        cv::Mat flowxPre, flowyPre;
        cv::Mat avgcurr, avgstep, preimg, preimg8, avgcurr8;
        cv::Mat planes[]={flowxPre, flowyPre};

        int imagenum, cnt=2, div=0, flowCounter;
        int h, w, levelNum, levelCounter=1;

        motionInfFile=foname.replaceExtension("xmd");
        std::string extension=fname.getExtension();
        if (extension=="mrc")
            fname+=":mrcs";
        movieStack.read(fname,HEADER);
        movieStack.getDimensions(aDim);
        imagenum = aDim.ndim;
        h = aDim.ydim;
        w = aDim.xdim;
        if (darkImageCorr)
        {
            II.read(darkRefFilename);
            darkImage=II();
        }
        if (gainImageCorr)
        {
            II.read(gianRefFilename);
            gainImage=II();
        }
        meanStdev.initZeros(4);
        //avgcurr=cv::Mat::zeros(h, w,CV_32FC1);
        avgCurr.initZeros(h, w);
        flowxPre=cv::Mat::zeros(h, w,CV_32FC1);
        flowyPre=cv::Mat::zeros(h, w,CV_32FC1);
#ifdef GPU

        // Object for optical flow
        FarnebackOpticalFlow d_calc;
        setDevice(gpuDevice);

        // Initialize the parameters for optical flow structure
        d_calc.numLevels=6;
        d_calc.pyrScale=0.5;
        d_calc.fastPyramids=true;
        d_calc.winSize=winSize;
        d_calc.numIters=1;
        d_calc.polyN=5;
        d_calc.polySigma=1.1;
        d_calc.flags=0;
#endif
        // Initialize the stack for the output movie
        if (saveCorrMovie)
            outputMovie.initZeros(imagenum, 1, h, w);
        // Correct for global motion from a cross-correlation based algorithms
        if (globalShiftCorr)
        {
            Matrix1D<double> shiftMatrix(2);
            shiftVector.reserve(imagenum);
            shiftMD.read(globalShiftFilename);
            FOR_ALL_OBJECTS_IN_METADATA(shiftMD)
            {
                shiftMD.getValue(MDL_SHIFT_X, XX(shiftMatrix), __iter.objId);
                shiftMD.getValue(MDL_SHIFT_Y, YY(shiftMatrix), __iter.objId);
                shiftVector.push_back(shiftMatrix);
            }
        }
        tStart2=clock();
        // Compute the average of the whole stack
        fstFrame++; // Just to adapt to Li algorithm
        lstFrame++; // Just to adapt to Li algorithm
        if (lstFrame>=imagenum || lstFrame==1)
            lstFrame=imagenum;
        imagenum=lstFrame-fstFrame+1;
        levelNum=sqrt(double(imagenum));
        computeAvg(fname, fstFrame, lstFrame, avgCurr);
        // If the user only wants the global average, save it and stop
        if (doAverage)
        {
            II()=avgCurr;
            II.write(foname);
            return 0;
        }
        xmipp2Opencv(avgCurr, avgcurr);
        cout<<"Frames "<<fstFrame<<" to "<<lstFrame<<" under processing ..."<<std::endl;
        while (div!=groupSize)
        {
            div=int(imagenum/cnt);
            // avgStep to hold the sum of aligned frames of each group at each step
            avgstep=cv::Mat::zeros(h, w,CV_32FC1);

            cout<<"Level "<<levelCounter<<"/"<<levelNum<<" of the pyramid is under processing"<<std::endl;
            // Compute time for each level
            tStart = clock();

            // Check if we are in the final step
            if (div==1)
                cnt=imagenum;
            flowCounter=1;
            for (int i=0;i<cnt;i++)
            {
                //Just compute the average in the last step
                if (div==1)
                {
                    if (globalShiftCorr)
                    {
                        Matrix1D<double> shiftMatrix(2);
                        MultidimArray<double> frameImage;
                        movieStack.readMapped(fname,i+1);
                        movieStack().getImage(frameImage);
                        if (darkImageCorr)
                            frameImage-=darkImage;
                        if (gainImageCorr)
                            frameImage/=gainImage;
                        XX(shiftMatrix)=XX(shiftVector[i]);
                        YY(shiftMatrix)=YY(shiftVector[i]);
                        translate(BSPLINE3, preImg, frameImage, shiftMatrix, WRAP);
                    }
                    else
                    {
                        movieStack.readMapped(fname,fstFrame+i);
                        movieStack().getImage(preImg);
                        if (darkImageCorr)
                            preImg-=darkImage;
                        if (gainImageCorr)
                            preImg/=gainImage;
                    }
                    xmipp2Opencv(preImg, preimg);
                }
                else
                {
                    if (i==cnt-1)
                        computeAvg(fname, i*div+fstFrame, lstFrame, preImg);
                    else
                        computeAvg(fname, i*div+fstFrame, (i+1)*div+fstFrame-1, preImg);
                }
                xmipp2Opencv(preImg, preimg);
                // Note: we should use the OpenCV conversion to use it in optical flow
                convert2Uint8(avgcurr,avgcurr8);
                convert2Uint8(preimg,preimg8);
#ifdef GPU

                d_avgcurr.upload(avgcurr8);
                d_preimg.upload(preimg8);
                if (cnt==2)
                    d_calc(d_avgcurr, d_preimg, d_flowx, d_flowy);
                else
                {
                    flowXFileName=foname.removeLastExtension()+formatString("flowx%d%d.txt",div*2,flowCounter);
                    flowYFileName=foname.removeLastExtension()+formatString("flowy%d%d.txt",div*2,flowCounter);
                    readMat(flowXFileName.c_str(), flowx);
                    readMat(flowYFileName.c_str(), flowy);
                    d_flowx.upload(flowx);
                    d_flowy.upload(flowy);
                    d_calc.flags=cv::OPTFLOW_USE_INITIAL_FLOW;
                    d_calc(d_avgcurr, d_preimg, d_flowx, d_flowy);
                }
                d_flowx.download(planes[0]);
                d_flowy.download(planes[1]);
                d_avgcurr.release();
                d_preimg.release();
                d_flowx.release();
                d_flowy.release();
#else

                if (cnt==2)
                    calcOpticalFlowFarneback(avgcurr8, preimg8, flow, 0.5, 6, winSize, 1, 5, 1.1, 0);
                else
                {
                    flowFileName=foname.removeLastExtension()+formatString("flow%d%d.txt",div*2,flowCounter);
                    readMat(flowFileName.c_str(), flow);
                    calcOpticalFlowFarneback(avgcurr8, preimg8, flow, 0.5, 6, winSize, 1, 5, 1.1, cv::OPTFLOW_USE_INITIAL_FLOW);
                }
                split(flow, planes);

#endif
                // Save the flows if we are in the last step
                if (div==groupSize)
                {
                    if (i > 0)
                    {
                        std_dev2(planes,flowxPre,flowyPre,meanStdev);
                        size_t id=MD.addObject();
                        MD.setValue(MDL_OPTICALFLOW_MEANX, double(meanStdev(0)), id);
                        MD.setValue(MDL_OPTICALFLOW_MEANY, double(meanStdev(2)), id);
                        MD.setValue(MDL_OPTICALFLOW_STDX, double(meanStdev(1)), id);
                        MD.setValue(MDL_OPTICALFLOW_STDY, double(meanStdev(3)), id);
                        MD.write(motionInfFile, MD_APPEND);
                    }
                    planes[0].copyTo(flowxPre);
                    planes[1].copyTo(flowyPre);
                }
                else
                {
#ifdef GPU
                    flowXFileName=foname.removeLastExtension()+formatString("flowx%d%d.txt",div,i+1);
                    flowYFileName=foname.removeLastExtension()+formatString("flowy%d%d.txt",div,i+1);
                    saveMat(flowXFileName.c_str(), planes[0]);
                    saveMat(flowYFileName.c_str(), planes[1]);
#else

                    flowFileName=foname.removeLastExtension()+formatString("flow%d%d.txt",div,i+1);
                    saveMat(flowFileName.c_str(), flow);
#endif

                    if ((i+1)%2==0)
                        flowCounter++;
                }
                for( int row = 0; row < planes[0].rows; row++ )
                    for( int col = 0; col < planes[0].cols; col++ )
                    {
                        planes[0].at<float>(row,col) += (float)col;
                        planes[1].at<float>(row,col) += (float)row;
                    }
                cv::remap(preimg, dest, planes[0], planes[1], cv::INTER_CUBIC);
                if (div==1 && saveCorrMovie)
                {
                    mappedImg.aliasImageInStack(outputMovie, i);
                    opencv2Xmipp(dest, mappedImg);
                }
                avgstep+=dest;
            }
            avgcurr=avgstep/cnt;
            cout<<"Processing level "<<levelCounter<<"/"<<levelNum<<" has been finished"<<std::endl;
            printf("Processing time: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
            cnt*=2;
            levelCounter++;
        }
        opencv2Xmipp(avgcurr, avgCurr);
        II() = avgCurr;
        II.write(foname);
        printf("Total Processing time: %.2fs\n", (double)(clock() - tStart2)/CLOCKS_PER_SEC);
        if (saveCorrMovie)
        {
            II()=outputMovie;
            II.write(foname.replaceExtension("mrcs"));
        }

        // Release the memory
        avgstep.release();
        preimg.release();
        avgcurr8.release();
        preimg8.release();
        flow.release();
        planes[0].release();
        planes[1].release();
        flowxPre.release();
        flowyPre.release();
        movieStack.clear();
        preImg.clear();
        avgCurr.clear();
        II.clear();
        return 0;
    }
Example #14
void ProgSortByStatistics::processInputPrepare(MetaData &SF)
{
    PCAMahalanobisAnalyzer tempPcaAnalyzer;
    tempPcaAnalyzer.clear();

    Image<double> img;
    MultidimArray<double> img2;
    MultidimArray<int> radial_count;
    MultidimArray<double> radial_avg;
    Matrix1D<int> center(2);
    center.initZeros();

    if (verbose>0)
        std::cout << " Processing training set ..." << std::endl;

    int nr_imgs = SF.size();
    if (verbose>0)
        init_progress_bar(nr_imgs);
    int c = XMIPP_MAX(1, nr_imgs / 60);
    int imgno = 0, imgnoPCA=0;
    MultidimArray<float> v;
    MultidimArray<int> distance;
    int dim;

    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    bool first=true;
    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        if (thereIsEnable)
        {
            int enabled;
            SF.getValue(MDL_ENABLED,enabled,__iter.objId);
            if (enabled==-1)
                continue;
        }
        img.readApplyGeo(SF,__iter.objId);
        if (targetXdim!=-1 && targetXdim!=XSIZE(img()))
        	selfScaleToSize(LINEAR,img(),targetXdim,targetXdim,1);
        MultidimArray<double> &mI=img();
        mI.setXmippOrigin();
        mI.statisticsAdjust(0,1);

        // Overall statistics
        Histogram1D hist;
        compute_hist(mI,hist,-4,4,31);

        // Radial profile
        img2.resizeNoCopy(mI);
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(img2)
        {
            double val=DIRECT_MULTIDIM_ELEM(mI,n);
            DIRECT_MULTIDIM_ELEM(img2,n)=val*val;
        }
        if (first)
        {
            radialAveragePrecomputeDistance(img2, center, distance, dim);
            first=false;
        }
        fastRadialAverage(img2, distance, dim, radial_avg, radial_count);

        // Build vector
        v.initZeros(XSIZE(hist)+XSIZE(img2)/2);
        int idx=0;
        FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(hist)
        v(idx++)=(float)DIRECT_A1D_ELEM(hist,i);
        for (size_t i=0; i<XSIZE(img2)/2; i++)
            v(idx++)=(float)DIRECT_A1D_ELEM(radial_avg,i);

        tempPcaAnalyzer.addVector(v);

        if (imgno % c == 0 && verbose>0)
            progress_bar(imgno);
        imgno++;
        imgnoPCA++;
    }
    if (verbose>0)
        progress_bar(nr_imgs);

    MultidimArray<double> vavg,vstddev;
    tempPcaAnalyzer.computeStatistics(vavg,vstddev);
    tempPcaAnalyzer.evaluateZScore(2,20,false);
    pcaAnalyzer.insert(pcaAnalyzer.begin(), tempPcaAnalyzer);
}
Example #15
void Steerable::singleFilter(const MultidimArray<double>& Vin,
    MultidimArray<double> &hx1, MultidimArray<double> &hy1, MultidimArray<double> &hz1,
    MultidimArray<double> &Vout){

    MultidimArray< std::complex<double> > H, Aux;
    Vout.initZeros(Vin);

    // Filter in X
    #define MINUS_ONE_POWER(n) (((n)%2==0)? 1:-1)
    FourierTransformer transformer;
    transformer.FourierTransform(hx1,H);
    
    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
          H(i)*= MINUS_ONE_POWER(i);

    FourierTransformer transformer2;
    
    MultidimArray<double> aux(XSIZE(Vin));
    transformer2.setReal(aux);

    for (size_t k=0; k<ZSIZE(Vin); k++)
        for (size_t i=0; i<YSIZE(Vin); i++)
        {
            for (size_t j=0; j<XSIZE(Vin); j++)
                DIRECT_A1D_ELEM(aux,j)=DIRECT_A3D_ELEM(Vin,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t j=0; j<XSIZE(Vin); j++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,j);
        }

    // Filter in Y
    transformer.FourierTransform(hy1,H);
    
    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
          H(i)*= MINUS_ONE_POWER(i);

    aux.initZeros(YSIZE(Vin));
    transformer2.setReal(aux);

    for (size_t k=0; k<ZSIZE(Vin); k++)
        for (size_t j=0; j<XSIZE(Vin); j++)
        {
            for (size_t i=0; i<YSIZE(Vin); i++)
                DIRECT_A1D_ELEM(aux,i)=DIRECT_A3D_ELEM(Vout,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t i=0; i<YSIZE(Vin); i++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,i);
        }

    // Filter in Z

    transformer.FourierTransform(hz1,H);

    FOR_ALL_ELEMENTS_IN_ARRAY1D(H)
          H(i)*= MINUS_ONE_POWER(i);

    aux.initZeros(ZSIZE(Vin));
    transformer2.setReal(aux);

    for (size_t i=0; i<YSIZE(Vin); i++)
        for (size_t j=0; j<XSIZE(Vin); j++)
        {
            for (size_t k=0; k<ZSIZE(Vin); k++)
                DIRECT_A1D_ELEM(aux,k)=DIRECT_A3D_ELEM(Vout,k,i,j);

            transformer2.FourierTransform();
            transformer2.getFourierAlias(Aux);
            Aux*=H;
            transformer2.inverseFourierTransform();

            for (size_t k=0; k<ZSIZE(Vin); k++)
                DIRECT_A3D_ELEM(Vout,k,i,j)=XSIZE(aux)*DIRECT_A1D_ELEM(aux,k);
        }
    
    // If Missing wedge
    if (MW!=NULL)
        MW->removeWedge(Vout);
}
Example #16
File: fftw.cpp  Project: shy3u/GeRelion
/** Kullback-Leibner divergence */
double getKullbackLeibnerDivergence(MultidimArray<Complex >& Fimg,
                                    MultidimArray<Complex >& Fref, MultidimArray<double>& sigma2,
                                    MultidimArray<double>& p_i, MultidimArray<double>& q_i, int highshell, int lowshell)
{
	// First check dimensions are OK
	if (!Fimg.sameShape(Fref))
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: Fimg and Fref are not of the same shape.");
	}

	if (highshell < 0)
	{
		highshell = XSIZE(Fimg) - 1;
	}
	if (lowshell < 0)
	{
		lowshell = 0;
	}

	if (highshell > XSIZE(sigma2))
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: highshell is larger than size of sigma2 array.");
	}

	if (highshell < lowshell)
	{
		REPORT_ERROR("getKullbackLeibnerDivergence ERROR: highshell is smaller than lowshell.");
	}

	// Initialize the histogram
	MultidimArray<int> histogram;
	int histogram_size = 101;
	int histogram_origin = histogram_size / 2;
	double sigma_max = 10.;
	double histogram_factor = histogram_origin / sigma_max;
	histogram.initZeros(histogram_size);

	// This way this will work in both 2D and 3D
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(Fimg)
	{
		int ires = ROUND(sqrt(kp * kp + ip * ip + jp * jp));
		if (ires >= lowshell && ires <= highshell)
		{
			// Use FT of masked image for noise estimation!
			double diff_real = (DIRECT_A3D_ELEM(Fref, k, i, j)).real - (DIRECT_A3D_ELEM(Fimg, k, i, j)).real;
			double diff_imag = (DIRECT_A3D_ELEM(Fref, k, i, j)).imag - (DIRECT_A3D_ELEM(Fimg, k, i, j)).imag;
			double sigma = sqrt(DIRECT_A1D_ELEM(sigma2, ires));

			// Divide by standard deviation to normalise all the difference
			diff_real /= sigma;
			diff_imag /= sigma;

			// Histogram runs from -10 sigma to +10 sigma
			diff_real += sigma_max;
			diff_imag += sigma_max;

			// Make histogram on-the-fly;
			// Real part
			int ihis = ROUND(diff_real * histogram_factor);
			if (ihis < 0)
			{
				ihis = 0;
			}
			else if (ihis >= histogram_size)
			{
				ihis = histogram_size - 1;
			}
			histogram(ihis)++;
			// Imaginary part
			ihis = ROUND(diff_imag * histogram_factor);
			if (ihis < 0)
			{
				ihis = 0;
			}
			else if (ihis >= histogram_size)
			{
				// Clamp to the last bin, mirroring the real-part clamp above
				ihis = histogram_size - 1;
			}
			histogram(ihis)++;

		}
	}

	// Normalise the histogram and the discretised analytical Gaussian
	double norm = (double)histogram.sum();
	double gaussnorm = 0.;
	for (int i = 0; i < histogram_size; i++)
	{
		double x = (double)i / histogram_factor;
		gaussnorm += gaussian1D(x - sigma_max, 1. , 0.);
	}

	// Now calculate the actual Kullback-Leibner divergence
	double kl_divergence = 0.;
	p_i.resize(histogram_size);
	q_i.resize(histogram_size);
	for (int i = 0; i < histogram_size; i++)
	{
		// Data distribution
		p_i(i) = (double)histogram(i) / norm;
		// Theoretical distribution
		double x = (double)i / histogram_factor;
		q_i(i) = gaussian1D(x - sigma_max, 1. , 0.) / gaussnorm;

		if (p_i(i) > 0.)
		{
			kl_divergence += p_i(i) * log(p_i(i) / q_i(i));
		}
	}
	kl_divergence /= (double)histogram_size;

	return kl_divergence;

}
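A hedged calling sketch: the transforms are assumed to be computed beforehand (e.g. with the FourierTransform() pattern of Example #3), and a flat unit noise spectrum is used purely for illustration.

    MultidimArray<Complex> Fimg, Fref;       // FTs of the masked image and of the reference, filled elsewhere
    MultidimArray<double> sigma2, p_i, q_i;
    sigma2.initZeros(XSIZE(Fimg));
    sigma2.initConstant(1.);                 // unit noise variance in every shell (illustrative)
    double kl = getKullbackLeibnerDivergence(Fimg, Fref, sigma2, p_i, q_i, -1, -1);
    // highshell = -1 and lowshell = -1 fall back to the full resolution range, as handled above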
Example #17
//#define DEBUG
void ProgResolutionIBW::run()
{
    V.read(fnVol);

    //Mask generation
    Image<double> aux;
    double bg_mean;
    MultidimArray<double> Vmask;
    detectBackground(V(),aux(),0.1,bg_mean);
#ifdef DEBUG

    aux.write("PPPmask_no_ero_03.vol");
#endif

    //Mask volume erosion to expand the mask boundaries
    Vmask.initZeros(V());
    erode3D(aux(),Vmask, 18,0,2);

    //Correction of some flaws produced in the edges of the mask volume
    FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY3D(Vmask)
    if (k<=4 || i<=4 || j<=4 ||
        k>=ZSIZE(Vmask)-4 || i>=YSIZE(Vmask)-4 || j>=XSIZE(Vmask)-4)
        DIRECT_A3D_ELEM(Vmask,k,i,j)=1;

    aux()=Vmask;
#ifdef DEBUG

    aux.write("PPPmask_ero_03.vol");
#endif

    //Sobel edge detection applied to original volume
    Image<double> Vedge;
    computeEdges(V(),Vedge());
#ifdef DEBUG

    Vedge.write("PPPvolume_sobel_unmask_03.vol");
#endif

    //Masked volume generation
    const MultidimArray<double> &mVedge=Vedge();
    FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mVedge)
    if (DIRECT_MULTIDIM_ELEM(Vmask,n)==1)
        DIRECT_MULTIDIM_ELEM(mVedge,n)=0;
#ifdef DEBUG

    Vedge.write("volume_sobel_mask_03.vol");
#endif

    double minval, maxval, avg, stddev;

    //Invert the mask to meet computeStats_within_binary_mask requirements
    FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(Vmask)
    if (DIRECT_MULTIDIM_ELEM(Vmask,n)==1)
        DIRECT_MULTIDIM_ELEM(Vmask,n)=0;
    else
        DIRECT_MULTIDIM_ELEM(Vmask,n)=1;

    //Threshold is 3 times the standard deviation of unmasked pixel values
    double thresh;
    computeStats_within_binary_mask(Vmask,mVedge,minval, maxval, avg, stddev);
    thresh=3*stddev;

    //Final edge volume generated by setting to 1 positions with values > threshold
    Image<double> Vaux;
    Vaux().initZeros(mVedge);
    FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mVedge)
    if (DIRECT_MULTIDIM_ELEM(mVedge,n)>=thresh)
        DIRECT_MULTIDIM_ELEM(Vaux(),n)=1;

#ifdef DEBUG

    Vaux.write("volumen_bordes_definitivo_03.vol");
#endif

    const MultidimArray<double> &mVaux=Vaux();

    //Spline coefficient volume from original volume, to allow <1 step sizes
    MultidimArray<double> Volcoeffs;
    Volcoeffs.initZeros(V());

    produceSplineCoefficients(3,Volcoeffs,V());

    //Width parameter volume initialization
    Image<double> widths;
    widths().resizeNoCopy(V());
    widths().initConstant(1e5);
    double step=0.25;

    Matrix1D<double> direction(3);

    //Calculation of edge width for 10 different directions, if a smaller value is found for a different
    //direction on a given position the former value is overwritten

    //Direction (1,0,0)
    VECTOR_R3(direction,1,0,0);
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (0,1,0)
    VECTOR_R3(direction,0,1,0);
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (0,0,1)
    VECTOR_R3(direction,0,0,1);
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (1,1,0)
    VECTOR_R3(direction,(1/sqrt(2)),(1/sqrt(2)),0);
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (1,0,1)
    VECTOR_R3(direction,(1/sqrt(2)),0,(1/sqrt(2)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (0,1,1)
    VECTOR_R3(direction,0,(1/sqrt(2)),(1/sqrt(2)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (1,1,1)
    VECTOR_R3(direction,(1/sqrt(3)),(1/sqrt(3)),(1/sqrt(3)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (-1,1,1)
    VECTOR_R3(direction,-(1/sqrt(3)),(1/sqrt(3)),(1/sqrt(3)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (1,1,-1)
    VECTOR_R3(direction,(1/sqrt(3)),(1/sqrt(3)),-(1/sqrt(3)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

    //Direction (1,-1,1)
    VECTOR_R3(direction,(1/sqrt(3)),-(1/sqrt(3)),(1/sqrt(3)));
    edgeWidth(Volcoeffs, mVaux, widths(), direction, step);

#ifdef DEBUG

    std::cout << "width stats: ";
    widths().printStats();
    std::cout << std::endl;
    widths.write("PPPwidths.vol");
#endif

    double ibw=calculateIBW(widths());
    std::cout << "Resolution ibw= " << ibw << std::endl;
    if (fnOut!="")
    	widths.write(fnOut);
}
Example #18
// Fill data array with oversampled Fourier transform, and calculate its power spectrum
void Projector::computeFourierTransformMap(MultidimArray<DOUBLE> &vol_in, MultidimArray<DOUBLE> &power_spectrum, int current_size, int nr_threads, bool do_gridding)
{

	MultidimArray<DOUBLE> Mpad;
	MultidimArray<Complex > Faux;
    FourierTransformer transformer;
    // DEBUGGING: multi-threaded FFTWs are giving me a headache?
	// For a long while: switch them off!
	//transformer.setThreadsNumber(nr_threads);
    DOUBLE normfft;

	// Size of padded real-space volume
	int padoridim = padding_factor * ori_size;

	// Initialize data array of the oversampled transform
	ref_dim = vol_in.getDim();

	// Make Mpad
	switch (ref_dim)
	{
	case 2:
	   Mpad.initZeros(padoridim, padoridim);
	   normfft = (DOUBLE)(padding_factor * padding_factor);
	   break;
	case 3:
	   Mpad.initZeros(padoridim, padoridim, padoridim);
	   if (data_dim ==3)
		   normfft = (DOUBLE)(padding_factor * padding_factor * padding_factor);
	   else
		   normfft = (DOUBLE)(padding_factor * padding_factor * padding_factor * ori_size);
	   break;
	default:
	   REPORT_ERROR("Projector::computeFourierTransformMap%%ERROR: Dimension of the data array should be 2 or 3");
	}

	// First do a gridding pre-correction on the real-space map:
	// Divide by the inverse Fourier transform of the interpolator in Fourier-space
	// 10feb11: at least in 2D case, this seems to be the wrong thing to do!!!
	// TODO: check what is best for subtomo!
	if (do_gridding)// && data_dim != 3)
		griddingCorrect(vol_in);

	// Pad translated map with zeros
	vol_in.setXmippOrigin();
	Mpad.setXmippOrigin();
	FOR_ALL_ELEMENTS_IN_ARRAY3D(vol_in) // This will also work for 2D
		A3D_ELEM(Mpad, k, i, j) = A3D_ELEM(vol_in, k, i, j);

	// Translate padded map to put origin of FT in the center
	CenterFFT(Mpad, true);

	// Calculate the oversampled Fourier transform
	transformer.FourierTransform(Mpad, Faux, false);

	// Free memory: Mpad no longer needed
	Mpad.clear();

	// Resize data array to the right size and initialise to zero
	initZeros(current_size);

	// Fill data only for those points with distance to origin less than max_r
	// (other points will be zero because of the initZeros() call above).
	// Also calculate radial power spectrum
	power_spectrum.initZeros(ori_size / 2 + 1);
	MultidimArray<DOUBLE> counter(power_spectrum);
	counter.initZeros();

	int max_r2 = r_max * r_max * padding_factor * padding_factor;
	FOR_ALL_ELEMENTS_IN_FFTW_TRANSFORM(Faux) // This will also work for 2D
	{
		int r2 = kp*kp + ip*ip + jp*jp;
		// The Fourier Transforms are all "normalised" for 2D transforms of size = ori_size x ori_size
		if (r2 <= max_r2)
		{
			// Set data array
			A3D_ELEM(data, kp, ip, jp) = DIRECT_A3D_ELEM(Faux, k, i, j) * normfft;

			// Calculate power spectrum
			int ires = ROUND( sqrt((DOUBLE)r2) / padding_factor );
			// Factor two because of two-dimensionality of the complex plane
			DIRECT_A1D_ELEM(power_spectrum, ires) += norm(A3D_ELEM(data, kp, ip, jp)) / 2.;
			DIRECT_A1D_ELEM(counter, ires) += 1.;
		}
	}

	// Calculate radial average of power spectrum
	FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(power_spectrum)
	{
		if (DIRECT_A1D_ELEM(counter, i) < 1.)
			DIRECT_A1D_ELEM(power_spectrum, i) = 0.;
		else
			DIRECT_A1D_ELEM(power_spectrum, i) /= DIRECT_A1D_ELEM(counter, i);
	}

	transformer.cleanup();

}
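A hedged usage sketch pairing the Projector constructor seen in Example #7 with this method; TRILINEAR is assumed to be the interpolator constant, and the file name and numeric parameters are illustrative.

    Image<DOUBLE> vol;
    vol.read("reference.mrc");                          // hypothetical reference map
    int ori_size = XSIZE(vol());
    Projector projector(ori_size, TRILINEAR, 2, 10);    // padding_factor = 2, r_min_nn = 10 (illustrative)
    MultidimArray<DOUBLE> power_spectrum;
    // current_size = ori_size keeps every frequency of the original grid
    projector.computeFourierTransformMap(vol(), power_spectrum, ori_size, 1, true);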