void ProgEvaluateClass::run()
{
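	// Read the "classes" block of the classification metadata file (block@file syntax)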
	MetaData MD((String)"classes@"+fnClass), MDclass;
	ClassEvaluation eval;
	if (verbose>0)
		init_progress_bar(MD.size());
	int idx=0;
	FOR_ALL_OBJECTS_IN_METADATA(MD)
	{
		int classNo;
		MD.getValue(MDL_REF,classNo,__iter.objId);
		MDclass.read(formatString("class%06d_images@%s",classNo,fnClass.c_str()));
		evaluateClass(MDclass,eval);
		MD.setValue(MDL_CLASSIFICATION_FRC_05,eval.FRC_05,__iter.objId);
		MD.setValue(MDL_CLASSIFICATION_DPR_05,eval.DPR_05,__iter.objId);
		idx++;
		if (verbose>0)
			progress_bar(idx);
	}
	if (verbose>0)
		progress_bar(MD.size());
	if (fnOut=="")
		fnOut=fnClass;
	MD.write((String)"classes@"+fnOut,MD_APPEND);
}
void ParticleSorterMpi::run()
{

	int total_nr_images = MDin.numberOfObjects();
	features.resize(total_nr_images, NR_FEATURES);

	// Each node does part of the work
	long int my_first_image, my_last_image, my_nr_images;
	divide_equally(total_nr_images, node->size, node->rank, my_first_image, my_last_image);
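	// divide_equally is assumed to return an inclusive [my_first_image, my_last_image] range, hence the +1 below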
	my_nr_images = my_last_image - my_first_image + 1;

	int barstep;
	if (verb > 0)
	{
		std::cout << "Calculating sorting features for all input particles..." << std::endl;
		init_progress_bar(my_nr_images);
		barstep = XMIPP_MAX(1, my_nr_images/ 60);
	}

	long int ipart = 0;
	FOR_ALL_OBJECTS_IN_METADATA_TABLE(MDin)
	{

		if (ipart >= my_first_image && ipart <= my_last_image)
		{
			if (verb > 0 && ipart % barstep == 0)
				progress_bar(ipart);

			calculateFeaturesOneParticle(ipart);

		}
		ipart++;
	}

	if (verb > 0)
		progress_bar(my_nr_images);

	// Combine results from all nodes
	MultidimArray<double> allnodes_features;
	allnodes_features.resize(features);
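	// Summing across ranks reassembles the full matrix, assuming each rank only filled its own rows and the rest stayed zero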
	MPI_Allreduce(MULTIDIM_ARRAY(features), MULTIDIM_ARRAY(allnodes_features), MULTIDIM_SIZE(features), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	features = allnodes_features;

	// Only the master writes out files (verb > 0 only on the master)
	if (verb > 0)
	{
		normaliseFeatures();

		writeFeatures();
	}

}
void ParticlePolisherMpi::polishParticlesAllMicrographs()
{

	if (!do_start_all_over && exists(fn_out + ".star"))
	{
		if (verb > 0)
			std::cout << std::endl << " + " << fn_out << ".star already exists: skipping polishing of the particles." << std::endl;
		return;
	}

	int total_nr_micrographs = exp_model.average_micrographs.size();

	// Each node does part of the work
	long int my_first_micrograph, my_last_micrograph, my_nr_micrographs;
	divide_equally(total_nr_micrographs, node->size, node->rank, my_first_micrograph, my_last_micrograph);
	my_nr_micrographs = my_last_micrograph - my_first_micrograph + 1;

	// Loop over all average micrographs
	int barstep;
	if (verb > 0)
	{
		std::cout << " + Write out polished particles for all micrographs ... " << std::endl;
		init_progress_bar(my_nr_micrographs);
		barstep = XMIPP_MAX(1, my_nr_micrographs/ 60);
	}

    for (long int i = my_first_micrograph; i <= my_last_micrograph; i++)
	{
    	if (verb > 0 && i % barstep == 0)
			progress_bar(i);

    	polishParticlesOneMicrograph(i);
	}

   	if (verb > 0)
   		progress_bar(my_nr_micrographs);

    if (node->isMaster())
    	writeStarFilePolishedParticles();

    MPI_Barrier(MPI_COMM_WORLD);

}
// Outliers ===============================================================
void ProgClassifyCL2DCore::computeCores()
{
    if (verbose && node->rank==0)
        std::cerr << "Computing cores ...\n";
    ProgAnalyzeCluster analyzeCluster;
    analyzeCluster.verbose=0;
    analyzeCluster.NPCA=NPCA;
    analyzeCluster.Niter=10;
    analyzeCluster.distThreshold=thPCAZscore;
    analyzeCluster.dontMask=false;

    MetaData MD;
    size_t first, last;
    size_t Nblocks=blocks.size();
    if (verbose && node->rank==0)
        init_progress_bar(Nblocks);
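    // Each getTasks call is assumed to hand this worker a contiguous, inclusive [first, last] range of blocks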
    while (taskDistributor->getTasks(first, last))
        for (size_t idx=first; idx<=last; ++idx)
        {
            // Remove outliers in the PCA projection
            analyzeCluster.SFin.clear();
            analyzeCluster.fnSel=blocks[idx].block+"@"+blocks[idx].fnLevel;
            analyzeCluster.fnOut=blocks[idx].fnLevel.insertBeforeExtension((String)"_core_"+blocks[idx].block);
            analyzeCluster.run();

            // Remove outliers from file
            MD.read(analyzeCluster.fnOut);
            MD.removeDisabled();
            MD.write(analyzeCluster.fnOut,MD_APPEND);

            if (verbose && node->rank==0)
                progress_bar(idx);
        }
    taskDistributor->wait();
    if (verbose && node->rank==0)
        progress_bar(Nblocks);

    // Gather all results
    gatherResults(0,"core");
}
void ProgXrayImport::run()
{
    // Delete output stack if it exists
    fnOut = fnRoot + ".mrc";
    fnOut.deleteFile();

    /* Turn off error handling */
    H5Eset_auto(H5E_DEFAULT, NULL, NULL);

    if (dSource == MISTRAL)
        H5File.openFile(fnInput, H5F_ACC_RDONLY);


    // Reading bad pixels mask
    if ( !fnBPMask.empty() )
    {
        std::cerr << "Reading bad pixels mask from "+fnBPMask << "." << std::endl;
        bpMask.read(fnBPMask);
        if ( (cropSizeX + cropSizeY ) > 0 )
            bpMask().selfWindow(cropSizeY,cropSizeX,
                                (int)(YSIZE(bpMask())-cropSizeY-1),(int)(XSIZE(bpMask())-cropSizeX-1));
        STARTINGX(bpMask()) = STARTINGY(bpMask()) = 0;
    }


    // Setting the image projections list
    switch (dSource)
    {
    case MISTRAL:
        {
            inMD.read(fnInput);
            H5File.getDataset("NXtomo/data/rotation_angle", anglesArray, false);
            H5File.getDataset("NXtomo/instrument/sample/ExpTimes", expTimeArray, false);
            H5File.getDataset("NXtomo/instrument/sample/current", cBeamArray);

            /* In case there is no angle information, we set it to an increasing sequence
             * just to be able to continue importing data */
            if ( anglesArray.size() != inMD.size() )
            {
                reportWarning("Input file does not contain angle information. Default sequence used.");
                anglesArray.resizeNoCopy(inMD.size());
                anglesArray.enumerate();
            }

            // If expTime is empty or holds only a single value in the nexus file, then we fill it with 1
            if (expTimeArray.size() < 2)
            {
                reportWarning("Input file does not contain tomogram exposure time information.");
                expTimeArray.initConstant(anglesArray.size(), 1.);
            }
            // If current is empty or holds only a single value in the nexus file, then we fill it with 1
            if (cBeamArray.size() < 2)
            {
                reportWarning("Input file does not contain tomogram beam current information.");
                cBeamArray.initConstant(anglesArray.size(), 1.);
            }
            // Since Alba does not provide the slit width, we set it to ones
            slitWidthArray.initConstant(anglesArray.size(), 1.);
        }
        break;
    case BESSY:
        {
            size_t objId;

            for (size_t i = tIni; i <= tEnd; ++i)
            {
                objId = inMD.addObject();
                inMD.setValue(MDL_IMAGE, fnInput + formatString("/img%d.spe", i), objId);
            }
            break;
        }
    case GENERIC:
        {
            // Get Darkfield
            std::cerr << "Getting darkfield from "+fnInput << " ..." << std::endl;
            getDarkfield(fnInput, IavgDark);
            if (XSIZE(IavgDark())!=0)
                IavgDark.write(fnRoot+"_darkfield.xmp");


            std::vector<FileName> listDir;
            fnInput.getFiles(listDir);
            size_t objId;

            for (size_t i = 0; i < listDir.size(); ++i)
            {
                if (!listDir[i].hasImageExtension())
                    continue;
                objId = inMD.addObject();
                inMD.setValue(MDL_IMAGE, fnInput+"/"+listDir[i], objId);
            }
        }
        break;
    }

    inMD.findObjects(objIds);
    size_t nIm = inMD.size();

    // Create empty output stack file

    getImageInfo(inMD, imgInfo);


    /* Get the flatfield: we get the FF after the image list because we need the image size to adapt the FF
     * in case the images were already cropped.
     */
    if (!fnFlat.empty())
    {
        std::cout << "Getting flatfield from "+fnFlat << " ..." << std::endl;
        getFlatfield(fnFlat,IavgFlat);
        if ( XSIZE(IavgFlat()) != 0 )
        {
            FileName ffName = fnRoot+"_flatfield_avg.xmp";
            IavgFlat.write(ffName);
            fMD.setValue(MDL_IMAGE, ffName, fMD.addObject());
        }
    }

    createEmptyFile(fnOut, imgInfo.adim.xdim-cropSizeXi-cropSizeXe, imgInfo.adim.ydim-cropSizeYi-cropSizeYe, 1, nIm);

    // Process images
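    // Split the nIm images into thread tasks of roughly nIm/30 images each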
    td = new ThreadTaskDistributor(nIm, XMIPP_MAX(1, nIm/30));
    tm = new ThreadManager(thrNum, this);
    std::cerr << "Getting data from " << fnInput << " ...\n";
    init_progress_bar(nIm);
    tm->run(runThread);
    progress_bar(nIm);

    // Write Metadata and angles
    MetaData MDSorted;
    MDSorted.sort(outMD,MDL_ANGLE_TILT);
    MDSorted.write("tomo@"+fnRoot + ".xmd");
    if ( fMD.size() > 0 )
        fMD.write("flatfield@"+fnRoot + ".xmd", MD_APPEND);

    // We also reference initial and final images at 0 degrees for Mistral tomograms
    if ( dSource == MISTRAL )
    {
        fMD.clear();
        FileName degree0Fn = "NXtomo/instrument/sample/0_degrees_initial_image";
        if ( H5File.checkDataset(degree0Fn.c_str()))
            fMD.setValue(MDL_IMAGE, degree0Fn + "@" + fnInput, fMD.addObject());
        degree0Fn = "NXtomo/instrument/sample/0_degrees_final_image";
        if ( H5File.checkDataset(degree0Fn.c_str()))
            fMD.setValue(MDL_IMAGE, degree0Fn + "@" + fnInput, fMD.addObject());
        if ( fMD.size() > 0 )
            fMD.write("degree0@"+fnRoot + ".xmd", MD_APPEND);
    }

    // Write tlt file for IMOD
    std::ofstream fhTlt;
    fhTlt.open((fnRoot+".tlt").c_str());
    if (!fhTlt)
        REPORT_ERROR(ERR_IO_NOWRITE,fnRoot+".tlt");
    FOR_ALL_OBJECTS_IN_METADATA(MDSorted)
    {
        double tilt;
        MDSorted.getValue(MDL_ANGLE_TILT,tilt,__iter.objId);
        fhTlt << tilt << std::endl;
    }
    fhTlt.close();
    delete td;
    delete tm;
}
// Fit the beam-induced translations for all average micrographs
void ParticlePolisherMpi::fitMovementsAllMicrographs()
{

	int total_nr_micrographs = exp_model.average_micrographs.size();

	// Each node does part of the work
	long int my_first_micrograph, my_last_micrograph, my_nr_micrographs;
	divide_equally(total_nr_micrographs, node->size, node->rank, my_first_micrograph, my_last_micrograph);
	my_nr_micrographs = my_last_micrograph - my_first_micrograph + 1;

	// Loop over all average micrographs
	int barstep;
	if (verb > 0)
	{
		std::cout << " + Fitting straight paths for beam-induced movements in all micrographs ... " << std::endl;
		init_progress_bar(my_nr_micrographs);
		barstep = XMIPP_MAX(1, my_nr_micrographs/ 60);
	}

	for (long int i = my_first_micrograph; i <= my_last_micrograph; i++)
	{
    	if (verb > 0 && i % barstep == 0)
			progress_bar(i);

		fitMovementsOneMicrograph(i);
	}

	// Wait until all micrographs have been done
	MPI_Barrier(MPI_COMM_WORLD);

	if (verb > 0)
	{
		progress_bar(my_nr_micrographs);
	}

	// Combine results from all nodes
	MultidimArray<DOUBLE> allnodes_fitted_movements;
	allnodes_fitted_movements.resize(fitted_movements);
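	// Each rank only filled the movements of its own micrographs; summing (assuming the other entries stay zero) merges the results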
	MPI_Allreduce(MULTIDIM_ARRAY(fitted_movements), MULTIDIM_ARRAY(allnodes_fitted_movements), MULTIDIM_SIZE(fitted_movements), MY_MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	fitted_movements = allnodes_fitted_movements;

    // Set the fitted movements in the xoff and yoff columns of the exp_model.MDimg
    for (long int ipart = 0; ipart < exp_model.numberOfParticles(); ipart++)
	{
		long int part_id = exp_model.particles[ipart].id;
		DOUBLE xoff = DIRECT_A2D_ELEM(fitted_movements, part_id, 0);
		DOUBLE yoff = DIRECT_A2D_ELEM(fitted_movements, part_id, 1);
		exp_model.MDimg.setValue(EMDL_ORIENT_ORIGIN_X, xoff, part_id);
		exp_model.MDimg.setValue(EMDL_ORIENT_ORIGIN_Y, yoff, part_id);
	}

    if (node->isMaster())
    {
		// Write out the STAR file with all the fitted movements
		FileName fn_tmp = fn_in.withoutExtension() + "_" + fn_out + ".star";
		exp_model.MDimg.write(fn_tmp);
		std::cout << " + Written out all fitted movements in STAR file: " << fn_tmp << std::endl;
    }


}
void ParticlePolisherMpi::optimiseBeamTilt()
{

	// This function assumes the shiny particles are in exp_model.MDimg!!

	if (beamtilt_max <= 0. && defocus_shift_max <= 0.)
		return;

	if (minres_beamtilt < maxres_model)
	{
		if (verb > 0)
			std::cout << " Skipping beamtilt correction, as the resolution of the shiny reconstruction  does not go beyond minres_beamtilt of " << minres_beamtilt << " Ang." << std::endl;
		return;
	}

	getBeamTiltGroups();

	initialiseSquaredDifferenceVectors();

	int total_nr_micrographs = exp_model.micrographs.size();

	// Each node does part of the work
	long int my_first_micrograph, my_last_micrograph, my_nr_micrographs;
	divide_equally(total_nr_micrographs, node->size, node->rank, my_first_micrograph, my_last_micrograph);
	my_nr_micrographs = my_last_micrograph - my_first_micrograph + 1;

	// Loop over all average micrographs
	int barstep;
	if (verb > 0)
	{
		std::cout << " + Optimising beamtilts and/or defocus values in all micrographs ... " << std::endl;
		init_progress_bar(my_nr_micrographs);
		barstep = XMIPP_MAX(1, my_nr_micrographs/ 60);
	}

    for (long int i = my_first_micrograph; i <= my_last_micrograph; i++)
	{
    	if (verb > 0 && i % barstep == 0)
			progress_bar(i);

    	optimiseBeamTiltAndDefocusOneMicrograph(i);
	}

   	if (verb > 0)
   		progress_bar(my_nr_micrographs);

	// Combine results from all nodes
	if (beamtilt_max > 0.)
	{
		MultidimArray<DOUBLE> allnodes_diff2_beamtilt;
		allnodes_diff2_beamtilt.initZeros(diff2_beamtilt);
		MPI_Allreduce(MULTIDIM_ARRAY(diff2_beamtilt), MULTIDIM_ARRAY(allnodes_diff2_beamtilt), MULTIDIM_SIZE(diff2_beamtilt), MY_MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
		diff2_beamtilt = allnodes_diff2_beamtilt;
	}

	if (defocus_shift_max > 0.)
	{
		MultidimArray<DOUBLE> allnodes_defocus_shift_allmics;
		allnodes_defocus_shift_allmics.initZeros(defocus_shift_allmics);
		MPI_Allreduce(MULTIDIM_ARRAY(defocus_shift_allmics), MULTIDIM_ARRAY(allnodes_defocus_shift_allmics), MULTIDIM_SIZE(defocus_shift_allmics), MY_MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
		defocus_shift_allmics = allnodes_defocus_shift_allmics;
	}

	// Now get the final optimised beamtilts and defocus shifts, and write results to the MetadataTable
	applyOptimisedBeamTiltsAndDefocus();

	// Write the new MDTable to disc
	if (verb > 0)
		exp_model.MDimg.write(fn_out + ".star");

}
void cmd_savefile(WINDOW *window, ToxWindow *self, Tox *m, int argc, char (*argv)[MAX_STR_SIZE])
{
    if (argc < 1) {
        line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File ID required.");
        return;
    }

    long int idx = strtol(argv[1], NULL, 10);

    if ((idx == 0 && strcmp(argv[1], "0")) || idx < 0 || idx >= MAX_FILES) {
        line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "No pending file transfers with that ID.");
        return;
    }

    struct FileTransfer *ft = get_file_transfer_struct_index(self->num, idx, FILE_TRANSFER_RECV);

    if (!ft) {
        line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "No pending file transfers with that ID.");
        return;
    }

    if (ft->state != FILE_TRANSFER_PENDING) {
        line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "No pending file transfers with that ID.");
        return;
    }

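    /* Open in append mode, presumably so a partially received transfer can be resumed */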
    if ((ft->file = fopen(ft->file_path, "a")) == NULL) {
        const char *msg =  "File transfer failed: Invalid file path.";
        close_file_transfer(self, m, ft, TOX_FILE_CONTROL_CANCEL, msg, notif_error);
        return;
    }

    TOX_ERR_FILE_CONTROL err;
    tox_file_control(m, self->num, ft->filenum, TOX_FILE_CONTROL_RESUME, &err);

    if (err != TOX_ERR_FILE_CONTROL_OK)
        goto on_recv_error;

    line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "Saving file [%ld] as: '%s'", idx, ft->file_path);

    /* prep progress bar line */
    char progline[MAX_STR_SIZE];
    init_progress_bar(progline);
    line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "%s", progline);

    ft->line_id = self->chatwin->hst->line_end->id + 2;
    ft->state = FILE_TRANSFER_STARTED;

    return;

on_recv_error:

    switch (err) {
        case TOX_ERR_FILE_CONTROL_FRIEND_NOT_FOUND:
            line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File transfer failed: Friend not found.");
            return;

        case TOX_ERR_FILE_CONTROL_FRIEND_NOT_CONNECTED:
            line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File transfer failed: Friend is not online.");
            return;

        case TOX_ERR_FILE_CONTROL_NOT_FOUND:
            line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File transfer failed: Invalid filenumber.");
            return;

        case TOX_ERR_FILE_CONTROL_SENDQ:
            line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File transfer failed: Connection error.");
            return;

        default:
            line_info_add(self, NULL, NULL, NULL, SYS_MSG, 0, 0, "File transfer failed (error %d)\n", err);
            return;
    }
}
/*
 * Find/display global/local variables which own the most heap memory in bytes
 */
CA_BOOL biggest_heap_owners_generic(unsigned int num, CA_BOOL all_reachable_blocks)
{
	CA_BOOL rc = CA_FALSE;
	unsigned int i;
	int nregs = 0;
	struct reg_value *regs_buf = NULL;
	size_t ptr_sz = g_ptr_bit >> 3;
	struct heap_owner *owners;
	struct heap_owner *smallest;

	struct ca_segment *segment;
	size_t total_bytes = 0;
	size_t processed_bytes = 0;

	struct inuse_block *inuse_blocks = NULL;
	unsigned long num_inuse_blocks;
	unsigned long inuse_index;

	struct inuse_block *blk;
	struct object_reference ref;
	size_t aggr_size;
	unsigned long aggr_count;
	address_t start, end, cursor;

	// Allocate an array for the biggest num of owners
	if (num == 0)
		return CA_FALSE;
	owners = (struct heap_owner *) calloc(num, sizeof(struct heap_owner));
	if (!owners)
		goto clean_out;
	smallest = &owners[num - 1];
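	// owners[] is assumed to be kept ordered by aggregated size, so the last slot always holds the current smallest of the top-N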

	// First, create and populate an array of all in-use blocks
	inuse_blocks = build_inuse_heap_blocks(&num_inuse_blocks);
	if (!inuse_blocks || num_inuse_blocks == 0)
	{
		CA_PRINT("Failed: no in-use heap block is found\n");
		goto clean_out;
	}

	// estimate the work to enable progress bar
	for (i=0; i<g_segment_count; i++)
	{
		segment = &g_segments[i];
		if (segment->m_type == ENUM_STACK || segment->m_type == ENUM_MODULE_DATA)
			total_bytes += segment->m_fsize;
	}
	init_progress_bar(total_bytes);

	// Walk through all segments of threads' registers/stacks or globals
	for (i=0; i<g_segment_count; i++)
	{
		// bail out if the user interrupts the long search
		if (user_request_break())
		{
			CA_PRINT("Abort searching biggest heap memory owners\n");
			goto clean_out;
		}

		// Only thread stack and global .data sections are considered
		segment = &g_segments[i];
		if (segment->m_type == ENUM_STACK || segment->m_type == ENUM_MODULE_DATA)
		{
			int tid = 0;
			// check registers if it is a thread's stack segment
			if (segment->m_type == ENUM_STACK)
			{
				tid = get_thread_id (segment);
				// allocate register value buffer for once
				if (!nregs && !regs_buf)
				{
					nregs = read_registers (NULL, NULL, 0);
					if (nregs)
						regs_buf = (struct reg_value*) malloc(nregs * sizeof(struct reg_value));
				}
				// check each register for heap reference
				if (nregs && regs_buf)
				{
					int k;
					int nread = read_registers (segment, regs_buf, nregs);
					for (k = 0; k < nread; k++)
					{
						if (regs_buf[k].reg_width == ptr_sz)
						{
							blk = find_inuse_block(regs_buf[k].value, inuse_blocks, num_inuse_blocks);
							if (blk)
							{
								ref.storage_type = ENUM_REGISTER;
								ref.vaddr = 0;
								ref.value = blk->addr;
								ref.where.reg.tid = tid;
								ref.where.reg.reg_num = k;
								ref.where.reg.name = NULL;
								calc_aggregate_size(&ref, ptr_sz, all_reachable_blocks, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
								if (aggr_size > smallest->aggr_size)
								{
									struct heap_owner newowner;
									newowner.ref = ref;
									newowner.aggr_size = aggr_size;
									newowner.aggr_count = aggr_count;
									add_owner(owners, num, &newowner);
								}
							}
						}
					}
				}
			}

			// Calculate the memory region to search
			if (segment->m_type == ENUM_STACK)
			{
				start = get_rsp(segment);
				if (start < segment->m_vaddr || start >= segment->m_vaddr + segment->m_vsize)
					start = segment->m_vaddr;
				if (start - segment->m_vaddr >= segment->m_fsize)
					end = start;
				else
					end = segment->m_vaddr + segment->m_fsize;
			}
			else if (segment->m_type == ENUM_MODULE_DATA)
			{
				start = segment->m_vaddr;
				end = segment->m_vaddr + segment->m_fsize;
			}
			else
				continue;

			// Evaluate each variable or raw pointer in the target memory region
			cursor = ALIGN(start, ptr_sz);
			while (cursor < end)
			{
				size_t val_len = ptr_sz;
				address_t sym_addr;
				size_t    sym_sz;
				CA_BOOL known_sym = CA_FALSE;

				// If the address belongs to a known variable, include all its subfields
				// FIXME
				// consider subfields that are of pointer-like types, however, it will miss
				// references in an unstructured buffer
				ref.storage_type = segment->m_type;
				ref.vaddr = cursor;
				if (segment->m_type == ENUM_STACK)
				{
					ref.where.stack.tid = tid;
					ref.where.stack.frame = get_frame_number(segment, cursor, &ref.where.stack.offset);
					if (known_stack_sym(&ref, &sym_addr, &sym_sz) && sym_sz)
						known_sym = CA_TRUE;
				}
				else if (segment->m_type == ENUM_MODULE_DATA)
				{
					ref.where.module.base = segment->m_vaddr;
					ref.where.module.size = segment->m_vsize;
					ref.where.module.name = segment->m_module_name;
					if (known_global_sym(&ref, &sym_addr, &sym_sz) && sym_sz)
						known_sym = CA_TRUE;
				}
				if (known_sym)
				{
					if (cursor != sym_addr)
						ref.vaddr = cursor = sym_addr;	// we should never come to here!
					val_len = sym_sz;
				}

				// Query heap for aggregated memory size/count originated from the candidate variable
				if (val_len >= ptr_sz)
				{
					calc_aggregate_size(&ref, val_len, all_reachable_blocks, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
					// update the top list if applies
					if (aggr_size >= smallest->aggr_size)
					{
						struct heap_owner newowner;
						if (val_len == ptr_sz)
							read_memory_wrapper(NULL, ref.vaddr, (void*)&ref.value, ptr_sz);
						else
							ref.value = 0;
						newowner.ref = ref;
						newowner.aggr_size = aggr_size;
						newowner.aggr_count = aggr_count;
						add_owner(owners, num, &newowner);
					}
				}
				cursor = ALIGN(cursor + val_len, ptr_sz);
			}
			processed_bytes += segment->m_fsize;
			set_current_progress(processed_bytes);
		}
	}
	end_progress_bar();

	if (!all_reachable_blocks)
	{
		// Big memory blocks may be referenced indirectly by local/global variables
		// check all in-use blocks
		for (inuse_index = 0; inuse_index < num_inuse_blocks; inuse_index++)
		{
			blk = &inuse_blocks[inuse_index];
			ref.storage_type = ENUM_HEAP;
			ref.vaddr = blk->addr;
			ref.where.heap.addr = blk->addr;
			ref.where.heap.size = blk->size;
			ref.where.heap.inuse = 1;
			calc_aggregate_size(&ref, ptr_sz, CA_FALSE, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
			// update the top list if applies
			if (aggr_size >= smallest->aggr_size)
			{
				struct heap_owner newowner;
				ref.value = 0;
				newowner.ref = ref;
				newowner.aggr_size = aggr_size;
				newowner.aggr_count = aggr_count;
				add_owner(owners, num, &newowner);
			}
		}
	}

	// Print the result
	for (i = 0; i < num; i++)
	{
		struct heap_owner *owner = &owners[i];
		if (owner->aggr_size)
		{
			CA_PRINT("[%d] ", i+1);
			print_ref(&owner->ref, 0, CA_FALSE, CA_FALSE);
			CA_PRINT("    |--> ");
			print_size(owner->aggr_size);
			CA_PRINT(" (%ld blocks)\n", owner->aggr_count);
		}
	}
	rc = CA_TRUE;

clean_out:
	// clean up
	if (regs_buf)
		free (regs_buf);
	if (owners)
		free (owners);
	if (inuse_blocks)
		free_inuse_heap_blocks (inuse_blocks, num_inuse_blocks);

	return rc;
}
    void run ()
    {
        mask.allowed_data_types = INT_MASK;

        // Main program =========================================================
        params.V1.read(fn1);
        params.V1().setXmippOrigin();
        params.V2.read(fn2);
        params.V2().setXmippOrigin();

        // Initialize best_fit
        double best_fit = 1e38;
        Matrix1D<double> best_align(8);
        bool first = true;

        // Generate mask
        if (mask_enabled)
        {
            mask.generate_mask(params.V1());
            params.mask_ptr = &(mask.get_binary_mask());
        }
        else
            params.mask_ptr = NULL;

        // Exhaustive search
        if (!usePowell && !useFRM)
        {
            // Count number of iterations
            int times = 1;
            if (!tell)
            {
                if (grey_scale0 != grey_scaleF)
                    times *= FLOOR(1 + (grey_scaleF - grey_scale0) / step_grey);
                if (grey_shift0 != grey_shiftF)
                    times *= FLOOR(1 + (grey_shiftF - grey_shift0) / step_grey_shift);
                if (rot0 != rotF)
                    times *= FLOOR(1 + (rotF - rot0) / step_rot);
                if (tilt0 != tiltF)
                    times *= FLOOR(1 + (tiltF - tilt0) / step_tilt);
                if (psi0 != psiF)
                    times *= FLOOR(1 + (psiF - psi0) / step_psi);
                if (scale0 != scaleF)
                    times *= FLOOR(1 + (scaleF - scale0) / step_scale);
                if (z0 != zF)
                    times *= FLOOR(1 + (zF - z0) / step_z);
                if (y0 != yF)
                    times *= FLOOR(1 + (yF - y0) / step_y);
                if (x0 != xF)
                    times *= FLOOR(1 + (xF - x0) / step_x);
                init_progress_bar(times);
            }
            else
                std::cout << "#grey_factor rot tilt psi scale z y x fitness\n";

            // Iterate
            int itime = 0;
            int step_time = CEIL((double)times / 60.0);
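            // step_time yields roughly 60 progress-bar updates over the whole search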
            Matrix1D<double> r(3);
            Matrix1D<double> trial(9);
            for (double grey_scale = grey_scale0; grey_scale <= grey_scaleF ; grey_scale += step_grey)
                for (double grey_shift = grey_shift0; grey_shift <= grey_shiftF ; grey_shift += step_grey_shift)
                    for (double rot = rot0; rot <= rotF ; rot += step_rot)
                        for (double tilt = tilt0; tilt <= tiltF ; tilt += step_tilt)
                            for (double psi = psi0; psi <= psiF ; psi += step_psi)
                                for (double scale = scale0; scale <= scaleF ; scale += step_scale)
                                    for (ZZ(r) = z0; ZZ(r) <= zF ; ZZ(r) += step_z)
                                        for (YY(r) = y0; YY(r) <= yF ; YY(r) += step_y)
                                            for (XX(r) = x0; XX(r) <= xF ; XX(r) += step_x)
                                            {
                                                // Form trial vector
                                                trial(0) = grey_scale;
                                                trial(1) = grey_shift;
                                                trial(2) = rot;
                                                trial(3) = tilt;
                                                trial(4) = psi;
                                                trial(5) = scale;
                                                trial(6) = ZZ(r);
                                                trial(7) = YY(r);
                                                trial(8) = XX(r);

                                                // Evaluate
                                                double fit = fitness(MATRIX1D_ARRAY(trial));

                                                // The best?
                                                if (fit < best_fit || first)
                                                {
                                                    best_fit = fit;
                                                    best_align = trial;
                                                    first = false;
                                                    if (tell)
                                                    	std::cout << "Best so far\n";
                                                }

                                                // Show fit
                                                if (tell)
                                                    std::cout << trial << " " << fit << std::endl;
                                                else
                                                    if (++itime % step_time == 0)
                                                        progress_bar(itime);
                                            }
            if (!tell)
                progress_bar(times);
        }
        else if (usePowell)
        {
            // Use Powell optimization
            Matrix1D<double> x(9), steps(9);
            double fitness;
            int iter;
            steps.initConstant(1);
            if (onlyShift)
                steps(0)=steps(1)=steps(2)=steps(3)=steps(4)=steps(5)=0;
            if (params.alignment_method == COVARIANCE)
                steps(0)=steps(1)=0;
            x(0)=grey_scale0;
            x(1)=grey_shift0;
            x(2)=rot0;
            x(3)=tilt0;
            x(4)=psi0;
            x(5)=scale0;
            x(6)=z0;
            x(7)=y0;
            x(8)=x0;

            powellOptimizer(x,1,9,&wrapperFitness,NULL,0.01,fitness,iter,steps,true);
            best_align=x;
            best_fit=fitness;
            first=false;
        }
        else if (useFRM)
        {
    		String scipionPython;
    		initializeScipionPython(scipionPython);
    		PyObject * pFunc = getPointerToPythonFRMFunction();
    		double rot,tilt,psi,x,y,z,score;
    		Matrix2D<double> A;
    		alignVolumesFRM(pFunc, params.V1(), params.V2(), Py_None, rot,tilt,psi,x,y,z,score,A,maxShift,maxFreq,params.mask_ptr);
    		best_align.initZeros(9);
    		best_align(0)=1; // Gray scale
    		best_align(1)=0; // Gray shift
    		best_align(2)=rot;
    		best_align(3)=tilt;
    		best_align(4)=psi;
    		best_align(5)=1; // Scale
    		best_align(6)=z;
    		best_align(7)=y;
    		best_align(8)=x;
    		best_fit=-score;
        }

        if (!first)
            std::cout << "The best correlation is for\n"
            << "Scale                  : " << best_align(5) << std::endl
            << "Translation (X,Y,Z)    : " << best_align(8)
            << " " << best_align(7) << " " << best_align(6)
            << std::endl
            << "Rotation (rot,tilt,psi): "
            << best_align(2) << " " << best_align(3) << " "
            << best_align(4) << std::endl
            << "Best grey scale       : " << best_align(0) << std::endl
            << "Best grey shift       : " << best_align(1) << std::endl
            << "Fitness value         : " << best_fit << std::endl;
        Matrix1D<double> r(3);
        XX(r)            = best_align(8);
        YY(r)            = best_align(7);
        ZZ(r)            = best_align(6);
        Matrix2D<double> A,Aaux;
        Euler_angles2matrix(best_align(2), best_align(3), best_align(4),
                            A, true);
        translation3DMatrix(r,Aaux);
        A = A * Aaux;
        scale3DMatrix(vectorR3(best_align(5), best_align(5), best_align(5)),Aaux);
        A = A * Aaux;
        if (verbose!=0)
			std::cout << "xmipp_transform_geometry will require the following values"
					  << "\n   Angles: " << best_align(2) << " "
					  << best_align(3) << " " << best_align(4)
					  << "\n   Shifts: " << A(0,3) << " " << A(1,3) << " " << A(2,3)
					  << std::endl;
        if (apply)
        {
            applyTransformation(params.V2(),params.Vaux(),MATRIX1D_ARRAY(best_align));
            params.V2()=params.Vaux();
            params.V2.write(fnOut);
        }
    }
// majorAxis and minorAxis are the estimated particle size in px
void ProgSortByStatistics::processInprocessInputPrepareSPTH(MetaData &SF, bool trained)
{
    //#define DEBUG
    PCAMahalanobisAnalyzer tempPcaAnalyzer0;
    PCAMahalanobisAnalyzer tempPcaAnalyzer1;
    PCAMahalanobisAnalyzer tempPcaAnalyzer2;
    PCAMahalanobisAnalyzer tempPcaAnalyzer3;
    PCAMahalanobisAnalyzer tempPcaAnalyzer4;

    //Morphology
    tempPcaAnalyzer0.clear();
    //Signal to noise ratio
    tempPcaAnalyzer1.clear();
    tempPcaAnalyzer2.clear();
    tempPcaAnalyzer3.clear();
    //Histogram analysis, to detect black points and saturated parts
    tempPcaAnalyzer4.clear();

    double sign = 1;//;-1;
    int numNorm = 3;
    int numDescriptors0=numNorm;
    int numDescriptors2=4;
    int numDescriptors3=11;
    int numDescriptors4 = 10;

    MultidimArray<float> v0(numDescriptors0);
    MultidimArray<float> v2(numDescriptors2);
    MultidimArray<float> v3(numDescriptors3);
    MultidimArray<float> v4(numDescriptors4);

    if (verbose>0)
    {
        std::cout << " Sorting particle set by new xmipp method..." << std::endl;
    }

    int nr_imgs = SF.size();
    if (verbose>0)
        init_progress_bar(nr_imgs);

    int c = XMIPP_MAX(1, nr_imgs / 60);
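    // c: progress-bar update interval (roughly 60 updates over the whole particle set)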
    int imgno = 0, imgnoPCA=0;

    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    bool first=true;

    // We assume that there is at least one particle
    size_t Xdim, Ydim, Zdim, Ndim;
    getImageSize(SF,Xdim,Ydim,Zdim,Ndim);

    //Initialization:
    MultidimArray<double> nI, modI, tempI, tempM, ROI;
    MultidimArray<bool> mask;
    nI.resizeNoCopy(Ydim,Xdim);
    modI.resizeNoCopy(Ydim,Xdim);
    tempI.resizeNoCopy(Ydim,Xdim);
    tempM.resizeNoCopy(Ydim,Xdim);
    mask.resizeNoCopy(Ydim,Xdim);
    mask.initConstant(true);

    MultidimArray<double> autoCorr(2*Ydim,2*Xdim);
    MultidimArray<double> smallAutoCorr;

    Histogram1D hist;
    Matrix2D<double> U,V,temp;
    Matrix1D<double> D;

    MultidimArray<int> radial_count;
    MultidimArray<double> radial_avg;
    Matrix1D<int> center(2);
    MultidimArray<int> distance;
    int dim;
    center.initZeros();

    v0.initZeros(numDescriptors0);
    v2.initZeros(numDescriptors2);
    v3.initZeros(numDescriptors3);
    v4.initZeros(numDescriptors4);

    ROI.resizeNoCopy(Ydim,Xdim);
    ROI.setXmippOrigin();
    FOR_ALL_ELEMENTS_IN_ARRAY2D(ROI)
    {
        double temp = std::sqrt(i*i+j*j);
        if ( temp < (Xdim/2))
            A2D_ELEM(ROI,i,j)= 1;
        else
            A2D_ELEM(ROI,i,j)= 0;
    }

    Image<double> img;
    FourierTransformer transformer(FFTW_BACKWARD);

    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        if (thereIsEnable)
        {
            int enabled;
            SF.getValue(MDL_ENABLED,enabled,__iter.objId);
            if ( (enabled==-1)  )
            {
                imgno++;
                continue;
            }
        }

        img.readApplyGeo(SF,__iter.objId);
        if (targetXdim!=-1 && targetXdim!=XSIZE(img()))
        	selfScaleToSize(LINEAR,img(),targetXdim,targetXdim,1);

        MultidimArray<double> &mI=img();
        mI.setXmippOrigin();
        mI.statisticsAdjust(0,1);
        mask.setXmippOrigin();
        //The size of v1 depends on the image size and must be declared here
        int numDescriptors1 = XSIZE(mI)/2; //=100;
        MultidimArray<float> v1(numDescriptors1);
        v1.initZeros(numDescriptors1);

        double var = 1;
        normalize(transformer,mI,tempI,modI,0,var,mask);
        modI.setXmippOrigin();
        tempI.setXmippOrigin();
        nI = sign*tempI*(modI*modI);
        tempM = (modI*modI);

        A1D_ELEM(v0,0) = (tempM*ROI).sum();
        int index = 1;
        var+=2;
        while (index < numNorm)
        {
            normalize(transformer,mI,tempI,modI,0,var,mask);
            modI.setXmippOrigin();
            tempI.setXmippOrigin();
            nI += sign*tempI*(modI*modI);
            tempM += (modI*modI);
            A1D_ELEM(v0,index) = (tempM*ROI).sum();
            index++;
            var+=2;
        }

        nI /= tempM;
        tempPcaAnalyzer0.addVector(v0);
        nI=(nI*ROI);

        auto_correlation_matrix(mI,autoCorr);
        if (first)
        {
            radialAveragePrecomputeDistance(autoCorr, center, distance, dim);
            first=false;
        }
        fastRadialAverage(autoCorr, distance, dim, radial_avg, radial_count);

        for (int n = 0; n < numDescriptors1; ++n)
            A1D_ELEM(v1,n)=(float)DIRECT_A1D_ELEM(radial_avg,n);

        tempPcaAnalyzer1.addVector(v1);

#ifdef DEBUG

        //String name = "000005@Images/Extracted/run_002/extra/BPV_1386.stk";
        String name = "000010@Images/Extracted/run_001/extra/KLH_Dataset_I_Training_0028.stk";
        //String name = "001160@Images/Extracted/run_001/DefaultFamily5";

        std::cout << img.name() << std::endl;

        if (img.name()==name)
        {
            FileName fpName    = "test_1.txt";
            mI.write(fpName);
            fpName    = "test_2.txt";
            nI.write(fpName);
            fpName    = "test_3.txt";
            tempM.write(fpName);
            fpName    = "test_4.txt";
            ROI.write(fpName);
            //exit(1);
        }
#endif
        nI.binarize(0);
        int im = labelImage2D(nI,nI,8);
        compute_hist(nI, hist, 0, im, im+1);
        size_t l;
        int k,i,j;
        hist.maxIndex(l,k,i,j);
        A1D_ELEM(hist,j)=0;
        hist.maxIndex(l,k,i,j);
        nI.binarizeRange(j-1,j+1);

        double x0=0,y0=0,majorAxis=0,minorAxis=0,ellipAng=0;
        size_t area=0;
        fitEllipse(nI,x0,y0,majorAxis,minorAxis,ellipAng,area);

        A1D_ELEM(v2,0)=majorAxis/((img().xdim) );
        A1D_ELEM(v2,1)=minorAxis/((img().xdim) );
        A1D_ELEM(v2,2)= (fabs((img().xdim)/2-x0)+fabs((img().ydim)/2-y0))/((img().xdim)/2);
        A1D_ELEM(v2,3)=area/( (double)((img().xdim)/2)*((img().ydim)/2) );

        for (int n=0 ; n < numDescriptors2 ; n++)
        {
            if ( std::isnan(std::abs(A1D_ELEM(v2,n))))
                A1D_ELEM(v2,n)=0;
        }

        tempPcaAnalyzer2.addVector(v2);

        //mI.setXmippOrigin();
        //auto_correlation_matrix(mI*ROI,autoCorr);
        //auto_correlation_matrix(nI,autoCorr);
        autoCorr.window(smallAutoCorr,-5,-5, 5, 5);
        smallAutoCorr.copy(temp);
        svdcmp(temp,U,D,V);

        for (int n = 0; n < numDescriptors3; ++n)
            A1D_ELEM(v3,n)=(float)VEC_ELEM(D,n); //A1D_ELEM(v3,n)=(float)VEC_ELEM(D,n)/VEC_ELEM(D,0);

        tempPcaAnalyzer3.addVector(v3);


        double minVal=0.;
        double maxVal=0.;
        mI.computeDoubleMinMax(minVal,maxVal);
        compute_hist(mI, hist, minVal, maxVal, 100);

        for (int n=0 ; n <= numDescriptors4-1 ; n++)
        {
            A1D_ELEM(v4,n)= (hist.percentil((n+1)*10));
        }
        tempPcaAnalyzer4.addVector(v4);

#ifdef DEBUG

        if (img.name()==name)
        {
            FileName fpName    = "test.txt";
            mI.write(fpName);
            fpName    = "test3.txt";
            nI.write(fpName);
        }
#endif
        imgno++;
        imgnoPCA++;

        if (imgno % c == 0 && verbose>0)
            progress_bar(imgno);
    }

    tempPcaAnalyzer0.evaluateZScore(2,20,trained);
    tempPcaAnalyzer1.evaluateZScore(2,20,trained);
    tempPcaAnalyzer2.evaluateZScore(2,20,trained);
    tempPcaAnalyzer3.evaluateZScore(2,20,trained);
    tempPcaAnalyzer4.evaluateZScore(2,20,trained);

    pcaAnalyzer.push_back(tempPcaAnalyzer0);
    pcaAnalyzer.push_back(tempPcaAnalyzer1);
    pcaAnalyzer.push_back(tempPcaAnalyzer2);
    pcaAnalyzer.push_back(tempPcaAnalyzer3);
    pcaAnalyzer.push_back(tempPcaAnalyzer4);

}
void ProgSortByStatistics::processInputPrepare(MetaData &SF)
{
    PCAMahalanobisAnalyzer tempPcaAnalyzer;
    tempPcaAnalyzer.clear();

    Image<double> img;
    MultidimArray<double> img2;
    MultidimArray<int> radial_count;
    MultidimArray<double> radial_avg;
    Matrix1D<int> center(2);
    center.initZeros();

    if (verbose>0)
        std::cout << " Processing training set ..." << std::endl;

    int nr_imgs = SF.size();
    if (verbose>0)
        init_progress_bar(nr_imgs);
    int c = XMIPP_MAX(1, nr_imgs / 60);
    int imgno = 0, imgnoPCA=0;
    MultidimArray<float> v;
    MultidimArray<int> distance;
    int dim;

    bool thereIsEnable=SF.containsLabel(MDL_ENABLED);
    bool first=true;
    FOR_ALL_OBJECTS_IN_METADATA(SF)
    {
        if (thereIsEnable)
        {
            int enabled;
            SF.getValue(MDL_ENABLED,enabled,__iter.objId);
            if (enabled==-1)
                continue;
        }
        img.readApplyGeo(SF,__iter.objId);
        if (targetXdim!=-1 && targetXdim!=XSIZE(img()))
        	selfScaleToSize(LINEAR,img(),targetXdim,targetXdim,1);
        MultidimArray<double> &mI=img();
        mI.setXmippOrigin();
        mI.statisticsAdjust(0,1);

        // Overall statistics
        Histogram1D hist;
        compute_hist(mI,hist,-4,4,31);

        // Radial profile
        img2.resizeNoCopy(mI);
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(img2)
        {
            double val=DIRECT_MULTIDIM_ELEM(mI,n);
            DIRECT_MULTIDIM_ELEM(img2,n)=val*val;
        }
        if (first)
        {
            radialAveragePrecomputeDistance(img2, center, distance, dim);
            first=false;
        }
        fastRadialAverage(img2, distance, dim, radial_avg, radial_count);

        // Build vector
        v.initZeros(XSIZE(hist)+XSIZE(img2)/2);
        int idx=0;
        FOR_ALL_DIRECT_ELEMENTS_IN_ARRAY1D(hist)
        v(idx++)=(float)DIRECT_A1D_ELEM(hist,i);
        for (size_t i=0; i<XSIZE(img2)/2; i++)
            v(idx++)=(float)DIRECT_A1D_ELEM(radial_avg,i);

        tempPcaAnalyzer.addVector(v);

        if (imgno % c == 0 && verbose>0)
            progress_bar(imgno);
        imgno++;
        imgnoPCA++;
    }
    if (verbose>0)
        progress_bar(nr_imgs);

    MultidimArray<double> vavg,vstddev;
    tempPcaAnalyzer.computeStatistics(vavg,vstddev);
    tempPcaAnalyzer.evaluateZScore(2,20,false);
    pcaAnalyzer.insert(pcaAnalyzer.begin(), tempPcaAnalyzer);
}
	void run()
	{
		MD.read(fn_star);

		// Check for rlnImageName label
		if (!MD.containsLabel(EMDL_IMAGE_NAME))
			REPORT_ERROR("ERROR: Input STAR file does not contain the rlnImageName label");

		if (do_split_per_micrograph && !MD.containsLabel(EMDL_MICROGRAPH_NAME))
			REPORT_ERROR("ERROR: Input STAR file does not contain the rlnMicrographName label");

		Image<DOUBLE> in;
		FileName fn_img, fn_mic;
		std::vector<FileName> fn_mics;
		std::vector<int> mics_ndims;

		// First get number of images and their size
		int ndim=0;
		bool is_first=true;
		int xdim, ydim, zdim;
		FOR_ALL_OBJECTS_IN_METADATA_TABLE(MD)
		{
			if (is_first)
			{
				MD.getValue(EMDL_IMAGE_NAME, fn_img);
				in.read(fn_img);
				xdim=XSIZE(in());
				ydim=YSIZE(in());
				zdim=ZSIZE(in());
				is_first=false;
			}

			if (do_split_per_micrograph)
			{
				MD.getValue(EMDL_MICROGRAPH_NAME, fn_mic);
				bool have_found = false;
				for (int m = 0; m < fn_mics.size(); m++)
				{
					if (fn_mic == fn_mics[m])
					{
						have_found = true;
						mics_ndims[m]++;
						break;
					}
				}
				if (!have_found)
				{
					fn_mics.push_back(fn_mic);
					mics_ndims.push_back(1);
				}
			}
			ndim++;
		}


		// If not splitting, just fill fn_mics and mics_ndim with one entry (to re-use loop below)
		if (!do_split_per_micrograph)
		{
			fn_mics.push_back("");
			mics_ndims.push_back(ndim);
		}


		// Loop over all micrographs
		for (int m = 0; m < fn_mics.size(); m++)
		{
			ndim = mics_ndims[m];
			fn_mic = fn_mics[m];

			// Resize the output image
			std::cout << "Resizing the output stack to "<< ndim<<" images of size: "<<xdim<<"x"<<ydim<<"x"<<zdim << std::endl;
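			// Rough memory estimate, assuming 8 bytes (DOUBLE precision) per pixel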
			DOUBLE Gb = ndim*zdim*ydim*xdim*8./1024./1024./1024.;
			std::cout << "This will require " << Gb << "Gb of memory...."<< std::endl;
			Image<DOUBLE> out(xdim, ydim, zdim, ndim);

			int n = 0;
			init_progress_bar(ndim);
			FOR_ALL_OBJECTS_IN_METADATA_TABLE(MD)
			{
				FileName fn_mymic;
				if (do_split_per_micrograph)
					MD.getValue(EMDL_MICROGRAPH_NAME, fn_mymic);
				else
					fn_mymic="";

				if (fn_mymic == fn_mic)
				{

					MD.getValue(EMDL_IMAGE_NAME, fn_img);
					in.read(fn_img);

					if (do_apply_trans)
					{
						DOUBLE xoff = 0.;
						DOUBLE yoff = 0.;
						DOUBLE psi = 0.;
						MD.getValue(EMDL_ORIENT_ORIGIN_X, xoff);
						MD.getValue(EMDL_ORIENT_ORIGIN_Y, yoff);
						MD.getValue(EMDL_ORIENT_PSI, psi);
						// Apply the actual transformation
						Matrix2D<DOUBLE> A;
						rotation2DMatrix(psi, A);
					    MAT_ELEM(A,0, 2) = xoff;
					    MAT_ELEM(A,1, 2) = yoff;
					    selfApplyGeometry(in(), A, IS_NOT_INV, DONT_WRAP);
					}

					out().setImage(n, in());
					n++;
					if (n%100==0) progress_bar(n);

				}
			}
			progress_bar(ndim);


			FileName fn_out;
			if (do_split_per_micrograph)
			{
				// Remove any extensions from micrograph names....
				fn_out = fn_root + "_" + fn_mic.withoutExtension() + fn_ext;
			}
			else
				fn_out = fn_root + fn_ext;
			out.write(fn_out);
			std::cout << "Written out: " << fn_out << std::endl;
		}
		std::cout << "Done!" <<std::endl;
	}
	double optimiseTransformationMatrix(bool do_optimise_nr_pairs)
	{
		std::vector<int> best_pairs_t2u, best_map;
		double score, best_score, best_dist=9999.;
		if (do_optimise_nr_pairs)
			best_score = 0.;
		else
			best_score = -999999.;

		int nn = XMIPP_MAX(1., (rotF-rot0)/rotStep);
		nn *= XMIPP_MAX(1., (tiltF-tilt0)/tiltStep);
		nn *= XMIPP_MAX(1., (xF-x0)/xStep);
		nn *= XMIPP_MAX(1., (yF-y0)/yStep);
		int n = 0;
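		// nn is only an approximate count of grid points, used to scale the progress bar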
		init_progress_bar(nn);
		for (double rot = rot0; rot <= rotF; rot+= rotStep)
		{
			for (double tilt = tilt0; tilt <= tiltF; tilt+= tiltStep)
			{
				// Assume tilt-axis lies in-plane...
				double psi = -rot;
				// Rotate all points correspondingly
				Euler_angles2matrix(rot, tilt, psi, Pass);
				//std::cerr << " Pass= " << Pass << std::endl;
				// Zero-translations for now (these are added in the x-y loops below)
				MAT_ELEM(Pass, 0, 2) = MAT_ELEM(Pass, 1, 2) = 0.;
				mapOntoTilt();
				for (int x = x0; x <= xF; x += xStep)
				{
					for (int y = y0; y <= yF; y += yStep, n++)
					{
						if (do_optimise_nr_pairs)
							score = getNumberOfPairs(x, y);
						else
							score = -getAverageDistance(x, y); // negative because smaller distance is better!

						bool is_best = false;
						if (do_optimise_nr_pairs && score==best_score)
						{
							double dist = getAverageDistance(x, y);
							if (dist < best_dist)
							{
								best_dist = dist;
								is_best = true;
							}
						}
						if (score > best_score || is_best)
						{
							best_score = score;
							best_pairs_t2u = pairs_t2u;
							best_rot = rot;
							best_tilt = tilt;
							best_x = x;
							best_y = y;
						}
						if (n%1000==0) progress_bar(n);
					}
				}
			}
		}
		progress_bar(nn);
		// Update pairs with the best_pairs
		if (do_optimise_nr_pairs)
			pairs_t2u = best_pairs_t2u;

		// Update the Passing matrix and the mapping
		Euler_angles2matrix(best_rot, best_tilt, -best_rot, Pass);
		// Zero-translations for now (these are added in the x-y loops below)
		MAT_ELEM(Pass, 0, 2) = MAT_ELEM(Pass, 1, 2) = 0.;
		mapOntoTilt();
		return best_score;

	}
void ProgValidationNonTilt::run()
{
    //Clustering Tendency and Cluster Validity Stephen D. Scott
    randomize_random_generator();
    //char buffer[400];
    //sprintf(buffer, "xmipp_reconstruct_significant -i %s  --initvolumes %s --odir %s --sym  %s --iter 1 --alpha0 %f --angularSampling %f",fnIn.c_str(), fnInit.c_str(),fnDir.c_str(),fnSym.c_str(),alpha0,angularSampling);
    //system(buffer);

    MetaData md,mdOut,mdOut2;
    FileName fnMd,fnOut,fnOut2;
    fnMd = fnDir+"/angles_iter001_00.xmd";
    fnOut = fnDir+"/clusteringTendency.xmd";
    fnOut2 = fnDir+"/validation.xmd";
    size_t nSamplesRandom = 250;

    md.read(fnMd);
    size_t maxNImg;
    size_t sz = md.size();
    md.getValue(MDL_IMAGE_IDX,maxNImg,sz);

    String expression;
    MDRow rowP,row2;
    SymList SL;
    int symmetry, sym_order;
    SL.readSymmetryFile(fnSym.c_str());
    SL.isSymmetryGroup(fnSym.c_str(), symmetry, sym_order);

    double non_reduntant_area_of_sphere = SL.nonRedundantProjectionSphere(symmetry,sym_order);
    double area_of_sphere_no_symmetry = 4.*PI;
    double correction = std::sqrt(non_reduntant_area_of_sphere/area_of_sphere_no_symmetry);
    double validation = 0;

	MetaData tempMd;
	std::vector<double> sum_u(nSamplesRandom);
	//std::vector<double> sum_w(nSamplesRandom);
	double sum_w=0;
	std::vector<double> H0(nSamplesRandom);
	std::vector<double> H(nSamplesRandom);

	if (rank==0)
		init_progress_bar(maxNImg);

	for (size_t idx=0; idx<=maxNImg;idx++)
	{
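		// Simple round-robin distribution of image indices over the MPI ranks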
		if ((idx+1)%Nprocessors==rank)
		{
			expression = formatString("imageIndex == %lu",idx);
			tempMd.importObjects(md, MDExpression(expression));

			if (tempMd.size()==0)
				continue;

			//compute H_0 from noise
			obtainSumU(tempMd,sum_u,H0);
			//compute H from experimental
			obtainSumW(tempMd,sum_w,sum_u,H,correction);

			std::sort(H0.begin(),H0.end());
			std::sort(H.begin(),H.end());

			double P = 0;
			for(size_t j=0; j<sum_u.size();j++)
				P += H0.at(j)/H.at(j);

			P /= (nSamplesRandom);
			rowP.setValue(MDL_IMAGE_IDX,idx);
			rowP.setValue(MDL_WEIGHT,P);
			mdPartial.addRow(rowP);

			//sum_u.clear();
			//sum_w.clear();
			//H0.clear();
			//H.clear();
			tempMd.clear();

			if (rank==0)
				progress_bar(idx+1);
		}
	}

	if (rank==0)
		progress_bar(maxNImg);

	synchronize();
	gatherClusterability();

	if (rank == 0)
	{
		mdPartial.write(fnOut);
		std::vector<double> P;
		mdPartial.getColumnValues(MDL_WEIGHT,P);
		for (size_t idx=0; idx< P.size();idx++)
		{
			if (P[idx] > 1)
				validation += 1;
		}

		validation /= (maxNImg+1);

	}

    row2.setValue(MDL_IMAGE,fnInit);
    row2.setValue(MDL_WEIGHT,validation);
    mdOut2.addRow(row2);
    mdOut2.write(fnOut2);
}
void ProgValidationNonTilt::run()
{
    //Clustering Tendency and Cluster Validity Stephen D. Scott
    randomize_random_generator();
    MetaData md,mdGallery,mdOut,mdOut2,mdSort;
    MDRow row;

    FileName fnOut,fnOut2, fnGallery;
    fnOut = fnDir+"/clusteringTendency.xmd";
    fnGallery = fnDir+"/gallery.doc";
    fnOut2 = fnDir+"/validation.xmd";
    size_t nSamplesRandom = 500;

    md.read(fnParticles);
    mdGallery.read(fnGallery);
    mdSort.sort(md,MDL_IMAGE_IDX,true,-1,0);

    size_t maxNImg;
    size_t sz = md.size();

    if (useSignificant)
    	mdSort.getValue(MDL_IMAGE_IDX,maxNImg,sz);
    else
    {
    	mdSort.getValue(MDL_ITEM_ID,maxNImg,sz);
    }

    String expression;
    MDRow rowP,row2;
    SymList SL;
    int symmetry, sym_order;
    SL.readSymmetryFile(fnSym.c_str());
    SL.isSymmetryGroup(fnSym.c_str(), symmetry, sym_order);

/*
    double non_reduntant_area_of_sphere = SL.nonRedundantProjectionSphere(symmetry,sym_order);
    double area_of_sphere_no_symmetry = 4.*PI;
    double correction = std::sqrt(non_reduntant_area_of_sphere/area_of_sphere_no_symmetry);
*/
    double correction = 1;
    double validation = 0;
    double num_images = 0;

	MetaData tempMd;
	std::vector<double> sum_u(nSamplesRandom);
	double sum_w=0;
	std::vector<double> H0(nSamplesRandom);
	std::vector<double> H(nSamplesRandom);
	std::vector<double> p(nSamplesRandom);

	if (rank==0)
		init_progress_bar(maxNImg);

	for (size_t idx=0; idx<=maxNImg;idx++)
	{
		if ((idx)%Nprocessors==rank)
		{
			if (useSignificant)
				expression = formatString("imageIndex == %lu",idx);
			else
				expression = formatString("itemId == %lu",idx);

			tempMd.importObjects(md, MDExpression(expression));


			if (tempMd.size()==0)
				continue;

			//compute H_0 from noise
			obtainSumU_2(mdGallery, tempMd,sum_u,H0);
			//compute H from experimental
			obtainSumW(tempMd,sum_w,sum_u,H,correction);

			std::sort(H0.begin(),H0.end());
			std::sort(H.begin(),H.end());

			double P = 0;
			for(size_t j=0; j<sum_u.size();j++)
			{
				//P += H0.at(j)/H.at(j);
				P += H0.at(size_t((1-significance_noise)*nSamplesRandom))/H.at(j);
				p.at(j) = H0.at(j)/H.at(j);
			}

			P /= (nSamplesRandom);

			if (useSignificant)
				rowP.setValue(MDL_IMAGE_IDX,idx);
			else
				rowP.setValue(MDL_ITEM_ID,idx);

			rowP.setValue(MDL_WEIGHT,P);
			mdPartial.addRow(rowP);
			tempMd.clear();

			if (rank==0)
				progress_bar(idx+1);
		}
	}

	if (rank==0)
		progress_bar(maxNImg);

	synchronize();
	gatherClusterability();

	if (rank == 0)
	{
		mdPartial.write(fnOut);
		std::vector<double> P;
		mdPartial.getColumnValues(MDL_WEIGHT,P);

		for (size_t idx=0; idx< P.size();idx++)
		{
			if (P[idx] > 1)
				validation += 1.;
			num_images += 1.;
		}
		validation /= (num_images);

		row2.setValue(MDL_IMAGE,fnInit);
		row2.setValue(MDL_WEIGHT,validation);
		mdOut2.addRow(row2);
		mdOut2.write(fnOut2);
	}
}
void ParticlePolisherMpi::calculateAllSingleFrameReconstructionsAndBfactors()
{

	FileName fn_star = fn_in.withoutExtension() + "_" + fn_out + "_bfactors.star";
	if (!do_start_all_over && readStarFileBfactors(fn_star))
	{
		if (verb > 0)
			std::cout << " + " << fn_star << " already exists: skipping calculation of per-frame B-factors." << std::endl;
		return;
	}

	DOUBLE bfactor, offset, corr_coeff;

	int total_nr_frames = last_frame - first_frame + 1;
	long int my_first_frame, my_last_frame, my_nr_frames;

	// Loop over all frames (two halves for each frame!) to be included in the reconstruction
	// Each node does part of the work
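	// The factor 2 below covers the two independent half-sets: indices [0, total_nr_frames) map to half 1, the rest to half 2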
	divide_equally(2*total_nr_frames, node->size, node->rank, my_first_frame, my_last_frame);
	my_nr_frames = my_last_frame - my_first_frame + 1;

	if (verb > 0)
	{
		std::cout << " + Calculating per-frame reconstructions ... " << std::endl;
		init_progress_bar(my_nr_frames);
	}

	for (long int i = my_first_frame; i <= my_last_frame; i++)
	{

		int iframe = (i >= total_nr_frames) ? i - total_nr_frames : i;
		iframe += first_frame;
		int ihalf = (i >= total_nr_frames) ? 2 : 1;

		calculateSingleFrameReconstruction(iframe, ihalf);

    	if (verb > 0)
    		progress_bar(i - my_first_frame + 1);
	}

	if (verb > 0)
	{
		progress_bar(my_nr_frames);
	}

	MPI_Barrier(MPI_COMM_WORLD);

	// Also calculate the average of all single-frames for both halves
    if (node->rank == 0)
    	calculateAverageAllSingleFrameReconstructions(1);
    else if (node->rank == 1)
    	calculateAverageAllSingleFrameReconstructions(2);

	// Wait until all reconstructions have been done, and calculate the B-factors per-frame
	MPI_Barrier(MPI_COMM_WORLD);

	calculateBfactorSingleFrameReconstruction(-1, bfactor, offset, corr_coeff); // FSC between the two averages, also reads mask

	MPI_Barrier(MPI_COMM_WORLD);

	// Loop over all frames (two halves for each frame!) to be included in the reconstruction
	// Each node does part of the work
	divide_equally(total_nr_frames, node->size, node->rank, my_first_frame, my_last_frame);
	my_nr_frames = my_last_frame - my_first_frame + 1;

	if (verb > 0)
	{
		std::cout << " + Calculating per-frame B-factors ... " << std::endl;
		init_progress_bar(my_nr_frames);
	}

	for (long int i = first_frame+my_first_frame; i <= first_frame+my_last_frame; i++)
	{

		calculateBfactorSingleFrameReconstruction(i, bfactor, offset, corr_coeff);
		int iframe = i - first_frame;
		DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 0) = bfactor;
       	DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 1) = offset;
       	DIRECT_A1D_ELEM(perframe_bfactors, iframe * 3 + 2) = corr_coeff;

    	if (verb > 0)
    		progress_bar(i - first_frame - my_first_frame + 1);
	}

	// Combine results from all nodes
	MultidimArray<DOUBLE> allnodes_perframe_bfactors;
	allnodes_perframe_bfactors.resize(perframe_bfactors);
	MPI_Allreduce(MULTIDIM_ARRAY(perframe_bfactors), MULTIDIM_ARRAY(allnodes_perframe_bfactors), MULTIDIM_SIZE(perframe_bfactors), MY_MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	perframe_bfactors = allnodes_perframe_bfactors;

	if (verb > 0)
	{
		progress_bar(my_nr_frames);
		writeStarFileBfactors(fn_star);

	    // Also write a STAR file with the relative contributions of each frame to all frequencies
	    fn_star = fn_in.withoutExtension() + "_" + fn_out + "_relweights.star";
	    writeStarFileRelativeWeights(fn_star);
	}


}
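A minimal sketch of the index bookkeeping used in the first loop above: the 2*total_nr_frames work items are laid out as the first half-set followed by the second, so a global work index maps back to an absolute frame number and a half-set as follows (standalone illustration, not part of the original source):

#include <utility>

// Map a global work index in [0, 2*total_nr_frames) onto (iframe, ihalf):
// iframe is an absolute frame number starting at first_frame,
// ihalf is 1 for the first half-set and 2 for the second.
std::pair<int, int> frameAndHalf(long int i, int total_nr_frames, int first_frame)
{
    int iframe = (i >= total_nr_frames) ? int(i) - total_nr_frames : int(i);
    iframe += first_frame;
    int ihalf = (i >= total_nr_frames) ? 2 : 1;
    return std::make_pair(iframe, ihalf);
}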
void ProgAngularProjectLibrary::project_angle_vector (int my_init, int my_end, bool verbose)
{
    Projection P;
    FileName fn_proj;
    double rot,tilt,psi;
    int mySize;
    int numberStepsPsi = 1;

    mySize=my_end-my_init+1;
    if (psi_sampling < 360)
    {
        numberStepsPsi = (int) (359.99999/psi_sampling);
        mySize *= numberStepsPsi;
    }

    if (verbose)
        init_progress_bar(mySize);
    int myCounter=0;


    for (double mypsi=0;mypsi<360;mypsi += psi_sampling)
        for (int i=0;i<my_init;i++)
            myCounter++;

//    if (shears && XSIZE(inputVol())!=0 && VShears==NULL)
//        VShears=new RealShearsInfo(inputVol());
    if (projType == SHEARS && XSIZE(inputVol())!=0 && Vshears==NULL)
        Vshears=new RealShearsInfo(inputVol());
    if (projType == FOURIER && XSIZE(inputVol())!=0 && Vfourier==NULL)
        Vfourier=new FourierProjector(inputVol(),
        		                      paddFactor,
        		                      maxFrequency,
        		                      BSplineDeg);

    for (double mypsi=0;mypsi<360;mypsi += psi_sampling)
    {
        for (int i=my_init;i<=my_end;i++)
        {
            if (verbose)
                progress_bar(i-my_init);
            psi= mypsi+ZZ(mysampling.no_redundant_sampling_points_angles[i]);
            tilt=      YY(mysampling.no_redundant_sampling_points_angles[i]);
            rot=       XX(mysampling.no_redundant_sampling_points_angles[i]);

//            if (shears)
//                projectVolume(*VShears, P, Ydim, Xdim, rot,tilt,psi);
//            else
//                projectVolume(inputVol(), P, Ydim, Xdim, rot,tilt,psi);
            if (projType == SHEARS)
                projectVolume(*Vshears, P, Ydim, Xdim,   rot, tilt, psi);
            else if (projType == FOURIER)
                projectVolume(*Vfourier, P, Ydim, Xdim,  rot, tilt, psi);
            else if (projType == REALSPACE)
                projectVolume(inputVol(), P, Ydim, Xdim, rot, tilt, psi);


            P.setEulerAngles(rot,tilt,psi);
            P.setDataMode(_DATA_ALL);
            P.write(output_file,(size_t) (numberStepsPsi * i + mypsi +1),true,WRITE_REPLACE);
        }
    }
    if (verbose)
        progress_bar(mySize);
}
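A minimal sketch of the size bookkeeping at the top of the function above (standalone, following the same convention): every sampling direction assigned to this call is projected once per in-plane psi step, so the progress-bar total is the product of the two counts.

// Number of projections generated for directions my_init..my_end, using the
// same numberStepsPsi computation as project_angle_vector above.
static int projectionCount(int my_init, int my_end, double psi_sampling)
{
    int numberStepsPsi = 1;
    if (psi_sampling < 360)
        numberStepsPsi = (int)(359.99999 / psi_sampling);
    return (my_end - my_init + 1) * numberStepsPsi;
}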
void ProgSSNR::estimateSSNR(int dim, Matrix2D<double> &output)
{
    // These vectors are for 1D
    Matrix1D<double> S_S21D((int)(XSIZE(S()) / 2 - ring_width)),
    S_N21D((int)(XSIZE(S()) / 2 - ring_width)),
    K1D((int)(XSIZE(S()) / 2 - ring_width)),
    S_SSNR1D;
    Matrix1D<double> N_S21D((int)(XSIZE(S()) / 2 - ring_width)),
    N_N21D((int)(XSIZE(S()) / 2 - ring_width)),
    N_SSNR1D;

    // Selfile of the 2D images
    MetaData SF_individual;

    std::cerr << "Computing the SSNR ...\n";
    init_progress_bar(SF_S.size());
    int imgno = 1;
    Image<double> Is, In;
    Projection Iths, Ithn;
    MultidimArray< std::complex<double> > FFT_Is, FFT_Iths,  FFT_In, FFT_Ithn;
    MultidimArray<double> S2s, N2s, S2n, N2n;
    FileName fn_img;
    FourierTransformer FT(FFTW_BACKWARD);
    FourierProjector *Sprojector=NULL;
    FourierProjector *Nprojector=NULL;
    if (fourierProjections)
    {
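    	// Fourier-space projectors for the signal and noise volumes; the arguments follow the
    	// same order as the FourierProjector construction in project_angle_vector above
    	// (padding factor 2, maximum digital frequency 0.5, linear interpolation).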
    	Sprojector=new FourierProjector(S(),2,0.5,LINEAR);
    	Nprojector=new FourierProjector(N(),2,0.5,LINEAR);
    }
    FOR_ALL_OBJECTS_IN_METADATA2(SF_S, SF_N)
    {
    	double rot, tilt, psi;
    	SF_S.getValue(MDL_ANGLE_ROT,rot, __iter.objId);
    	SF_S.getValue(MDL_ANGLE_TILT,tilt,__iter.objId);
    	SF_S.getValue(MDL_ANGLE_PSI,psi,__iter.objId);
    	SF_S.getValue(MDL_IMAGE,fn_img,__iter.objId);
        Is.read(fn_img);
        Is().setXmippOrigin();
    	SF_N.getValue(MDL_IMAGE,fn_img,__iter2.objId);
        In.read(fn_img);
        In().setXmippOrigin();

        if (fourierProjections)
        {
        	projectVolume(*Sprojector, Iths, YSIZE(Is()), XSIZE(Is()), rot, tilt, psi);
        	projectVolume(*Nprojector, Ithn, YSIZE(Is()), XSIZE(Is()), rot, tilt, psi);
        }
        else
        {
			projectVolume(S(), Iths, YSIZE(Is()), XSIZE(Is()), rot, tilt, psi);
			projectVolume(N(), Ithn, YSIZE(Is()), XSIZE(Is()), rot, tilt, psi);
        }

#ifdef DEBUG

        Image<double> save;
        save() = Is();
        save.write("PPPread_signal.xmp");
        save() = In();
        save.write("PPPread_noise.xmp");
        save() = Iths();
        save.write("PPPtheo_signal.xmp");
        save() = Ithn();
        save.write("PPPtheo_noise.xmp");
#endif

        Is() -= Iths();
        In() -= Ithn(); // According to the article, this subtraction should perhaps be omitted (simply remove this line):
                        // "...except that there is no subtraction in the denominator because the
                        // underlying signal is zero by definition."

        if (dim == 2)
        {
            FT.completeFourierTransform(Is(), FFT_Is);
            FT.completeFourierTransform(Iths(), FFT_Iths);
            FT.completeFourierTransform(In(), FFT_In);
            FT.completeFourierTransform(Ithn(), FFT_Ithn);
        }
        else
        {
            FT.FourierTransform(Is(), FFT_Is);
            FT.FourierTransform(Iths(), FFT_Iths);
            FT.FourierTransform(In(), FFT_In);
            FT.FourierTransform(Ithn(), FFT_Ithn);
        }

#ifdef DEBUG

        Image< std::complex<double> > savec;
        savec() = FFT_Is;
        savec.write("PPPFFTread_signal.xmp");
        savec() = FFT_In;
        savec.write("PPPFFTread_noise.xmp");
        savec() = FFT_Iths;
        savec.write("PPPFFTtheo_signal.xmp");
        savec() = FFT_Ithn;
        savec.write("PPPFFTtheo_noise.xmp");
#endif

        // Compute the amplitudes
        S2s.resizeNoCopy(FFT_Iths);
        N2s.resizeNoCopy(FFT_Iths);
        S2n.resizeNoCopy(FFT_Iths);
        N2n.resizeNoCopy(FFT_Iths);
        FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(FFT_Iths)
        {
            DIRECT_MULTIDIM_ELEM(S2s, n) = abs(DIRECT_MULTIDIM_ELEM(FFT_Iths, n));
            DIRECT_MULTIDIM_ELEM(S2s, n) *= DIRECT_MULTIDIM_ELEM(S2s, n);
            DIRECT_MULTIDIM_ELEM(N2s, n) = abs(DIRECT_MULTIDIM_ELEM(FFT_Is, n));
            DIRECT_MULTIDIM_ELEM(N2s, n) *= DIRECT_MULTIDIM_ELEM(N2s, n);
            DIRECT_MULTIDIM_ELEM(S2n, n) = abs(DIRECT_MULTIDIM_ELEM(FFT_Ithn, n));
            DIRECT_MULTIDIM_ELEM(S2n, n) *= DIRECT_MULTIDIM_ELEM(S2n, n);
            DIRECT_MULTIDIM_ELEM(N2n, n) = abs(DIRECT_MULTIDIM_ELEM(FFT_In, n));
            DIRECT_MULTIDIM_ELEM(N2n, n) *= DIRECT_MULTIDIM_ELEM(N2n, n);
        }

#ifdef DEBUG

        save() = S2s;
        save.write("PPPS2s.xmp");
        save() = N2s;
        save.write("PPPN2s.xmp");
        save() = S2n;
        save.write("PPPS2n.xmp");
        save() = N2n;
        save.write("PPPN2n.xmp");
#endif

        if (dim == 2)
        {
            // Compute the SSNR image
            Image<double> SSNR2D;
            SSNR2D().initZeros(S2s);
            MultidimArray<double> &SSNR2Dmatrix = SSNR2D(); // non-const: written to below
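            // Per pixel: ISSNR = |F(theoretical signal)|^2 / |F(signal residual)|^2,
            // alpha = |F(theoretical noise)|^2 / |F(noise residual)|^2, and
            // SSNR = max(ISSNR/alpha - 1, 0), stored as 10*log10(SSNR + 1) in dB.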
            FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(S2s)
            {
                double ISSNR = 0, alpha = 0, SSNR = 0;
                double aux = DIRECT_MULTIDIM_ELEM(N2s,n);
                if (aux > min_power)
                    ISSNR = DIRECT_MULTIDIM_ELEM(S2s,n) / aux;
                aux = DIRECT_MULTIDIM_ELEM(N2n,n);
                if (aux > min_power)
                    alpha = DIRECT_MULTIDIM_ELEM(S2n,n) / aux;
                if (alpha > min_power)
                {
                    aux = ISSNR / alpha - 1.0;
                    SSNR = XMIPP_MAX(aux, 0.0);
                }
                if (SSNR    > min_power)
                    DIRECT_MULTIDIM_ELEM(SSNR2Dmatrix,n) = 10.0 * log10(SSNR + 1.0);
            }
            CenterFFT(SSNR2D(), true);
#ifdef DEBUG

            save() = SSNR2Dmatrix;
            save.write("PPPSSNR2D.xmp");
#endif

            // Save image
            FileName fn_img_out;
            fn_img_out.compose(imgno, fn_out_images, "stk");
            SSNR2D.write(fn_img_out);
            size_t objId = SF_individual.addObject();
            SF_individual.setValue(MDL_IMAGE,fn_img_out,objId);
            SF_individual.setValue(MDL_ANGLE_ROT,rot,objId);
            SF_individual.setValue(MDL_ANGLE_TILT,tilt,objId);
            SF_individual.setValue(MDL_ANGLE_PSI,psi,objId);
        }