void MultiplyMD::execEventScalar(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  // Get the scalar to multiply by
  float scalar = float(m_rhs_scalar->dataY(0)[0]);
  float scalarError = float(m_rhs_scalar->dataE(0)[0]);
  float scalarRelativeErrorSquared =
      (scalarError * scalarError) / (scalar * scalar);

  // Get all the MDBoxes contained
  MDBoxBase<MDE, nd> *parentBox = ws->getBox();
  std::vector<API::IMDNode *> boxes;
  parentBox->getBoxes(boxes, 1000, true);

  bool fileBackedTarget(false);
  Kernel::DiskBuffer *dbuff(NULL);
  if (ws->isFileBacked()) {
    fileBackedTarget = true;
    dbuff = ws->getBoxController()->getFileIO();
  }

  for (size_t i = 0; i < boxes.size(); i++) {
    MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxes[i]);
    if (box) {
      typename std::vector<MDE> &events = box->getEvents();
      size_t ic(events.size());
      typename std::vector<MDE>::iterator it = events.begin();
      typename std::vector<MDE>::iterator it_end = events.end();
      for (; it != it_end; it++) {
        // Multiply the weight by the scalar, propagating the error
        float oldSignal = it->getSignal();
        float signal = oldSignal * scalar;
        float errorSquared =
            signal * signal *
            (it->getErrorSquared() / (oldSignal * oldSignal) +
             scalarRelativeErrorSquared);
        it->setSignal(signal);
        it->setErrorSquared(errorSquared);
      }
      box->releaseEvents();
      if (fileBackedTarget && ic > 0) {
        Kernel::ISaveable *const pSaver(box->getISaveable());
        dbuff->toWrite(pSaver);
      }
    }
  }

  // Recalculate the totals
  ws->refreshCache();
  // Mark the file-backed workspace as dirty
  ws->setFileNeedsUpdating(true);
}
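// A minimal, self-contained sketch (not Mantid code) of the error propagation
// used above: for s' = s * k, relative variances add, so
// (e'/s')^2 = (e/s)^2 + (e_k/k)^2, i.e. e'^2 = s'^2 * (e^2/s^2 + e_k^2/k^2).
#include <utility>

std::pair<float, float> // returns {newSignal, newErrorSquared}
multiplyByScalar(float signal, float errorSquared, float scalar,
                 float scalarErrorSquared) {
  const float newSignal = signal * scalar;
  const float scalarRelErrSq = scalarErrorSquared / (scalar * scalar);
  const float newErrorSquared =
      newSignal * newSignal *
      (errorSquared / (signal * signal) + scalarRelErrSq);
  return {newSignal, newErrorSquared};
}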
void CreateMDWorkspace::finish(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  // ------------ Set up the box controller ----------------------------------
  BoxController_sptr bc = ws->getBoxController();
  this->setBoxController(bc);

  // Split to level 1
  ws->splitBox();

  // Do we split more due to MinRecursionDepth?
  int minDepth = this->getProperty("MinRecursionDepth");
  if (minDepth < 0)
    throw std::invalid_argument("MinRecursionDepth must be >= 0.");
  ws->setMinRecursionDepth(size_t(minDepth));
}
void CloneMDWorkspace::doClone(const typename MDEventWorkspace<MDE, nd>::sptr ws) {
  std::string outWSName = getPropertyValue("OutputWorkspace");
  Progress prog(this, 0.0, 10.0, 100);
  BoxController_sptr bc = ws->getBoxController();

  if (!bc)
    throw std::runtime_error("Error with InputWorkspace: no BoxController!");
  if (bc->isFileBacked()) {
    // Generate a new filename to copy to
    prog.report("Copying File");
    std::string originalFile = bc->getFilename();
    std::string outFilename = getPropertyValue("Filename");
    if (outFilename.empty()) {
      // Auto-generated name
      Poco::Path path = Poco::Path(originalFile).absolute();
      std::string newName =
          path.getBaseName() + "_clone." + path.getExtension();
      path.setFileName(newName);
      outFilename = path.toString();
    }

    // Perform the copying
    g_log.notice() << "Cloned workspace file being copied to: " << outFilename
                   << std::endl;
    Poco::File(originalFile).copyTo(outFilename);
    g_log.information() << "File copied successfully." << std::endl;

    // Now load it back
    IAlgorithm_sptr alg = createSubAlgorithm("LoadMD", 0.5, 1.0, false);
    alg->setPropertyValue("Filename", outFilename);
    alg->setPropertyValue("FileBackEnd", "1");
    alg->setPropertyValue("Memory", "0"); // TODO: How much memory?
    alg->setPropertyValue("OutputWorkspace", outWSName);
    alg->executeAsSubAlg();

    // Set the output workspace to this
    IMDEventWorkspace_sptr outWS = alg->getProperty("OutputWorkspace");
    this->setProperty("OutputWorkspace", outWS);
  } else {
    // Perform the clone in memory.
    boost::shared_ptr<MDEventWorkspace<MDE, nd>> outWS(
        new MDEventWorkspace<MDE, nd>(*ws));
    this->setProperty("OutputWorkspace",
                      boost::dynamic_pointer_cast<IMDEventWorkspace>(outWS));
  }
}
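// A minimal, self-contained sketch (not Mantid code) of the auto-naming rule
// used above: insert "_clone" before the extension of the original file name.
// std::filesystem is used here purely for illustration in place of Poco.
#include <filesystem>
#include <string>

std::string makeCloneFilename(const std::string &originalFile) {
  namespace fs = std::filesystem;
  const fs::path path = fs::absolute(originalFile);
  const std::string newName =
      path.stem().string() + "_clone" + path.extension().string();
  return (path.parent_path() / newName).string();
  // e.g. "/data/run1234.nxs" -> "/data/run1234_clone.nxs"
}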
void SliceMD::slice(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  // Create the output workspace
  typename MDEventWorkspace<OMDE, ond>::sptr outWS(
      new MDEventWorkspace<OMDE, ond>());
  for (size_t od = 0; od < m_binDimensions.size(); od++) {
    outWS->addDimension(m_binDimensions[od]);
  }
  outWS->setCoordinateSystem(ws->getSpecialCoordinateSystem());
  outWS->initialize();
  // Copy settings from the original box controller
  BoxController_sptr bc = ws->getBoxController();

  // Store the write buffer size for the future
  // uint64_t writeBufSize =
  // bc->getFileIO()getDiskBuffer().getWriteBufferSize();
  // and disable the write buffer (if any) for the input MD events for the
  // purposes of this algorithm
  // bc->setCacheParameters(1,0);

  BoxController_sptr obc = outWS->getBoxController();
  // Use the "number of bins" as the "split into" parameter
  for (size_t od = 0; od < m_binDimensions.size(); od++)
    obc->setSplitInto(od, m_binDimensions[od]->getNBins());
  obc->setSplitThreshold(bc->getSplitThreshold());

  bool bTakeDepthFromInputWorkspace =
      getProperty("TakeMaxRecursionDepthFromInput");
  int tempDepth = getProperty("MaxRecursionDepth");
  size_t maxDepth =
      bTakeDepthFromInputWorkspace ? bc->getMaxDepth() : size_t(tempDepth);
  obc->setMaxDepth(maxDepth);

  // size_t outputSize = writeBufSize;
  // obc->setCacheParameters(sizeof(OMDE),outputSize);

  obc->resetNumBoxes();
  // Perform the first box splitting
  outWS->splitBox();
  size_t lastNumBoxes = obc->getTotalNumMDBoxes();

  // --- File back end ? ----------------
  std::string filename = getProperty("OutputFilename");
  if (!filename.empty()) {
    // First save to the NXS file
    g_log.notice() << "Running SaveMD to create file back-end" << std::endl;
    IAlgorithm_sptr alg = createChildAlgorithm("SaveMD");
    alg->setPropertyValue("Filename", filename);
    alg->setProperty("InputWorkspace", outWS);
    alg->setProperty("MakeFileBacked", true);
    alg->executeAsChildAlg();

    if (!obc->isFileBacked())
      throw std::runtime_error("SliceMD with file-backed output: cannot set "
                               "up the file-backed output workspace.");

    auto IOptr = obc->getFileIO();
    size_t outBufSize = IOptr->getWriteBufferSize();
    // The write buffer size for the resulting workspace; a reasonable size is
    // at least 10 data chunks (nice to verify)
    if (outBufSize < 10 * IOptr->getDataChunk()) {
      outBufSize = 10 * IOptr->getDataChunk();
      IOptr->setWriteBufferSize(outBufSize);
    }
  }

  // Function defining which events (in the input dimensions) to place in the
  // output
  MDImplicitFunction *function = this->getImplicitFunctionForChunk(NULL, NULL);

  std::vector<API::IMDNode *> boxes;
  // Leaf-only; no depth limit; with the implicit function passed to it.
  ws->getBox()->getBoxes(boxes, 1000, true, function);
  // Sort boxes by file position IF file backed. This reduces seeking time,
  // hopefully.
  bool fileBackedWS = bc->isFileBacked();
  if (fileBackedWS)
    API::IMDNode::sortObjByID(boxes);

  Progress *prog = new Progress(this, 0.0, 1.0, boxes.size());

  // The root of the output workspace
  MDBoxBase<OMDE, ond> *outRootBox = outWS->getBox();

  // If the target workspace already has events, count them as added
  uint64_t totalAdded = outWS->getNEvents();
  uint64_t numSinceSplit = 0;

  // Go through every box for this chunk.
  // PARALLEL_FOR_IF( !bc->isFileBacked() )
  for (int i = 0; i < int(boxes.size()); i++) {
    MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxes[i]);
    // Perform the binning in this separate method.
    if (box) {
      // An array to hold the rotated/transformed coordinates
      coord_t outCenter[ond];

      const std::vector<MDE> &events = box->getConstEvents();
      typename std::vector<MDE>::const_iterator it = events.begin();
      typename std::vector<MDE>::const_iterator it_end = events.end();
      for (; it != it_end; it++) {
        // Cache the center of the event (again for speed)
        const coord_t *inCenter = it->getCenter();
        if (function->isPointContained(inCenter)) {
          // Now transform to the output dimensions
          m_transformFromOriginal->apply(inCenter, outCenter);

          // Create the event
          OMDE newEvent(it->getSignal(), it->getErrorSquared(), outCenter);
          // Copy extra data, if any
          copyEvent(*it, newEvent);
          // Add it to the workspace
          outRootBox->addEvent(newEvent);

          numSinceSplit++;
        }
      }
      box->releaseEvents();

      // Ask the BC whether the boxes need to be split
      if (obc->shouldSplitBoxes(totalAdded, numSinceSplit, lastNumBoxes))
      // if (numSinceSplit > 20000000 || (i == int(boxes.size()-1)))
      {
        // This splits up all the boxes according to split thresholds and
        // sizes.
        Kernel::ThreadScheduler *ts = new ThreadSchedulerFIFO();
        ThreadPool tp(ts);
        outWS->splitAllIfNeeded(ts);
        tp.joinAll();

        // Accumulate stats
        totalAdded += numSinceSplit;
        numSinceSplit = 0;
        lastNumBoxes = obc->getTotalNumMDBoxes();
        // Progress reporting
        if (!fileBackedWS)
          prog->report(i);
      }
      if (fileBackedWS) {
        if (!(i % 10))
          prog->report(i);
      }
    } // is box

  } // for each box in the vector
  prog->report();

  outWS->splitAllIfNeeded(NULL);
  // Refresh all cache.
  outWS->refreshCache();

  g_log.notice() << totalAdded << " " << OMDE::getTypeName()
                 << "'s added to the output workspace." << std::endl;

  if (outWS->isFileBacked()) {
    // Update the file back-end
    g_log.notice() << "Running SaveMD" << std::endl;
    IAlgorithm_sptr alg = createChildAlgorithm("SaveMD");
    alg->setProperty("UpdateFileBackEnd", true);
    alg->setProperty("InputWorkspace", outWS);
    alg->executeAsChildAlg();
  }
  // Return the size of the input workspace write buffer to its initial value
  // bc->setCacheParameters(sizeof(MDE),writeBufSize);

  this->setProperty("OutputWorkspace",
                    boost::dynamic_pointer_cast<IMDEventWorkspace>(outWS));
  delete prog;
}
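// A minimal, self-contained sketch (not Mantid code) of the streaming pattern
// used above: add events one at a time and periodically rebalance ("split")
// the container once enough new events have accumulated. The fixed threshold
// and the rebalance() call are illustrative placeholders, not Mantid's actual
// shouldSplitBoxes() criterion.
#include <cstdint>
#include <vector>

struct Event { double signal, errorSquared; };

template <typename Container>
void streamEvents(const std::vector<Event> &input, Container &out) {
  const std::uint64_t splitEvery = 1000000; // hypothetical threshold
  std::uint64_t numSinceSplit = 0;
  for (const Event &e : input) {
    out.add(e);
    if (++numSinceSplit >= splitEvery) {
      out.rebalance(); // analogous to splitAllIfNeeded(ts) above
      numSinceSplit = 0;
    }
  }
  out.rebalance(); // final pass, analogous to splitAllIfNeeded(NULL)
}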
void BinMD::binByIterating(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  BoxController_sptr bc = ws->getBoxController();
  // Store the existing write buffer size for the future
  // uint64_t writeBufSize = bc->getDiskBuffer().getWriteBufferSize();
  // and disable the write buffer (if any) for the input MD events for the
  // purposes of this algorithm
  // bc->setCacheParameters(1,0);

  // Cache some data to speed up accessing them a bit
  indexMultiplier = new size_t[m_outD];
  for (size_t d = 0; d < m_outD; d++) {
    if (d > 0)
      indexMultiplier[d] = outWS->getIndexMultiplier()[d - 1];
    else
      indexMultiplier[d] = 1;
  }
  signals = outWS->getSignalArray();
  errors = outWS->getErrorSquaredArray();
  numEvents = outWS->getNumEventsArray();

  // Start with signal/error/numEvents at 0.0
  outWS->setTo(0.0, 0.0, 0.0);

  // The dimension (in the output workspace) along which we chunk for parallel
  // processing
  // TODO: Find the smartest dimension to chunk against
  size_t chunkDimension = 0;

  // How many bins (in that dimension) per chunk.
  // Try to split it so each core will get 2 tasks:
  int chunkNumBins = int(m_binDimensions[chunkDimension]->getNBins() /
                         (PARALLEL_GET_MAX_THREADS * 2));
  if (chunkNumBins < 1)
    chunkNumBins = 1;

  // Do we actually do it in parallel?
  bool doParallel = getProperty("Parallel");
  // Not if file-backed!
  if (bc->isFileBacked())
    doParallel = false;
  if (!doParallel)
    chunkNumBins = int(m_binDimensions[chunkDimension]->getNBins());

  // Total number of steps
  size_t progNumSteps = 0;
  if (prog)
    prog->setNotifyStep(0.1);
  if (prog)
    prog->resetNumSteps(100, 0.00, 1.0);

  // Run the chunks in parallel. There is no overlap in the output workspace,
  // so it is thread safe to write to it.
  // cppcheck-suppress syntaxError
  PRAGMA_OMP( parallel for schedule(dynamic,1) if (doParallel) )
  for (int chunk = 0; chunk < int(m_binDimensions[chunkDimension]->getNBins());
       chunk += chunkNumBins) {
    PARALLEL_START_INTERUPT_REGION
    // Region of interest for this chunk.
    std::vector<size_t> chunkMin(m_outD);
    std::vector<size_t> chunkMax(m_outD);
    for (size_t bd = 0; bd < m_outD; bd++) {
      // Same limits in the other dimensions
      chunkMin[bd] = 0;
      chunkMax[bd] = m_binDimensions[bd]->getNBins();
    }

    // Parcel out a chunk in that single dimension
    chunkMin[chunkDimension] = size_t(chunk);
    if (size_t(chunk + chunkNumBins) >
        m_binDimensions[chunkDimension]->getNBins())
      chunkMax[chunkDimension] = m_binDimensions[chunkDimension]->getNBins();
    else
      chunkMax[chunkDimension] = size_t(chunk + chunkNumBins);

    // Build an implicit function (it needs to be in the space of the
    // MDEventWorkspace)
    MDImplicitFunction *function =
        this->getImplicitFunctionForChunk(chunkMin.data(), chunkMax.data());

    // Use getBoxes() to get an array with a pointer to each box
    std::vector<API::IMDNode *> boxes;
    // Leaf-only; no depth limit; with the implicit function passed to it.
    ws->getBox()->getBoxes(boxes, 1000, true, function);

    // Sort boxes by file position IF file backed. This reduces seeking time,
    // hopefully.
    if (bc->isFileBacked())
      API::IMDNode::sortObjByID(boxes);

    // For progress reporting, the # of boxes
    if (prog) {
      PARALLEL_CRITICAL(BinMD_progress) {
        g_log.debug() << "Chunk " << chunk << ": found " << boxes.size()
                      << " boxes within the implicit function.\n";
        progNumSteps += boxes.size();
        prog->setNumSteps(progNumSteps);
      }
    }

    // Go through every box for this chunk.
    for (auto &boxe : boxes) {
      MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxe);
      // Perform the binning in this separate method.
      if (box && !box->getIsMasked())
        this->binMDBox(box, chunkMin.data(), chunkMax.data());

      // Progress reporting
      if (prog)
        prog->report();
      // For early cancelling of the loop
      if (this->m_cancel)
        break;
    } // for each box in the vector
    PARALLEL_END_INTERUPT_REGION
  } // for each chunk in parallel
  PARALLEL_CHECK_INTERUPT_REGION

  // Now the implicit function
  if (implicitFunction) {
    if (prog)
      prog->report("Applying implicit function.");
    signal_t nan = std::numeric_limits<signal_t>::quiet_NaN();
    outWS->applyImplicitFunction(implicitFunction, nan, nan);
  }
  // Return the size of the input workspace write buffer to its initial value
  // bc->setCacheParameters(sizeof(MDE),writeBufSize);
}
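// A minimal, self-contained sketch (not Mantid code) of the chunking
// arithmetic used above: split the bins of one dimension into chunks so that
// each hardware thread gets roughly two chunks, clamping the last chunk to
// the total number of bins. numThreads stands in for PARALLEL_GET_MAX_THREADS.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

std::vector<std::pair<std::size_t, std::size_t>>
makeChunks(std::size_t nBins, std::size_t numThreads) {
  std::size_t chunkNumBins =
      std::max<std::size_t>(1, nBins / (numThreads * 2));
  std::vector<std::pair<std::size_t, std::size_t>> chunks; // [min, max) pairs
  for (std::size_t chunk = 0; chunk < nBins; chunk += chunkNumBins)
    chunks.emplace_back(chunk, std::min(chunk + chunkNumBins, nBins));
  return chunks;
  // e.g. makeChunks(100, 8) -> {0,6}, {6,12}, ..., {96,100}
}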
void LoadMD::doLoad(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  // Are we using the file back end?
  bool fileBackEnd = getProperty("FileBackEnd");

  if (fileBackEnd && m_BoxStructureAndMethadata)
    throw std::invalid_argument("Combination of BoxStructureOnly or "
                                "MetaDataOnly was set to TRUE with "
                                "fileBackEnd: this is not possible.");

  CPUTimer tim;
  auto prog = new Progress(this, 0.0, 1.0, 100);

  prog->report("Opening file.");
  std::string title;
  try {
    m_file->getAttr("title", title);
  } catch (std::exception &) {
    // Leave the title blank if error on loading
  }
  ws->setTitle(title);

  // Load the WorkspaceHistory "process"
  if (this->getProperty("LoadHistory")) {
    ws->history().loadNexus(m_file.get());
  }

  this->loadAffineMatricies(boost::dynamic_pointer_cast<IMDWorkspace>(ws));

  m_file->closeGroup();
  m_file->close();

  // Add each of the dimensions
  for (size_t d = 0; d < nd; d++)
    ws->addDimension(m_dims[d]);

  // Coordinate system
  ws->setCoordinateSystem(m_coordSystem);

  // ----------------------------------------- Box Structure
  // ------------------------------
  prog->report("Reading box structure from HDD.");
  MDBoxFlatTree FlatBoxTree;
  int nDims = static_cast<int>(nd); // should be safe
  FlatBoxTree.loadBoxStructure(m_filename, nDims, MDE::getTypeName());

  BoxController_sptr bc = ws->getBoxController();
  bc->fromXMLString(FlatBoxTree.getBCXMLdescr());

  prog->report("Restoring box structure and connectivity");
  std::vector<API::IMDNode *> boxTree;
  FlatBoxTree.restoreBoxTree(boxTree, bc, fileBackEnd,
                             m_BoxStructureAndMethadata);
  size_t numBoxes = boxTree.size();

  // ---------------------------------------- DEAL WITH BOXES
  // ------------------------------------
  if (fileBackEnd) { // TODO:: call to the file format factory
    auto loader = boost::shared_ptr<API::IBoxControllerIO>(
        new DataObjects::BoxControllerNeXusIO(bc.get()));
    loader->setDataType(sizeof(coord_t), MDE::getTypeName());
    bc->setFileBacked(loader, m_filename);
    // boxes have already been made file-backed when restoring the boxTree;
    // How much memory for the cache?
    { // TODO: Clean up, only a write buffer now
      double mb = getProperty("Memory");

      // Defaults have changed; the default disk buffer size should be 10 data
      // chunks. TODO: find optimal, 100 may be better.
      if (mb <= 0)
        mb = double(10 * loader->getDataChunk() * sizeof(MDE)) /
             double(1024 * 1024);

      // Express the cache memory in units of number of events.
      uint64_t cacheMemory =
          static_cast<uint64_t>((mb * 1024. * 1024.) / sizeof(MDE)) + 1;

      // Set these values in the diskMRU
      bc->getFileIO()->setWriteBufferSize(cacheMemory);

      g_log.information() << "Setting a DiskBuffer cache size of " << mb
                          << " MB, or " << cacheMemory << " events.\n";
    }
  }      // Not file back end
  else if (!m_BoxStructureAndMethadata) {
    // ---------------------------------------- READ IN THE BOXES
    // ------------------------------------
    // TODO:: call to the file format factory
    auto loader =
        file_holder_type(new DataObjects::BoxControllerNeXusIO(bc.get()));
    loader->setDataType(sizeof(coord_t), MDE::getTypeName());
    loader->openFile(m_filename, "r");

    const std::vector<uint64_t> &BoxEventIndex = FlatBoxTree.getEventIndex();
    prog->setNumSteps(numBoxes);

    for (size_t i = 0; i < numBoxes; i++) {
      prog->report();
      MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxTree[i]);
      if (!box)
        continue;

      if (BoxEventIndex[2 * i + 1] > 0) // Load in memory, NOT using the file
                                        // as the back-end
      {
        boxTree[i]->reserveMemoryForLoad(BoxEventIndex[2 * i + 1]);
        boxTree[i]->loadAndAddFrom(
            loader.get(), BoxEventIndex[2 * i],
            static_cast<size_t>(BoxEventIndex[2 * i + 1]));
      }
    }
    loader->closeFile();
  } else // box structure and metadata only
  {
  }
  g_log.debug() << tim
                << " to create all the boxes and fill them with events.\n";

  // Box of ID 0 is the head box.
  ws->setBox(boxTree[0]);
  // Make sure the max ID is ok for later ID generation
  bc->setMaxId(numBoxes);

  // end-of bMetaDataOnly
  // Refresh cache
  // TODO:if(!fileBackEnd)ws->refreshCache();
  ws->refreshCache();
  g_log.debug() << tim << " to refreshCache(). " << ws->getNPoints()
                << " points after refresh.\n";

  g_log.debug() << tim << " to finish up.\n";
  delete prog;
}
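// A minimal, self-contained sketch (not Mantid code) of the cache sizing
// used above: a memory budget in MB is converted to a number of events of a
// given size, defaulting to 10 data chunks' worth when no budget is supplied.
// The default chunk size of 10000 events is a hypothetical placeholder.
#include <cstddef>
#include <cstdint>

std::uint64_t cacheSizeInEvents(double megabytes, std::size_t eventSize,
                                std::size_t dataChunkEvents = 10000) {
  if (megabytes <= 0) // default: 10 data chunks' worth of events
    megabytes =
        double(10 * dataChunkEvents * eventSize) / double(1024 * 1024);
  return static_cast<std::uint64_t>((megabytes * 1024. * 1024.) / eventSize) +
         1;
}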
void SaveMD::doSaveEvents(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  std::string filename = getPropertyValue("Filename");
  bool update = getProperty("UpdateFileBackEnd");
  bool MakeFileBacked = getProperty("MakeFileBacked");

  bool wsIsFileBacked = ws->isFileBacked();
  if (update && MakeFileBacked)
    throw std::invalid_argument(
        "Please choose either UpdateFileBackEnd or MakeFileBacked, not both.");

  if (MakeFileBacked && wsIsFileBacked)
    throw std::invalid_argument(
        "You picked MakeFileBacked but the workspace is already file-backed!");

  BoxController_sptr bc = ws->getBoxController();

  if (!wsIsFileBacked) { // Erase the file if it exists
    Poco::File oldFile(filename);
    if (oldFile.exists())
      oldFile.remove();
  }

  auto prog = new Progress(this, 0.0, 0.05, 1);
  if (update) // workspace has its own file and ignores any changes to the
              // algorithm parameters
  {
    if (!ws->isFileBacked())
      throw std::runtime_error(
          "Attempt to update a non-file-backed workspace.");
    filename = bc->getFileIO()->getFileName();
  }

  //-----------------------------------------------------------------------------------------------------
  // Create or open the WS group and put there additional information about
  // the WS and its dimensions
  int nDims = static_cast<int>(nd);
  bool data_exist;
  auto file = file_holder_type(MDBoxFlatTree::createOrOpenMDWSgroup(
      filename, nDims, MDE::getTypeName(), false, data_exist));

  // Save each NEW ExperimentInfo to a spot in the file
  MDBoxFlatTree::saveExperimentInfos(file.get(), ws);
  if (!update || !data_exist) {
    MDBoxFlatTree::saveWSGenericInfo(file.get(), ws);
  }
  file->closeGroup();
  file->close();

  MDBoxFlatTree BoxFlatStruct;
  //-----------------------------------------------------------------------------------------------------
  if (update) // the workspace is already file backed;
  {
    // Remove all boxes from the DiskBuffer. The DB will calculate the boxes'
    // positions on HDD.
    bc->getFileIO()->flushCache();
    // Flatten the box structure; this will remember the boxes' file positions
    // in the box structure
    BoxFlatStruct.initFlatStructure(ws, filename);
  } else // not file backed;
  {
    // The boxes' file positions are unknown and we need to calculate them.
    BoxFlatStruct.initFlatStructure(ws, filename);
    // create saver class
    auto Saver = boost::shared_ptr<API::IBoxControllerIO>(
        new DataObjects::BoxControllerNeXusIO(bc.get()));
    Saver->setDataType(sizeof(coord_t), MDE::getTypeName());
    if (MakeFileBacked) {
      // store saver with box controller
      bc->setFileBacked(Saver, filename);
      // get access to boxes array
      std::vector<API::IMDNode *> &boxes = BoxFlatStruct.getBoxes();
      // Calculate the positions of the boxes in the file, indicating that
      // they should be made saveable and that they have not been saved yet.
      BoxFlatStruct.setBoxesFilePositions(true);
      prog->resetNumSteps(boxes.size(), 0.06, 0.90);
      for (auto &boxe : boxes) {
        auto saveableTag = boxe->getISaveable();
        if (saveableTag) // only boxes can be saveable
        {
          // do not spend time on empty boxes
          if (boxe->getDataInMemorySize() == 0)
            continue;
          // Save boxes directly using the boxes' file positions,
          // precalculated in boxFlatStructure.
          saveableTag->save();
          // Remove the box's data from memory. This will also correctly set
          // the tag indicating that the data were not loaded.
          saveableTag->clearDataFromMemory();
          // Put boxes into the write buffer, which will save them when
          // necessary
          // Saver->toWrite(saveTag);
          prog->report("Saving Box");
        }
      }
      // Remove everything from the DiskBuffer (not sure if this is really
      // necessary, but just in case; it should not do any harm)
      Saver->flushCache();
      // Flush the NeXus data to HDD (not sure if this is really necessary,
      // but just in case)
      Saver->flushData();
    } else // just save data, and finish with it
    {
      Saver->openFile(filename, "w");
      BoxFlatStruct.setBoxesFilePositions(false);
      std::vector<API::IMDNode *> &boxes = BoxFlatStruct.getBoxes();
      std::vector<uint64_t> &eventIndex = BoxFlatStruct.getEventIndex();
      prog->resetNumSteps(boxes.size(), 0.06, 0.90);
      for (size_t i = 0; i < boxes.size(); i++) {
        if (eventIndex[2 * i + 1] == 0)
          continue;
        boxes[i]->saveAt(Saver.get(), eventIndex[2 * i]);
        prog->report("Saving Box");
      }
      Saver->closeFile();
    }
  }

  // -------------- Save Box Structure -------------------------------------
  // OK, we've filled these big arrays of data representing the flat box
  // structure. Save them.
  progress(0.91, "Writing Box Data");
  prog->resetNumSteps(8, 0.92, 1.00);

  // Save box structure;
  BoxFlatStruct.saveBoxStructure(filename);

  delete prog;
  ws->setFileNeedsUpdating(false);
}
void BinToMDHistoWorkspace::do_centerpointBin(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  bool DODEBUG = true;
  CPUTimer tim;

  // Number of output binning dimensions found
  size_t outD = binDimensions.size();

  // Since the costs are not known ahead of time, use a simple FIFO buffer.
  ThreadScheduler *ts = new ThreadSchedulerFIFO();
  // Create the threadpool with: all CPUs, a progress reporter
  ThreadPool tp(ts, 0, prog);

  // Big efficiency gain is obtained by grouping a few bins per task.
  size_t binsPerTask = 100;

  // For progress reporting, the approx # of tasks
  if (prog)
    prog->setNumSteps(int(outWS->getNPoints() / 100));

  // The root-level box.
  IMDBox<MDE, nd> *rootBox = ws->getBox();

  // This is the limit to loop over in each dimension
  size_t *index_max = new size_t[outD];
  for (size_t bd = 0; bd < outD; bd++)
    index_max[bd] = binDimensions[bd]->getNBins();

  // Cache a calculation to convert indices x,y,z,t into a linear index.
  size_t *index_maker = new size_t[outD];
  Utils::NestedForLoop::SetUpIndexMaker(outD, index_maker, index_max);

  int numPoints = int(outWS->getNPoints());

  // Run in OpenMP with dynamic scheduling and a smallish chunk size
  // (binsPerTask). Right now, not parallel for file-backed systems.
  bool fileBacked = (ws->getBoxController()->getFile() != NULL);
  PRAGMA_OMP(parallel for schedule(dynamic, binsPerTask) if (!fileBacked) )
  for (int i = 0; i < numPoints; i++) {
    PARALLEL_START_INTERUPT_REGION

    size_t linear_index = size_t(i);
    // nd >= outD in all cases so this is safe.
    size_t index[nd];

    // Get the index at each dimension for this bin.
    Utils::NestedForLoop::GetIndicesFromLinearIndex(outD, linear_index,
                                                    index_maker, index_max,
                                                    index);

    // Construct the bin and its coordinates
    MDBin<MDE, nd> bin;
    for (size_t bd = 0; bd < outD; bd++) {
      // Index in this binning dimension (i_x, i_y, etc.)
      size_t idx = index[bd];
      // Dimension in the MDEventWorkspace
      size_t d = dimensionToBinFrom[bd];
      // Corresponding extents
      bin.m_min[d] = binDimensions[bd]->getX(idx);
      bin.m_max[d] = binDimensions[bd]->getX(idx + 1);
    }
    bin.m_index = linear_index;

    bool dimensionsUsed[nd];
    for (size_t d = 0; d < nd; d++)
      dimensionsUsed[d] = (d < 3);

    // Check if the bin is in the ImplicitFunction (if any)
    bool binContained = true;
    if (implicitFunction) {
      binContained = implicitFunction->isPointContained(
          bin.m_min); // TODO. Correct argument passed to this method?
    }

    if (binContained) {
      // Array of bools set to true when a dimension is fully contained
      // (binary splitting only)
      bool fullyContained[nd];
      for (size_t d = 0; d < nd; d++)
        fullyContained[d] = false;

      // This will recursively bin into the sub grids
      rootBox->centerpointBin(bin, fullyContained);

      // Save the data into the dense histogram
      outWS->setSignalAt(linear_index, bin.m_signal);
      outWS->setErrorAt(linear_index, bin.m_errorSquared);
    }

    // Report progress but not too often.
    if (((linear_index % 100) == 0) && prog)
      prog->report();

    PARALLEL_END_INTERUPT_REGION
  } // (for each linear index)
  PARALLEL_CHECK_INTERUPT_REGION

  if (DODEBUG)
    std::cout << tim << " to run the openmp loop.\n";

  delete[] index_max;
  delete[] index_maker;
}
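// A minimal, self-contained sketch (not Mantid code) of the index_maker /
// index_max trick used above: precompute cumulative products of the bin
// counts, then decompose a linear bin index into one index per dimension.
#include <cstddef>
#include <vector>

// index_maker[d] = product of nBins[0..d-1]; linear = sum(index[d] * index_maker[d])
std::vector<std::size_t> makeIndexMaker(const std::vector<std::size_t> &nBins) {
  std::vector<std::size_t> maker(nBins.size(), 1);
  for (std::size_t d = 1; d < nBins.size(); ++d)
    maker[d] = maker[d - 1] * nBins[d - 1];
  return maker;
}

std::vector<std::size_t>
indicesFromLinearIndex(std::size_t linear,
                       const std::vector<std::size_t> &nBins) {
  const std::vector<std::size_t> maker = makeIndexMaker(nBins);
  std::vector<std::size_t> index(nBins.size());
  for (std::size_t d = nBins.size(); d-- > 0;) {
    index[d] = linear / maker[d];
    linear -= index[d] * maker[d];
  }
  return index;
  // e.g. with nBins = {10, 20}, linear index 57 -> {7, 5} (x = 7, y = 5)
}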
void BinToMDHistoWorkspace::binByIterating(typename MDEventWorkspace<MDE, nd>::sptr ws) {
  BoxController_sptr bc = ws->getBoxController();

  // Start with signal at 0.0
  outWS->setTo(0.0, 0.0);

  // Cache some data to speed up accessing them a bit
  indexMultiplier = new size_t[outD];
  for (size_t d = 0; d < outD; d++) {
    if (d > 0)
      indexMultiplier[d] = outWS->getIndexMultiplier()[d - 1];
    else
      indexMultiplier[d] = 1;
  }
  signals = outWS->getSignalArray();
  errors = outWS->getErrorSquaredArray();

  // The dimension (in the output workspace) along which we chunk for parallel
  // processing
  // TODO: Find the smartest dimension to chunk against
  size_t chunkDimension = 0;

  // How many bins (in that dimension) per chunk.
  // Try to split it so each core will get 2 tasks:
  int chunkNumBins =
      int(binDimensions[chunkDimension]->getNBins() /
          (Mantid::Kernel::ThreadPool::getNumPhysicalCores() * 2));
  if (chunkNumBins < 1)
    chunkNumBins = 1;

  // Do we actually do it in parallel?
  bool doParallel = getProperty("Parallel");
  // Not if file-backed!
  if (bc->isFileBacked())
    doParallel = false;
  if (!doParallel)
    chunkNumBins = int(binDimensions[chunkDimension]->getNBins());

  // Total number of steps
  size_t progNumSteps = 0;
  if (prog)
    prog->setNotifyStep(0.1);
  if (prog)
    prog->resetNumSteps(100, 0.00, 1.0);

  // Run the chunks in parallel. There is no overlap in the output workspace,
  // so it is thread safe to write to it.
  PRAGMA_OMP( parallel for schedule(dynamic,1) if (doParallel) )
  for (int chunk = 0; chunk < int(binDimensions[chunkDimension]->getNBins());
       chunk += chunkNumBins) {
    PARALLEL_START_INTERUPT_REGION
    // Region of interest for this chunk.
    size_t *chunkMin = new size_t[outD];
    size_t *chunkMax = new size_t[outD];
    for (size_t bd = 0; bd < outD; bd++) {
      // Same limits in the other dimensions
      chunkMin[bd] = 0;
      chunkMax[bd] = binDimensions[bd]->getNBins();
    }

    // Parcel out a chunk in that single dimension
    chunkMin[chunkDimension] = size_t(chunk);
    if (size_t(chunk + chunkNumBins) >
        binDimensions[chunkDimension]->getNBins())
      chunkMax[chunkDimension] = binDimensions[chunkDimension]->getNBins();
    else
      chunkMax[chunkDimension] = size_t(chunk + chunkNumBins);

    // Build an implicit function (it needs to be in the space of the
    // MDEventWorkspace)
    MDImplicitFunction *function =
        this->getImplicitFunctionForChunk(chunkMin, chunkMax);

    // Use getBoxes() to get an array with a pointer to each box
    std::vector<IMDBox<MDE, nd> *> boxes;
    // Leaf-only; no depth limit; with the implicit function passed to it.
    ws->getBox()->getBoxes(boxes, 1000, true, function);

    // Sort boxes by file position IF file backed. This reduces seeking time,
    // hopefully.
    if (bc->isFileBacked())
      IMDBox<MDE, nd>::sortBoxesByFilePos(boxes);

    // For progress reporting, the # of boxes
    if (prog) {
      PARALLEL_CRITICAL(BinToMDHistoWorkspace_progress) {
        std::cout << "Chunk " << chunk << ": found " << boxes.size()
                  << " boxes within the implicit function." << std::endl;
        progNumSteps += boxes.size();
        prog->setNumSteps(progNumSteps);
      }
    }

    // Go through every box for this chunk.
    for (size_t i = 0; i < boxes.size(); i++) {
      MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxes[i]);
      // Perform the binning in this separate method.
      if (box)
        this->binMDBox(box, chunkMin, chunkMax);

      // Progress reporting
      if (prog)
        prog->report();
    } // for each box in the vector

    // Free the per-chunk limits (they were leaked in the original)
    delete[] chunkMin;
    delete[] chunkMax;
    PARALLEL_END_INTERUPT_REGION
  } // for each chunk in parallel
  PARALLEL_CHECK_INTERUPT_REGION

  // Now the implicit function
  if (implicitFunction) {
    prog->report("Applying implicit function.");
    signal_t nan = std::numeric_limits<signal_t>::quiet_NaN();
    outWS->applyImplicitFunction(implicitFunction, nan, nan);
  }
}