void ConvToMDEventsWS::runConversion(API::Progress *pProgress) {
  // Get the box controller
  Mantid::API::BoxController_sptr bc =
      m_OutWSWrapper->pWorkspace()->getBoxController();
  size_t lastNumBoxes = bc->getTotalNumMDBoxes();
  size_t nEventsInWS = m_OutWSWrapper->pWorkspace()->getNPoints();
  // Is the access to input events thread-safe?
  // bool MultiThreadedAdding = m_EventWS->threadSafe();

  // preprocessed detectors ensure that each detector has its own spectrum
  size_t nValidSpectra = m_NSpectra;

  //--->>> Thread control stuff
  Kernel::ThreadSchedulerFIFO *ts(nullptr);
  int nThreads(m_NumThreads);
  if (nThreads < 0)
    nThreads = 0; // a negative m_NumThreads means use all cores, 0 means no
                  // extra threads and a positive number requests that many
                  // threads
  bool runMultithreaded = false;
  if (m_NumThreads != 0) {
    runMultithreaded = true;
    // Create the thread pool that will run all of these tasks; the scheduler
    // is deleted by the thread pool
    ts = new Kernel::ThreadSchedulerFIFO();
    // the pool starts the requested number of threads, or one per machine
    // core when 0 is passed to its constructor
    pProgress->resetNumSteps(nValidSpectra, 0, 1);
  }
  Kernel::ThreadPool tp(ts, nThreads, new API::Progress(*pProgress));
  //<<<-- Thread control stuff

  // if any property dimension is outside of the data range requested, the job
  // is done
  if (!m_QConverter->calcGenericVariables(m_Coord, m_NDims))
    return;

  size_t eventsAdded = 0;
  for (size_t wi = 0; wi < nValidSpectra; wi++) {
    size_t nConverted = this->conversionChunk(wi);
    eventsAdded += nConverted;
    nEventsInWS += nConverted;
    // Give this task to the scheduler
    // double cost = double(el.getNumberEvents());
    // ts->push(new FunctionTask(func, cost));

    // Keep a running total of how many events we've added
    if (bc->shouldSplitBoxes(nEventsInWS, eventsAdded, lastNumBoxes)) {
      if (runMultithreaded) {
        // Do all the adding tasks
        tp.joinAll();
        // Now do all the splitting tasks
        m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(ts);
        if (ts->size() > 0)
          tp.joinAll();
      } else {
        // done with a null scheduler because a single-threaded split can
        // potentially be performed more efficiently
        m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(nullptr);
      }
      // Count the new # of boxes.
      lastNumBoxes = m_OutWSWrapper->pWorkspace()
                         ->getBoxController()
                         ->getTotalNumMDBoxes();
      eventsAdded = 0;
      pProgress->report(wi);
    }
  }
  // Do a final splitting of everything
  if (runMultithreaded) {
    tp.joinAll();
    m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(ts);
    tp.joinAll();
  } else {
    m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(nullptr);
  }

  // Recount totals at the end.
  m_OutWSWrapper->pWorkspace()->refreshCache();
  // m_OutWSWrapper->refreshCentroid();
  pProgress->report();

  /// Set the special coordinate system flag on the output workspace.
  m_OutWSWrapper->pWorkspace()->setCoordinateSystem(m_coordinateSystem);
}
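/* Illustrative sketch (not part of the Mantid algorithm above): the loop in
 * runConversion batches event additions and only runs a split pass when the
 * BoxController's shouldSplitBoxes() says it is worthwhile, then resets the
 * running total. Below is a minimal standalone model of that add-then-split
 * batching pattern, using std::thread in place of Kernel::ThreadPool; every
 * name in the `sketch` namespace (addChunk, shouldSplit, splitBoxRange, the
 * 10k-events-per-box threshold) is a hypothetical stand-in, not a real
 * Mantid API. */
#include <algorithm>
#include <cstddef>
#include <thread>
#include <vector>

namespace sketch {
// pretend work: "converts" one chunk and reports how many events it added
std::size_t addChunk(std::size_t /*chunkIndex*/) { return 1000; }
// assumed heuristic: split once ~10000 events per existing box accumulate
bool shouldSplit(std::size_t eventsSinceSplit, std::size_t nBoxes) {
  return eventsSinceSplit > 10000 * nBoxes;
}
// pretend split task over a contiguous range of boxes
void splitBoxRange(std::size_t /*first*/, std::size_t /*last*/) {}

void runConversionModel(std::size_t nChunks, std::size_t nThreads,
                        std::size_t &nBoxes) {
  std::size_t eventsSinceSplit = 0;
  for (std::size_t i = 0; i < nChunks; ++i) {
    eventsSinceSplit += addChunk(i);
    if (!shouldSplit(eventsSinceSplit, nBoxes))
      continue;
    // Fan the split pass out over a thread team and wait for it, mirroring
    // the tp.joinAll() / splitAllIfNeeded(ts) / tp.joinAll() sequence above.
    std::vector<std::thread> team;
    const std::size_t boxesPerThread = nBoxes / nThreads + 1;
    for (std::size_t t = 0; t < nThreads; ++t)
      team.emplace_back(splitBoxRange, t * boxesPerThread,
                        std::min(nBoxes, (t + 1) * boxesPerThread));
    for (auto &th : team)
      th.join();
    nBoxes *= 2;          // pretend every box split in two
    eventsSinceSplit = 0; // reset the running total, as the loop above does
  }
}
} // namespace sketch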
/** Run the conversion as a multithreaded job */
void ConvToMDHistoWS::runConversion(API::Progress *pProgress) {
  // counter for the number of events
  size_t nAddedEvents(0);
  //
  Mantid::API::BoxController_sptr bc =
      m_OutWSWrapper->pWorkspace()->getBoxController();
  size_t lastNumBoxes = bc->getTotalNumMDBoxes();
  size_t nEventsInWS = m_OutWSWrapper->pWorkspace()->getNPoints();
  //
  const size_t specSize = m_InWS2D->blocksize();
  // preprocessed detectors associate each spectrum with a detector (position)
  size_t nValidSpectra = m_NSpectra;

  // if any property dimension is outside of the data range requested, the job
  // is done
  if (!m_QConverter->calcGenericVariables(m_Coord, m_NDims))
    return;

  //--->>> Thread control stuff
  Kernel::ThreadSchedulerFIFO *ts(nullptr);
  int nThreads(m_NumThreads);
  if (nThreads < 0)
    nThreads = 0; // a negative m_NumThreads means use all cores, 0 means no
                  // extra threads and a positive number requests that many
                  // threads
  bool runMultithreaded = false;
  if (m_NumThreads != 0) {
    runMultithreaded = true;
    // Create the thread pool that will run all of these tasks; the scheduler
    // is deleted by the thread pool
    ts = new Kernel::ThreadSchedulerFIFO();
    // the pool starts the requested number of threads, or one per machine
    // core when 0 is passed to its constructor
    pProgress->resetNumSteps(nValidSpectra, 0, 1);
  }
  Kernel::ThreadPool tp(ts, nThreads, new API::Progress(*pProgress));
  //<<<-- Thread control stuff

  if (runMultithreaded)
    nThreads = static_cast<int>(tp.getNumPhysicalCores());
  else
    nThreads = 1;

  // estimate the amount of data conversion a single thread should perform
  // TODO: this piece of code should be carefully rethought
  size_t eventsChunkNum = bc->getSignificantEventsNumber();
  this->estimateThreadWork(nThreads, specSize, eventsChunkNum);

  // External loop over the spectra:
  for (size_t i = 0; i < nValidSpectra; i += m_spectraChunk) {
    size_t nThreadEv = this->conversionChunk(i);
    nAddedEvents += nThreadEv;
    nEventsInWS += nThreadEv;

    if (bc->shouldSplitBoxes(nEventsInWS, nAddedEvents, lastNumBoxes)) {
      if (runMultithreaded) {
        // Do all the adding tasks
        tp.joinAll();
        // Now do all the splitting tasks
        m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(ts);
        if (ts->size() > 0)
          tp.joinAll();
      } else {
        // done with a null scheduler because a single-threaded split can
        // potentially be performed more efficiently
        m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(nullptr);
      }
      // Count the new # of boxes.
      lastNumBoxes = bc->getTotalNumMDBoxes();
      nAddedEvents = 0;
      pProgress->report(i, "Adding Events");
    }
    // TODO:
    // if (m_OutWSWrapper->ifNeedsSplitting()) {
    //   // Do all the adding tasks
    //   // tp.joinAll();
    //   // Now do all the splitting tasks
    //   // m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(ts);
    //   m_OutWSWrapper->splitList(ts);
    //   // if (ts->size() > 0) tp.joinAll();
    //   // Count the new # of boxes.
    //   lastNumBoxes =
    //       m_OutWSWrapper->pWorkspace()->getBoxController()->getTotalNumMDBoxes();
    // }
    // pProgress->report(i);
  } // end detectors loop

  // Do a final splitting of everything
  if (runMultithreaded) {
    tp.joinAll();
    m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(ts);
    tp.joinAll();
  } else {
    m_OutWSWrapper->pWorkspace()->splitAllIfNeeded(nullptr);
  }
  m_OutWSWrapper->pWorkspace()->refreshCache();
  // m_OutWSWrapper->refreshCentroid();
  pProgress->report();

  /// Set the special coordinate system flag on the output workspace.
  m_OutWSWrapper->pWorkspace()->setCoordinateSystem(m_coordinateSystem);
}
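/* Illustrative sketch (not part of the Mantid algorithm above): unlike the
 * event variant, the histogram variant walks the spectra in strides of
 * m_spectraChunk, which estimateThreadWork() derives from the thread count
 * and workload. The helper below models one way such a chunk size could be
 * chosen; the heuristic (aim for a few chunks per thread, clamped to at
 * least one spectrum) is an assumption for illustration, not Mantid's
 * actual estimateThreadWork() rule. */
#include <algorithm>
#include <cstddef>

namespace sketch {
std::size_t estimateSpectraChunk(std::size_t nSpectra, std::size_t nThreads) {
  // assumed: several chunks per thread keep the FIFO scheduler busy while
  // amortizing per-task overhead
  const std::size_t chunksPerThread = 4;
  const std::size_t chunk =
      nSpectra / std::max<std::size_t>(1, nThreads * chunksPerThread);
  return std::max<std::size_t>(1, chunk); // never smaller than one spectrum
}
} // namespace sketch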