Int_t mp001_fillHistos(UInt_t nWorkers = 4)
{
   // Total amount of numbers
   const UInt_t nNumbers = 20000000U;

   // We define our work item
   auto workItem = [nNumbers](UInt_t workerID) {
      // One generator, file and histogram per worker
      TRandom3 workerRndm(workerID); // Change the seed
      TFile f(Form("myFile_%u.root", workerID), "RECREATE");
      TH1F h(Form("myHisto_%u", workerID), "The Histogram", 64, -4, 4);
      for (UInt_t i = 0; i < nNumbers; ++i) {
         h.Fill(workerRndm.Gaus());
      }
      h.Write();
      return 0;
   };

   // Create the pool of workers
   TProcPool workers(nWorkers);

   // Fill the pool with work
   std::forward_list<UInt_t> workerIDs(nWorkers);
   std::iota(std::begin(workerIDs), std::end(workerIDs), 0);
   workers.Map(workItem, workerIDs);

   return 0;
}
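// A possible follow-up step, not part of the macro above: each worker writes its own
// myFile_<id>.root, so the per-worker outputs can be combined afterwards. This is a
// minimal sketch using ROOT's TFileMerger; the helper name mergeWorkerFiles and the
// output file name myFile_merged.root are illustrative assumptions, and it presumes
// the default nWorkers = 4 files exist in the working directory.
Int_t mergeWorkerFiles(UInt_t nWorkers = 4)
{
   TFileMerger merger;
   merger.OutputFile("myFile_merged.root");
   for (UInt_t workerID = 0; workerID < nWorkers; ++workerID) {
      merger.AddFile(Form("myFile_%u.root", workerID));
   }
   // Merge() returns kTRUE on success
   return merger.Merge() ? 0 : 1;
}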
bool TaskSchedulerImpl::Initialize(std::size_t workerCount)
{
	if (IsInitialized())
		return true; // Already initialized

	#if NAZARA_CORE_SAFE
	if (workerCount == 0)
	{
		NazaraError("Invalid worker count ! (0)");
		return false;
	}
	#endif

	s_workerCount = workerCount;
	s_doneEvents.reset(new HANDLE[workerCount]);
	s_workers.reset(new Worker[workerCount]);
	s_workerThreads.reset(new HANDLE[workerCount]);

	// Each worker's identifier must stay alive until its thread has been properly started
	std::unique_ptr<std::size_t[]> workerIDs(new std::size_t[workerCount]);

	for (std::size_t i = 0; i < workerCount; ++i)
	{
		// Initialize each worker's events, mutex and thread
		Worker& worker = s_workers[i];
		InitializeCriticalSection(&worker.queueMutex);
		worker.wakeEvent = CreateEventW(nullptr, false, false, nullptr);
		worker.running = true;
		worker.workCount = 0;

		s_doneEvents[i] = CreateEventW(nullptr, true, false, nullptr);

		// The thread starts, signals that it is ready to work (s_doneEvents) and waits to be woken up
		workerIDs[i] = i;
		s_workerThreads[i] = reinterpret_cast<HANDLE>(_beginthreadex(nullptr, 0, &WorkerProc, &workerIDs[i], 0, nullptr));
	}

	// Wait until all workers have gone idle
	WaitForMultipleObjects(s_workerCount, &s_doneEvents[0], true, INFINITE);

	return true;
}
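// Simplified sketch, not the actual Nazara Engine implementation, of what the
// WorkerProc entry point referenced above could look like. It only illustrates the
// handshake described in the comments: the worker signals its done event once idle
// (which is what Initialize() waits for) and then sleeps on its wake event until new
// work is pushed to its queue. Names s_workers and s_doneEvents are taken from the
// code above; the loop body is elided.
unsigned int __stdcall WorkerProc(void* userdata)
{
	std::size_t workerID = *static_cast<std::size_t*>(userdata);
	Worker& worker = s_workers[workerID];

	while (worker.running)
	{
		// Tell Initialize() (and later synchronisation points) that this worker is idle
		SetEvent(s_doneEvents[workerID]);

		// Sleep until someone enqueues work and wakes us up
		WaitForSingleObject(worker.wakeEvent, INFINITE);

		// ... pop tasks from the worker's queue (guarded by worker.queueMutex) and run them ...
	}

	return 0;
}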
Int_t mt102_readNtuplesFillHistosAndFit()
{
   // Run in batch mode: no graphics pop-ups
   gROOT->SetBatch();

   // Perform the operation sequentially ---------------------------------------
   TChain inputChain("multiCore");
   inputChain.Add("mc101_multiCore_*.root");
   TH1F outHisto("outHisto", "Random Numbers", 128, -4, 4);
   {
      TimerRAII t("Sequential read and fit");
      inputChain.Draw("r >> outHisto");
      outHisto.Fit("gaus");
   }

   // We now go MT! ------------------------------------------------------------

   // The first, fundamental operation to be performed in order to make ROOT
   // thread-aware.
   ROOT::EnableMT();

   // We adapt our parallelisation to the number of input files
   const auto nFiles = inputChain.GetListOfFiles()->GetEntries();
   std::forward_list<UInt_t> workerIDs(nFiles);
   std::iota(std::begin(workerIDs), std::end(workerIDs), 0);

   // We define the histograms we'll fill
   std::vector<TH1F> histograms;
   histograms.reserve(nFiles);
   for (auto workerID : workerIDs) {
      histograms.emplace_back(Form("outHisto_%u", workerID), "Random Numbers", 128, -4, 4);
   }

   // We define our work item
   auto workItem = [&histograms](UInt_t workerID) {
      TFile f(Form("mc101_multiCore_%u.root", workerID));
      TNtuple *ntuple = nullptr;
      f.GetObject("multiCore", ntuple);
      auto &histo = histograms.at(workerID);
      for (Long64_t index = 0; index < ntuple->GetEntriesFast(); ++index) {
         ntuple->GetEntry(index);
         histo.Fill(ntuple->GetArgs()[0]);
      }
   };

   TH1F sumHistogram("SumHisto", "Random Numbers", 128, -4, 4);

   // Create the collection which will hold the threads, our "pool"
   std::vector<std::thread> workers;

   // We measure time here as well
   {
      TimerRAII t("Parallel execution");

      // Spawn the workers, filling the "pool"
      for (auto workerID : workerIDs) {
         workers.emplace_back(workItem, workerID);
      }

      // Now join them
      for (auto &&worker : workers)
         worker.join();

      // And reduce
      std::for_each(std::begin(histograms), std::end(histograms),
                    [&sumHistogram](const TH1F &h) { sumHistogram.Add(&h); });

      sumHistogram.Fit("gaus", 0);
   }

   return 0;
}
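// TimerRAII is used above but not defined in this snippet. Below is a minimal sketch
// of such a scope timer (an assumption, not necessarily the helper used with the
// original macro): it records the construction time and prints the elapsed wall-clock
// time together with the given label when it goes out of scope.
#include <chrono>
#include <iostream>
#include <string>

class TimerRAII {
   std::chrono::high_resolution_clock::time_point fStart;
   std::string fMeta;

public:
   TimerRAII(const std::string &meta) : fStart(std::chrono::high_resolution_clock::now()), fMeta(meta) {}
   ~TimerRAII()
   {
      const std::chrono::duration<double> elapsed = std::chrono::high_resolution_clock::now() - fStart;
      std::cout << fMeta << ": " << elapsed.count() << " s" << std::endl;
   }
};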