void l1menu::TriggerRatePlot::addSample( const l1menu::ISample& sample, std::vector<TriggerRatePlot>& ratePlots )
{
	float weightPerEvent=sample.eventRate()/sample.sumOfWeights();

	// Create cached triggers for each of the rate plots, which depending on the concrete type
	// of the ISample may or may not significantly increase the speed at which this next loop happens.
	std::vector< std::unique_ptr<l1menu::ICachedTrigger> > cachedTriggers;
	for( const auto& ratePlot : ratePlots ) cachedTriggers.push_back( sample.createCachedTrigger( *ratePlot.pTrigger_ ) );

	// Now instead of calling addSample() for each TriggerRatePlot individually, get each IEvent from the sample
	// and pass that to each rate plot. This is because (depending on the ISample concrete type) getting the
	// IEvent can be computationally expensive.
	std::vector< std::unique_ptr<l1menu::ICachedTrigger> >::const_iterator iTrigger;
	std::vector<TriggerRatePlot>::iterator iRatePlot;

	for( size_t eventNumber=0; eventNumber<sample.numberOfEvents(); ++eventNumber )
	{
		const l1menu::IEvent& event=sample.getEvent(eventNumber);

		for( iTrigger=cachedTriggers.begin(), iRatePlot=ratePlots.begin();
				iTrigger!=cachedTriggers.end() && iRatePlot!=ratePlots.end();
				++iTrigger, ++iRatePlot )
		{
			iRatePlot->addEvent( event, *iTrigger, weightPerEvent );
		}
	} // end of loop over events
}
void l1menu::TriggerRatePlot::addSample( const l1menu::ISample& sample )
{
	float weightPerEvent=sample.eventRate()/sample.sumOfWeights();

	// Create a cached trigger, which depending on the concrete type of the ISample
	// may or may not significantly increase the speed at which this next loop happens.
	std::unique_ptr<l1menu::ICachedTrigger> pCachedTrigger=sample.createCachedTrigger( *pTrigger_ );

	for( size_t eventNumber=0; eventNumber<sample.numberOfEvents(); ++eventNumber )
	{
		addEvent( sample.getEvent(eventNumber), pCachedTrigger, weightPerEvent );
	} // end of loop over events
}
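// Usage sketch for the two addSample() overloads above. This is illustrative only:
// "sample" stands for any concrete l1menu::ISample (such as the ReducedSample mentioned
// further down), and buildRatePlots() is a hypothetical helper that constructs the plots.
//
//     std::vector<l1menu::TriggerRatePlot> ratePlots=buildRatePlots();
//     // Fill every plot in a single pass over the sample (one getEvent() call per event):
//     ratePlots.front().addSample( sample, ratePlots );
//     // ...as opposed to filling each plot separately, which loops over the sample once per plot:
//     // for( auto& ratePlot : ratePlots ) ratePlot.addSample( sample );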
l1menu::implementation::MenuRateImplementation::MenuRateImplementation( const l1menu::TriggerMenu& menu, const l1menu::ISample& sample )
{
	// The sum of event weights that pass each trigger
	std::vector<float> weightOfEventsPassed( menu.numberOfTriggers() );
	// The sum of weights squared that pass each trigger. Used to calculate the error.
	std::vector<float> weightSquaredOfEventsPassed( menu.numberOfTriggers() );
	// The sum of weights (and weights squared) of events that pass only the given trigger
	std::vector<float> weightOfEventsPure( menu.numberOfTriggers() );
	std::vector<float> weightSquaredOfEventsPure( menu.numberOfTriggers() );
	float weightOfEventsPassingAnyTrigger=0;
	float weightSquaredOfEventsPassingAnyTrigger=0;
	float weightOfAllEvents=0;

	// Using cached triggers significantly increases speed for ReducedSample
	// because it cuts out expensive string comparisons when querying the trigger
	// parameters.
	std::vector< std::unique_ptr<l1menu::ICachedTrigger> > cachedTriggers;
	for( size_t triggerNumber=0; triggerNumber<menu.numberOfTriggers(); ++triggerNumber )
	{
		cachedTriggers.push_back( sample.createCachedTrigger( menu.getTrigger( triggerNumber ) ) );
	}

	size_t numberOfLastPassedTrigger=0; // This is just so I can work out the pure rate

	for( size_t eventNumber=0; eventNumber<sample.numberOfEvents(); ++eventNumber )
	{
		const l1menu::IEvent& event=sample.getEvent(eventNumber);
		float weight=event.weight();
		weightOfAllEvents+=weight;

		size_t numberOfTriggersPassed=0;

		for( size_t triggerNumber=0; triggerNumber<cachedTriggers.size(); ++triggerNumber )
		{
			if( cachedTriggers[triggerNumber]->apply(event) )
			{
				// If the event passes the trigger, increment the counters
				++numberOfTriggersPassed;
				weightOfEventsPassed[triggerNumber]+=weight;
				weightSquaredOfEventsPassed[triggerNumber]+=(weight*weight);
				numberOfLastPassedTrigger=triggerNumber; // If only one trigger passes, this is used to increment the pure counter
			}
		}

		// See if I should increment any of the pure or total counters
		if( numberOfTriggersPassed==1 )
		{
			weightOfEventsPure[numberOfLastPassedTrigger]+=weight;
			weightSquaredOfEventsPure[numberOfLastPassedTrigger]+=(weight*weight);
		}
		if( numberOfTriggersPassed>0 )
		{
			weightOfEventsPassingAnyTrigger+=weight;
			weightSquaredOfEventsPassingAnyTrigger+=(weight*weight);
		}
	}

	float scaling=sample.eventRate();

	for( size_t triggerNumber=0; triggerNumber<cachedTriggers.size(); ++triggerNumber )
	{
		float fraction=weightOfEventsPassed[triggerNumber]/weightOfAllEvents;
		float fractionError=std::sqrt(weightSquaredOfEventsPassed[triggerNumber])/weightOfAllEvents;
		float pureFraction=weightOfEventsPure[triggerNumber]/weightOfAllEvents;
		float pureFractionError=std::sqrt(weightSquaredOfEventsPure[triggerNumber])/weightOfAllEvents;
		triggerRates_.push_back( TriggerRateImplementation( menu.getTrigger(triggerNumber),
				fraction, fractionError, fraction*scaling, fractionError*scaling,
				pureFraction, pureFractionError, pureFraction*scaling, pureFractionError*scaling ) );
		//triggerRates_.push_back( std::move(TriggerRateImplementation(menu.getTrigger(triggerNumber),weightOfEventsPassed[triggerNumber],weightSquaredOfEventsPassed[triggerNumber],weightOfEventsPure[triggerNumber],weightSquaredOfEventsPure[triggerNumber],*this)) );
	}

	//
	// Now I have everything I need to calculate all of the values required by the interface
	//
	totalFraction_=weightOfEventsPassingAnyTrigger/weightOfAllEvents;
	totalFractionError_=std::sqrt(weightSquaredOfEventsPassingAnyTrigger)/weightOfAllEvents;
	totalRate_=totalFraction_*scaling;
	totalRateError_=totalFractionError_*scaling;
}
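// Summary of the calculation in the constructor above (all quantities are weighted):
//
//     fraction      = sum( weights of events passing the trigger ) / sum( all weights )
//     fractionError = sqrt( sum( squared weights of passing events ) ) / sum( all weights )
//     rate          = fraction      * sample.eventRate()
//     rateError     = fractionError * sample.eventRate()
//
// The "pure" quantities use the same formulas restricted to events that pass only the
// trigger in question, and the total rate uses events that pass at least one trigger.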