void sim(Int_t nev=1) { gSystem->Load("liblhapdf"); gSystem->Load("libEGPythia6"); gSystem->Load("libpythia6"); gSystem->Load("libAliPythia6"); gSystem->Load("libhijing"); gSystem->Load("libTHijing"); gSystem->Load("libgeant321"); if (gSystem->Getenv("EVENT")) nev = atoi(gSystem->Getenv("EVENT")) ; AliSimulation simulator; simulator.SetMakeSDigits("TRD TOF PHOS HMPID EMCAL FMD ZDC PMD T0 VZERO"); simulator.SetMakeDigitsFromHits("ITS TPC"); simulator.SetWriteRawData("ALL","raw.root",kTRUE); simulator.SetDefaultStorage("local:///cvmfs/alice-ocdb.cern.ch/calibration/MC/Ideal"); //simulator.SetDefaultStorage(Form("local://%s/OCDB", gSystem->pwd())); simulator.SetSpecificStorage("GRP/GRP/Data", Form("local://%s",gSystem->pwd())); simulator.SetRunQA("ALL:ALL") ; simulator.SetQARefDefaultStorage("local://$ALICE_ROOT/QAref") ; for (Int_t det = 0 ; det < AliQA::kNDET ; det++) { simulator.SetQACycles((AliQAv1::DETECTORINDEX_t)det, nev+1) ; } TStopwatch timer; timer.Start(); simulator.Run(nev); timer.Stop(); timer.Print(); }
void write(int n)
{
  TRandom R;
  TStopwatch timer;

  TFile f1("mathcoreVectorIO_F.root", "RECREATE");

  // create tree
  TTree t1("t1", "Tree with new Float LorentzVector");

  XYZTVectorF *v1 = new XYZTVectorF();
  t1.Branch("LV branch", "ROOT::Math::XYZTVectorF", &v1);

  timer.Start();
  for (int i = 0; i < n; ++i) {
    double Px = R.Gaus(0, 10);
    double Py = R.Gaus(0, 10);
    double Pz = R.Gaus(0, 10);
    double E  = R.Gaus(100, 10);
    //CylindricalEta4D<double> & c = v1->Coordinates();
    //c.SetValues(Px,pY,pZ,E);
    v1->SetCoordinates(Px, Py, Pz, E);
    t1.Fill();
  }

  f1.Write();
  timer.Stop();
  std::cout << " Time for new Float Vector " << timer.RealTime() << " " << timer.CpuTime() << std::endl;
  t1.Print();
}
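// A minimal companion reader for the tree produced by write() above. This sketch is
// not part of the original macro: it assumes the same file, tree, and branch names
// and simply sums the energies as a consistency check while timing the read-back.
void read()
{
  TFile f1("mathcoreVectorIO_F.root");
  TTree *t1 = (TTree *) f1.Get("t1");
  if (!t1) return;

  XYZTVectorF *v1 = 0;
  t1->SetBranchAddress("LV branch", &v1);

  TStopwatch timer;
  timer.Start();
  double etot = 0;
  for (Long64_t i = 0; i < t1->GetEntries(); ++i) {
    t1->GetEntry(i);
    etot += v1->E();   // accumulate the stored energies
  }
  timer.Stop();
  std::cout << " Time to read Float Vector " << timer.RealTime() << " " << timer.CpuTime() << std::endl;
  std::cout << " Total energy read back = " << etot << std::endl;
}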
void run_PPToGammaGammaFiles()
{
  gROOT->LoadMacro("FlatTreeMaker_Delphes_PPToGammaGammaFiles_C.so");

  TChain *fChain = new TChain("Delphes");

  ifstream sourceFiles("PPToGammaGammaFiles.txt");
  char line[128];
  int count = 0;
  cout << "Adding files from PPToGammaGammaFiles to chain..." << endl;
  while (sourceFiles >> line) {
    fChain->Add(line);
    ++count;
  }
  cout << count << " files added!" << endl;
  sourceFiles.close();

  TStopwatch timer;
  timer.Start();

  fChain->Process("FlatTreeMaker_Delphes");

  cout << "\n\nDone!" << endl;
  cout << "CPU Time : " << timer.CpuTime() << endl;
  cout << "RealTime : " << timer.RealTime() << endl;
  cout << "\n";
}
void makePlots()
{
  gROOT->LoadMacro("analyze.C+");

  TStopwatch ts;
  ts.Start();

  TString input_ele  = "ELE_FILE_TO_RUN";
  TString input_muon = "MUON_FILE_TO_RUN";

  bool addMC = true;
  int intLumi = 19712;        // integrated luminosity in pb^-1, quoted as 19.7 fb^-1
  double metCut = -1.;
  bool displayKStest = true;
  bool blinded = true;
  int nPhotons_req = 0;

  const int nChannels = 4;
  TString channels[nChannels] = {"ele_jjj", "ele_bjj", "muon_jjj", "muon_bjj"};
  int nBtagReq[nChannels] = {0, 1, 0, 1};

  for (int i = 0; i < nChannels; i++) {
    // only run the b-tagged channels (ele_bjj and muon_bjj)
    if (i != 1 && i != 3) continue;
    if (i < 2) analyze(input_ele,  addMC, i, intLumi, metCut, nPhotons_req, nBtagReq[i], displayKStest, blinded);
    else       analyze(input_muon, addMC, i, intLumi, metCut, nPhotons_req, nBtagReq[i], displayKStest, blinded);
  }

  ts.Stop();
  std::cout << "RealTime : " << ts.RealTime()/60.0 << " minutes" << std::endl;
  std::cout << "CPUTime : " << ts.CpuTime()/60.0 << " minutes" << std::endl;
}
void sim(Int_t nev = 1)
{
  AliSimulation simu;
  simu.SetMakeSDigits("TRD TOF PHOS HMPID EMCAL MUON FMD PMD T0 ZDC VZERO");
  simu.SetMakeDigits ("TRD TOF PHOS HMPID EMCAL MUON FMD PMD T0 ZDC VZERO");
  simu.SetMakeDigitsFromHits("ITS TPC");
  simu.SetWriteRawData("ALL", "raw.root", kTRUE);

  simu.SetDefaultStorage("local://$ALICE_ROOT/OCDB");
  simu.SetSpecificStorage("GRP/GRP/Data", Form("local://%s", gSystem->pwd()));

  simu.SetRunQA("ALL:ALL");
  simu.SetQARefDefaultStorage("local://$ALICE_ROOT/OCDB");
  for (Int_t det = 0; det < AliQA::kNDET; det++) {
    simu.SetQACycles(det, 2);
  }

  TStopwatch timer;
  timer.Start();
  simu.Run(nev);
  WriteXsection();
  timer.Stop();
  timer.Print();
}
int main(int argc, char **argv)
{
  TStopwatch reloj;
  reloj.Start();

  // split the first argument by ',' into the list of input files
  string argStr = argv[1];
  vector<string> fileList;
  for (size_t i = 0, n = 0; i <= argStr.length(); i = n + 1) {
    n = argStr.find_first_of(',', i);
    if (n == string::npos) n = argStr.length();
    string tmp = argStr.substr(i, n - i);
    fileList.push_back(tmp);
  }

  Analysis1 o(fileList);
  o.EventsLoop();

  reloj.Stop();
  double tiempo = reloj.CpuTime();
  cout << "CPU time spent in the calculation = " << tiempo << endl;
  return 0;
}
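// Standalone sketch of the comma-splitting logic used in main() above, factored into
// a helper (hypothetical name splitList) so it can be tested in isolation; it keeps
// the original behaviour, including preserving empty tokens between adjacent commas.
#include <string>
#include <vector>

std::vector<std::string> splitList(const std::string &argStr)
{
  std::vector<std::string> fileList;
  for (size_t i = 0, n = 0; i <= argStr.length(); i = n + 1) {
    n = argStr.find_first_of(',', i);
    if (n == std::string::npos) n = argStr.length();
    fileList.push_back(argStr.substr(i, n - i));
  }
  return fileList;
}
// e.g. splitList("a.root,b.root,c.root") -> {"a.root", "b.root", "c.root"}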
void sim(Int_t nev = 1)
{
  gSystem->Exec(" rm itsSegmentations.root ");

  AliSimulation simulator;
  // simulator.SetMakeSDigits("");
  // simulator.SetMakeDigits("");
  simulator.SetDefaultStorage("local://$ALICE_ROOT/OCDB");
  simulator.SetSpecificStorage("GRP/GRP/Data",        Form("local://%s", gSystem->pwd()));
  simulator.SetSpecificStorage("ITS/Align/Data",      Form("local://%s", gSystem->pwd()));
  simulator.SetSpecificStorage("ITS/Calib/SimuParam", Form("local://%s", gSystem->pwd()));

  simulator.SetRunHLT("");
  simulator.SetRunQA(":");

  TStopwatch timer;
  timer.Start();
  simulator.Run(nev);
  timer.Stop();
  timer.Print();
}
void testIntegPerf(double x1, double x2, int n = 100000)
{
  std::cout << "\n\n***************************************************************\n";
  std::cout << "Test integration performances in interval [ " << x1 << " , " << x2 << " ]\n\n";

  TStopwatch timer;
  double dx = (x2 - x1) / double(n);

  //ROOT::Math::Functor1D<ROOT::Math::IGenFunction> f1(& TMath::BreitWigner);
  ROOT::Math::WrappedFunction<> f1(func);

  timer.Start();
  ROOT::Math::Integrator ig(f1);
  double s1 = 0.0;
  nc = 0;
  for (int i = 0; i < n; ++i) {
    double x = x1 + dx * i;
    s1 += ig.Integral(x1, x);
  }
  timer.Stop();
  std::cout << "Time using ROOT::Math::Integrator :\t" << timer.RealTime() << std::endl;
  std::cout << "Number of function calls = " << nc / n << std::endl;
  int pr = std::cout.precision(18);
  std::cout << s1 << std::endl;
  std::cout.precision(pr);

  //TF1 *fBW = new TF1("fBW","TMath::BreitWigner(x)",x1, x2);
  // this is faster but cannot measure number of function calls
  TF1 *fBW = new TF1("fBW", func2, x1, x2, 0);

  timer.Start();
  nc = 0;
  double s2 = 0;
  for (int i = 0; i < n; ++i) {
    double x = x1 + dx * i;
    s2 += fBW->Integral(x1, x);
  }
  timer.Stop();
  std::cout << "Time using TF1::Integral :\t\t\t" << timer.RealTime() << std::endl;
  std::cout << "Number of function calls = " << nc / n << std::endl;
  pr = std::cout.precision(18);
  std::cout << s2 << std::endl;   // print the TF1 result (the original printed s1 twice)
  std::cout.precision(pr);
}
// test using UNURAN string interface
void testStringAPI()
{
  TH1D *h1 = new TH1D("h1G", "gaussian distribution from Unuran", 100, -10, 10);
  TH1D *h2 = new TH1D("h2G", "gaussian distribution from TRandom", 100, -10, 10);

  cout << "\nTest using UNURAN string API \n\n";

  TUnuran unr;
  if (!unr.Init("normal()", "method=arou")) {
    cout << "Error initializing unuran" << endl;
    return;
  }

  int n = NGEN;
  TStopwatch w;
  w.Start();
  for (int i = 0; i < n; ++i) {
    double x = unr.Sample();
    h1->Fill(x);
  }
  w.Stop();
  cout << "Time using Unuran method " << unr.MethodName() << "\t=\t " << w.CpuTime() << endl;

  // use TRandom::Gaus
  w.Start();
  for (int i = 0; i < n; ++i) {
    double x = gRandom->Gaus(0, 1);
    h2->Fill(x);
  }
  w.Stop();
  cout << "Time using TRandom::Gaus \t=\t " << w.CpuTime() << endl;

  assert(c1 != 0);
  c1->cd(++izone);
  h1->Draw();
  c1->cd(++izone);
  h2->Draw();
}
void testDiscDistr()
{
  cout << "\nTest Discrete distributions\n\n";

  TH1D *h1 = new TH1D("h1PS", "Unuran Poisson prob", 20, 0, 20);
  TH1D *h2 = new TH1D("h2PS", "Poisson dist from TRandom", 20, 0, 20);

  double mu = 5;

  TF1 *f = new TF1("fps", poisson, 1, 0, 1);
  f->SetParameter(0, mu);

  TUnuranDiscrDist dist2 = TUnuranDiscrDist(f);
  TUnuran unr;

  // dari method (needs also the mode and pmf sum)
  dist2.SetMode(int(mu));
  dist2.SetProbSum(1.0);
  bool ret = unr.Init(dist2, "dari");
  if (!ret) return;

  TStopwatch w;
  w.Start();
  int n = NGEN;
  for (int i = 0; i < n; ++i) {
    int k = unr.SampleDiscr();
    h1->Fill(double(k));
  }
  w.Stop();
  cout << "Time using Unuran method " << unr.MethodName() << "\t=\t\t " << w.CpuTime() << endl;

  w.Start();
  for (int i = 0; i < n; ++i) {
    h2->Fill(gRandom->Poisson(mu));
  }
  w.Stop();   // stop explicitly before reading the time, as in the Unuran block above
  cout << "Time using TRandom::Poisson " << "\t=\t\t " << w.CpuTime() << endl;

  c1->cd(++izone);
  h1->SetMarkerStyle(20);
  h1->Draw("E");
  h2->Draw("same");

  std::cout << " chi2 test of UNURAN vs TRandom generated histograms: " << std::endl;
  h1->Chi2Test(h2, "UUP");
}
void generate(R &r, TH1D *h)
{
  TStopwatch w;

  r.SetSeed(0);
  //r.SetSeed(int(std::pow(2.0,28)));

  int m = NLOOP;
  int n = NEVT;
  for (int j = 0; j < m; ++j) {
    //std::cout << r.GetSeed() << " ";
    w.Start();
    // if ( n < 40000000) iseed = std::rand();
    // iseed = 0;
    //TRandom3 r3(0);
    //r.SetSeed( 0 ); // generate random seeds
    //TRandom3 r3(0);
    //r.SetSeed (static_cast<UInt_t> (4294967296.*r3.Rndm()) );

    // estimate PI
    double n1 = 0;
    double rn[2000];
    double x;
    double y;
    for (int ievt = 0; ievt < n; ievt += 1000) {
      r.RndmArray(2000, rn);
      for (int i = 0; i < 1000; i++) {
        x = rn[2*i];
        y = rn[2*i+1];
        if ((x*x + y*y) <= 1.0) n1++;
      }
    }
    double piEstimate = 4.0 * double(n1) / double(n);
    double delta = piEstimate - PI;
    h->Fill(delta);
  }
  w.Stop();

  std::cout << std::endl;
  std::cout << "Random: " << typeid(r).name()
            << "\n\tTime = " << w.RealTime() << " " << w.CpuTime() << std::endl;
  std::cout << "Time/call: " << w.CpuTime()/(2*n)*1.0E9 << std::endl;
}
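// Sketch of how the templated generate() above can be driven. Assumptions: the
// function is declared as "template <class R> void generate(R &r, TH1D *h)" and
// NLOOP, NEVT and PI are constants defined elsewhere in the macro. Any generator
// providing RndmArray(Int_t, Double_t*) can be passed in.
void runPiEstimate()
{
  TH1D *h1 = new TH1D("hDelta1", "pi estimate - pi (TRandom3)", 100, -0.01, 0.01);
  TH1D *h2 = new TH1D("hDelta2", "pi estimate - pi (TRandom2)", 100, -0.01, 0.01);

  TRandom3 r3;
  TRandom2 r2;
  generate(r3, h1);   // Mersenne Twister
  generate(r2, h2);   // Tausworthe generator
}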
RooAddPdf fitZToMuMuGammaMassUnbinned( const char *filename = "ZMuMuGammaMass_36.1ipb_EE.txt", // const char *filename = "ZMuMuGammaMass_2.9ipb_EB.txt", // const char *filename = "ZMuMuGammaMass_2.9ipb_EE.txt", // const char *filename = "ZMuMuGammaMass_Zmumu_Spring10_EB.txt", // const char *filename = "DimuonMass_data_Nov4ReReco.txt", const char* plotOpt = "NEU", const int nbins = 60) { gROOT->ProcessLine(".L tdrstyle.C"); setTDRStyle(); gStyle->SetPadRightMargin(0.05); double minMass = 60; double maxMass = 120; RooRealVar mass("mass","m(#mu#mu#gamma)", minMass, maxMass,"GeV/c^{2}"); // Read data set RooDataSet *data = RooDataSet::read(filename,RooArgSet(mass)); // RooDataSet *dataB = RooDataSet::read(filenameB,RooArgSet(mass)); // Build p.d.f. //////////////////////////////////////////////// // Parameters // //////////////////////////////////////////////// // Signal p.d.f. parameters // Parameters for a Gaussian and a Crystal Ball Lineshape RooRealVar cbBias ("#Deltam_{CB}", "CB Bias", 0.05, -2, 2,"GeV/c^{2}"); RooRealVar cbSigma("#sigma_{CB}","CB Width", 1.38, 0.01, 10.0,"GeV/c^{2}"); RooRealVar cbCut ("a_{CB}","CB Cut", 1.5, 0.1, 2.0); RooRealVar cbPower("n_{CB}","CB Power", 1.3, 0.1, 20.0); // cbSigma.setConstant(kTRUE); // cbCut.setConstant(kTRUE); // cbPower.setConstant(kTRUE); // Parameters for Breit-Wigner RooRealVar bwMean("m_{Z}","BW Mean", 91.1876, "GeV/c^{2}"); RooRealVar bwWidth("#Gamma_{Z}", "BW Width", 2.4952, "GeV/c^{2}"); // Keep Breit-Wigner parameters fixed to the PDG values // bwMean.setConstant(kTRUE); // bwWidth.setConstant(kTRUE); // Background p.d.f. parameters // Parameters for exponential RooRealVar expRate("#lambda_{exp}", "Exponential Rate", -0.119, -10, 1); // expRate.setConstant(kTRUE); // fraction of signal // RooRealVar frac("frac", "Signal Fraction", 0.1,0.,0.3.); /* RooRealVar nsig("N_{S}", "#signal events", 9000, 0.,10000.); RooRealVar nbkg("N_{B}", "#background events", 1000,2,10000.);*/ RooRealVar nsig("N_{S}", "#signal events", 29300, 0.1, 100000.); RooRealVar nbkg("N_{B}", "#background events", 0, 0., 10000.); // nbkg.setConstant(kTRUE); //////////////////////////////////////////////// // P.D.F.s // //////////////////////////////////////////////// // Di-photon mass signal p.d.f. RooBreitWigner bw("bw", "bw", mass, bwMean, bwWidth); // RooGaussian signal("signal", "A Gaussian Lineshape", mass, m0, sigma); RooCBShape cball("cball", "A Crystal Ball Lineshape", mass, cbBias, cbSigma, cbCut, cbPower); mass.setBins(100000, "fft"); RooFFTConvPdf BWxCB("BWxCB","bw (X) crystall ball", mass, bw, cball); // Di-photon mass background p.d.f. RooExponential bg("bg","bkgd exp", mass, expRate); // Di-photon mass model p.d.f. 
RooAddPdf model("model", "signal + background mass model", RooArgList(BWxCB, bg), RooArgList(nsig, nbkg)); TStopwatch t ; t.Start() ; model.fitTo(*data,FitOptions("mh"),Optimize(0),Timer(1)); // signal->fitTo(*data,FitOptions("mh"),Optimize(0),Timer(1)); t.Print() ; TCanvas *c = new TCanvas("c","Unbinned Invariant Mass Fit", 0,0,800,600); // Plot the fit results RooPlot* plot = mass.frame(Range(minMass,maxMass),Bins(nbins)); // Plot 1 // dataB->plotOn(plot, MarkerColor(kRed), LineColor(kRed)); data->plotOn(plot); // model.plotOn(plot); model.plotOn(plot); //model.paramOn(plot, Format(plotOpt, AutoPrecision(1)), Parameters(RooArgSet(nsig, nbkg, m0, sigma))); model.paramOn(plot, Format(plotOpt, AutoPrecision(2) ), Parameters(RooArgSet(cbBias, cbSigma, cbCut, cbPower, bwMean, bwWidth, expRate, nsig, nbkg)), Layout(.67, 0.97, 0.97), ShowConstants(kTRUE) ); // model.plotOn(plot, Components("signal"), LineStyle(kDashed), LineColor(kRed)); model.plotOn(plot, Components("bg"), LineStyle(kDashed), LineColor(kRed)); plot->Draw(); // TLatex * tex = new TLatex(0.2,0.8,"CMS preliminary"); // tex->SetNDC(); // tex->SetTextFont(42); // tex->SetLineWidth(2); // tex->Draw(); // tex->DrawLatex(0.2, 0.725, "7 TeV Data, L = 258 pb^{-1}"); // // float fsig_peak = NormalizedIntegral(model, // mass, // cbBias.getVal() - 2.5*cbSigma.getVal(), // cbBias.getVal() + 2.5*cbSigma.getVal() // ); // float fbkg_peak = NormalizedIntegral(bg, // mass, // m0.getVal() - 2.5*sigma.getVal(), // m0.getVal() + 2.5*sigma.getVal() // ); /* double nsigVal = fsig_peak * nsig.getVal(); double nsigErr = fsig_peak * nsig.getError(); double nsigErrRel = nsigErr / nsigVal;*/ // double nbkgVal = fbkg_peak * nbkg.getVal(); // double nbkgErr = fbkg_peak * nbkg.getError(); // double nbkgErrRel = nbkgErr / nbkgVal; // cout << "nsig " << nsigVal << " +/- " << nsigErr << endl; // cout << "S/B_{#pm2.5#sigma} " << nsigVal/nbkgVal << " +/- " // << (nsigVal/nbkgVal)*sqrt(nsigErrRel*nsigErrRel + nbkgErrRel*nbkgErrRel) // << endl; // tex->DrawLatex(0.2, 0.6, Form("N_{S} = %.0f#pm%.0f", nsigVal, nsigErr) ); // tex->DrawLatex(0.2, 0.525, Form("S/B_{#pm2.5#sigma} = %.1f", nsigVal/nbkgVal) ); // tex->DrawLatex(0.2, 0.45, Form("#frac{S}{#sqrt{B}}_{#pm2.5#sigma} = %.1f", nsigVal/sqrt(nbkgVal))); // leg = new TLegend(0.65,0.6,0.9,0.75); // leg->SetFillColor(kWhite); // leg->SetLineColor(kWhite); // leg->SetShadowColor(kWhite); // leg->SetTextFont(42); // TLegendEntry * ldata = leg->AddEntry(data, "Opposite Sign"); // TLegendEntry * ldataB = leg->AddEntry(dataB, "Same Sign"); // ldata->SetMarkerStyle(20); // ldataB->SetMarkerStyle(20); // ldataB->SetMarkerColor(kRed); // leg->Draw(); return model; }
// internal routine to run the inverter HypoTestInverterResult * RooStats::HypoTestInvTool::RunInverter(RooWorkspace * w, const char * modelSBName, const char * modelBName, const char * dataName, int type, int testStatType, bool useCLs, int npoints, double poimin, double poimax, int ntoys, bool useNumberCounting, const char * nuisPriorName ){ std::cout << "Running HypoTestInverter on the workspace " << w->GetName() << std::endl; w->Print(); RooAbsData * data = w->data(dataName); if (!data) { Error("StandardHypoTestDemo","Not existing data %s",dataName); return 0; } else std::cout << "Using data set " << dataName << std::endl; if (mUseVectorStore) { RooAbsData::setDefaultStorageType(RooAbsData::Vector); data->convertToVectorStore() ; } // get models from WS // get the modelConfig out of the file ModelConfig* bModel = (ModelConfig*) w->obj(modelBName); ModelConfig* sbModel = (ModelConfig*) w->obj(modelSBName); if (!sbModel) { Error("StandardHypoTestDemo","Not existing ModelConfig %s",modelSBName); return 0; } // check the model if (!sbModel->GetPdf()) { Error("StandardHypoTestDemo","Model %s has no pdf ",modelSBName); return 0; } if (!sbModel->GetParametersOfInterest()) { Error("StandardHypoTestDemo","Model %s has no poi ",modelSBName); return 0; } if (!sbModel->GetObservables()) { Error("StandardHypoTestInvDemo","Model %s has no observables ",modelSBName); return 0; } if (!sbModel->GetSnapshot() ) { Info("StandardHypoTestInvDemo","Model %s has no snapshot - make one using model poi",modelSBName); sbModel->SetSnapshot( *sbModel->GetParametersOfInterest() ); } // case of no systematics // remove nuisance parameters from model if (noSystematics) { const RooArgSet * nuisPar = sbModel->GetNuisanceParameters(); if (nuisPar && nuisPar->getSize() > 0) { std::cout << "StandardHypoTestInvDemo" << " - Switch off all systematics by setting them constant to their initial values" << std::endl; RooStats::SetAllConstant(*nuisPar); } if (bModel) { const RooArgSet * bnuisPar = bModel->GetNuisanceParameters(); if (bnuisPar) RooStats::SetAllConstant(*bnuisPar); } } if (!bModel || bModel == sbModel) { Info("StandardHypoTestInvDemo","The background model %s does not exist",modelBName); Info("StandardHypoTestInvDemo","Copy it from ModelConfig %s and set POI to zero",modelSBName); bModel = (ModelConfig*) sbModel->Clone(); bModel->SetName(TString(modelSBName)+TString("_with_poi_0")); RooRealVar * var = dynamic_cast<RooRealVar*>(bModel->GetParametersOfInterest()->first()); if (!var) return 0; double oldval = var->getVal(); var->setVal(0); bModel->SetSnapshot( RooArgSet(*var) ); var->setVal(oldval); } else { if (!bModel->GetSnapshot() ) { Info("StandardHypoTestInvDemo","Model %s has no snapshot - make one using model poi and 0 values ",modelBName); RooRealVar * var = dynamic_cast<RooRealVar*>(bModel->GetParametersOfInterest()->first()); if (var) { double oldval = var->getVal(); var->setVal(0); bModel->SetSnapshot( RooArgSet(*var) ); var->setVal(oldval); } else { Error("StandardHypoTestInvDemo","Model %s has no valid poi",modelBName); return 0; } } } // check model has global observables when there are nuisance pdf // for the hybrid case the globobs are not needed if (type != 1 ) { bool hasNuisParam = (sbModel->GetNuisanceParameters() && sbModel->GetNuisanceParameters()->getSize() > 0); bool hasGlobalObs = (sbModel->GetGlobalObservables() && sbModel->GetGlobalObservables()->getSize() > 0); if (hasNuisParam && !hasGlobalObs ) { // try to see if model has nuisance parameters first RooAbsPdf * constrPdf = 
RooStats::MakeNuisancePdf(*sbModel,"nuisanceConstraintPdf_sbmodel"); if (constrPdf) { Warning("StandardHypoTestInvDemo","Model %s has nuisance parameters but no global observables associated",sbModel->GetName()); Warning("StandardHypoTestInvDemo","\tThe effect of the nuisance parameters will not be treated correctly "); } } } // run first a data fit const RooArgSet * poiSet = sbModel->GetParametersOfInterest(); RooRealVar *poi = (RooRealVar*)poiSet->first(); std::cout << "StandardHypoTestInvDemo : POI initial value: " << poi->GetName() << " = " << poi->getVal() << std::endl; // fit the data first (need to use constraint ) TStopwatch tw; bool doFit = initialFit; if (testStatType == 0 && initialFit == -1) doFit = false; // case of LEP test statistic if (type == 3 && initialFit == -1) doFit = false; // case of Asymptoticcalculator with nominal Asimov double poihat = 0; if (minimizerType.size()==0) minimizerType = ROOT::Math::MinimizerOptions::DefaultMinimizerType(); else ROOT::Math::MinimizerOptions::SetDefaultMinimizer(minimizerType.c_str()); Info("StandardHypoTestInvDemo","Using %s as minimizer for computing the test statistic", ROOT::Math::MinimizerOptions::DefaultMinimizerType().c_str() ); if (doFit) { // do the fit : By doing a fit the POI snapshot (for S+B) is set to the fit value // and the nuisance parameters nominal values will be set to the fit value. // This is relevant when using LEP test statistics Info( "StandardHypoTestInvDemo"," Doing a first fit to the observed data "); RooArgSet constrainParams; if (sbModel->GetNuisanceParameters() ) constrainParams.add(*sbModel->GetNuisanceParameters()); RooStats::RemoveConstantParameters(&constrainParams); tw.Start(); RooFitResult * fitres = sbModel->GetPdf()->fitTo(*data,InitialHesse(false), Hesse(false), Minimizer(minimizerType.c_str(),"Migrad"), Strategy(0), PrintLevel(mPrintLevel), Constrain(constrainParams), Save(true) ); if (fitres->status() != 0) { Warning("StandardHypoTestInvDemo","Fit to the model failed - try with strategy 1 and perform first an Hesse computation"); fitres = sbModel->GetPdf()->fitTo(*data,InitialHesse(true), Hesse(false),Minimizer(minimizerType.c_str(),"Migrad"), Strategy(1), PrintLevel(mPrintLevel+1), Constrain(constrainParams), Save(true) ); } if (fitres->status() != 0) Warning("StandardHypoTestInvDemo"," Fit still failed - continue anyway....."); poihat = poi->getVal(); std::cout << "StandardHypoTestInvDemo - Best Fit value : " << poi->GetName() << " = " << poihat << " +/- " << poi->getError() << std::endl; std::cout << "Time for fitting : "; tw.Print(); //save best fit value in the poi snapshot sbModel->SetSnapshot(*sbModel->GetParametersOfInterest()); std::cout << "StandardHypoTestInvo: snapshot of S+B Model " << sbModel->GetName() << " is set to the best fit value" << std::endl; } // print a message in case of LEP test statistics because it affects result by doing or not doing a fit if (testStatType == 0) { if (!doFit) Info("StandardHypoTestInvDemo","Using LEP test statistic - an initial fit is not done and the TS will use the nuisances at the model value"); else Info("StandardHypoTestInvDemo","Using LEP test statistic - an initial fit has been done and the TS will use the nuisances at the best fit value"); } // build test statistics and hypotest calculators for running the inverter SimpleLikelihoodRatioTestStat slrts(*sbModel->GetPdf(),*bModel->GetPdf()); // null parameters must includes snapshot of poi plus the nuisance values RooArgSet nullParams(*sbModel->GetSnapshot()); if 
(sbModel->GetNuisanceParameters()) nullParams.add(*sbModel->GetNuisanceParameters()); if (sbModel->GetSnapshot()) slrts.SetNullParameters(nullParams); RooArgSet altParams(*bModel->GetSnapshot()); if (bModel->GetNuisanceParameters()) altParams.add(*bModel->GetNuisanceParameters()); if (bModel->GetSnapshot()) slrts.SetAltParameters(altParams); // ratio of profile likelihood - need to pass snapshot for the alt RatioOfProfiledLikelihoodsTestStat ropl(*sbModel->GetPdf(), *bModel->GetPdf(), bModel->GetSnapshot()); ropl.SetSubtractMLE(false); if (testStatType == 11) ropl.SetSubtractMLE(true); ropl.SetPrintLevel(mPrintLevel); ropl.SetMinimizer(minimizerType.c_str()); ProfileLikelihoodTestStat profll(*sbModel->GetPdf()); if (testStatType == 3) profll.SetOneSided(true); if (testStatType == 4) profll.SetSigned(true); profll.SetMinimizer(minimizerType.c_str()); profll.SetPrintLevel(mPrintLevel); profll.SetReuseNLL(mOptimize); slrts.SetReuseNLL(mOptimize); ropl.SetReuseNLL(mOptimize); if (mOptimize) { profll.SetStrategy(0); ropl.SetStrategy(0); ROOT::Math::MinimizerOptions::SetDefaultStrategy(0); } if (mMaxPoi > 0) poi->setMax(mMaxPoi); // increase limit MaxLikelihoodEstimateTestStat maxll(*sbModel->GetPdf(),*poi); NumEventsTestStat nevtts; AsymptoticCalculator::SetPrintLevel(mPrintLevel); // create the HypoTest calculator class HypoTestCalculatorGeneric * hc = 0; if (type == 0) hc = new FrequentistCalculator(*data, *bModel, *sbModel); else if (type == 1) hc = new HybridCalculator(*data, *bModel, *sbModel); // else if (type == 2 ) hc = new AsymptoticCalculator(*data, *bModel, *sbModel, false, mAsimovBins); // else if (type == 3 ) hc = new AsymptoticCalculator(*data, *bModel, *sbModel, true, mAsimovBins); // for using Asimov data generated with nominal values else if (type == 2 ) hc = new AsymptoticCalculator(*data, *bModel, *sbModel, false ); else if (type == 3 ) hc = new AsymptoticCalculator(*data, *bModel, *sbModel, true ); // for using Asimov data generated with nominal values else { Error("StandardHypoTestInvDemo","Invalid - calculator type = %d supported values are only :\n\t\t\t 0 (Frequentist) , 1 (Hybrid) , 2 (Asymptotic) ",type); return 0; } // set the test statistic TestStatistic * testStat = 0; if (testStatType == 0) testStat = &slrts; if (testStatType == 1 || testStatType == 11) testStat = &ropl; if (testStatType == 2 || testStatType == 3 || testStatType == 4) testStat = &profll; if (testStatType == 5) testStat = &maxll; if (testStatType == 6) testStat = &nevtts; if (testStat == 0) { Error("StandardHypoTestInvDemo","Invalid - test statistic type = %d supported values are only :\n\t\t\t 0 (SLR) , 1 (Tevatron) , 2 (PLR), 3 (PLR1), 4(MLE)",testStatType); return 0; } ToyMCSampler *toymcs = (ToyMCSampler*)hc->GetTestStatSampler(); if (toymcs && (type == 0 || type == 1) ) { // look if pdf is number counting or extended if (sbModel->GetPdf()->canBeExtended() ) { if (useNumberCounting) Warning("StandardHypoTestInvDemo","Pdf is extended: but number counting flag is set: ignore it "); } else { // for not extended pdf if (!useNumberCounting ) { int nEvents = data->numEntries(); Info("StandardHypoTestInvDemo","Pdf is not extended: number of events to generate taken from observed data set is %d",nEvents); toymcs->SetNEventsPerToy(nEvents); } else { Info("StandardHypoTestInvDemo","using a number counting pdf"); toymcs->SetNEventsPerToy(1); } } toymcs->SetTestStatistic(testStat); if (data->isWeighted() && !mGenerateBinned) { Info("StandardHypoTestInvDemo","Data set is weighted, nentries = %d and sum of 
weights = %8.1f but toy generation is unbinned - it would be faster to set mGenerateBinned to true\n",data->numEntries(), data->sumEntries()); } toymcs->SetGenerateBinned(mGenerateBinned); toymcs->SetUseMultiGen(mOptimize); if (mGenerateBinned && sbModel->GetObservables()->getSize() > 2) { Warning("StandardHypoTestInvDemo","generate binned is activated but the number of ovservable is %d. Too much memory could be needed for allocating all the bins",sbModel->GetObservables()->getSize() ); } // set the random seed if needed if (mRandomSeed >= 0) RooRandom::randomGenerator()->SetSeed(mRandomSeed); } // specify if need to re-use same toys if (reuseAltToys) { hc->UseSameAltToys(); } if (type == 1) { HybridCalculator *hhc = dynamic_cast<HybridCalculator*> (hc); assert(hhc); hhc->SetToys(ntoys,ntoys/mNToysRatio); // can use less ntoys for b hypothesis // remove global observables from ModelConfig (this is probably not needed anymore in 5.32) bModel->SetGlobalObservables(RooArgSet() ); sbModel->SetGlobalObservables(RooArgSet() ); // check for nuisance prior pdf in case of nuisance parameters if (bModel->GetNuisanceParameters() || sbModel->GetNuisanceParameters() ) { // fix for using multigen (does not work in this case) toymcs->SetUseMultiGen(false); ToyMCSampler::SetAlwaysUseMultiGen(false); RooAbsPdf * nuisPdf = 0; if (nuisPriorName) nuisPdf = w->pdf(nuisPriorName); // use prior defined first in bModel (then in SbModel) if (!nuisPdf) { Info("StandardHypoTestInvDemo","No nuisance pdf given for the HybridCalculator - try to deduce pdf from the model"); if (bModel->GetPdf() && bModel->GetObservables() ) nuisPdf = RooStats::MakeNuisancePdf(*bModel,"nuisancePdf_bmodel"); else nuisPdf = RooStats::MakeNuisancePdf(*sbModel,"nuisancePdf_sbmodel"); } if (!nuisPdf ) { if (bModel->GetPriorPdf()) { nuisPdf = bModel->GetPriorPdf(); Info("StandardHypoTestInvDemo","No nuisance pdf given - try to use %s that is defined as a prior pdf in the B model",nuisPdf->GetName()); } else { Error("StandardHypoTestInvDemo","Cannnot run Hybrid calculator because no prior on the nuisance parameter is specified or can be derived"); return 0; } } assert(nuisPdf); Info("StandardHypoTestInvDemo","Using as nuisance Pdf ... " ); nuisPdf->Print(); const RooArgSet * nuisParams = (bModel->GetNuisanceParameters() ) ? bModel->GetNuisanceParameters() : sbModel->GetNuisanceParameters(); RooArgSet * np = nuisPdf->getObservables(*nuisParams); if (np->getSize() == 0) { Warning("StandardHypoTestInvDemo","Prior nuisance does not depend on nuisance parameters. 
They will be smeared in their full range"); } delete np; hhc->ForcePriorNuisanceAlt(*nuisPdf); hhc->ForcePriorNuisanceNull(*nuisPdf); } } else if (type == 2 || type == 3) { if (testStatType == 3) ((AsymptoticCalculator*) hc)->SetOneSided(true); if (testStatType != 2 && testStatType != 3) Warning("StandardHypoTestInvDemo","Only the PL test statistic can be used with AsymptoticCalculator - use by default a two-sided PL"); } else if (type == 0 || type == 1) ((FrequentistCalculator*) hc)->SetToys(ntoys,ntoys/mNToysRatio); // Get the result RooMsgService::instance().getStream(1).removeTopic(RooFit::NumIntegration); HypoTestInverter calc(*hc); calc.SetConfidenceLevel(0.95); calc.UseCLs(useCLs); calc.SetVerbose(true); // can speed up using proof-lite if (mUseProof && mNWorkers > 1) { ProofConfig pc(*w, mNWorkers, "", kFALSE); toymcs->SetProofConfig(&pc); // enable proof } if (npoints > 0) { if (poimin > poimax) { // if no min/max given scan between MLE and +4 sigma poimin = int(poihat); poimax = int(poihat + 4 * poi->getError()); } std::cout << "Doing a fixed scan in interval : " << poimin << " , " << poimax << std::endl; calc.SetFixedScan(npoints,poimin,poimax); } else { //poi->setMax(10*int( (poihat+ 10 *poi->getError() )/10 ) ); std::cout << "Doing an automatic scan in interval : " << poi->getMin() << " , " << poi->getMax() << std::endl; } tw.Start(); HypoTestInverterResult * r = calc.GetInterval(); std::cout << "Time to perform limit scan \n"; tw.Print(); if (mRebuild) { calc.SetCloseProof(1); tw.Start(); SamplingDistribution * limDist = calc.GetUpperLimitDistribution(true,mNToyToRebuild); std::cout << "Time to rebuild distributions " << std::endl; tw.Print(); if (limDist) { std::cout << "expected up limit " << limDist->InverseCDF(0.5) << " +/- " << limDist->InverseCDF(0.16) << " " << limDist->InverseCDF(0.84) << "\n"; //update r to a new updated result object containing the rebuilt expected p-values distributions // (it will not recompute the expected limit) if (r) delete r; // need to delete previous object since GetInterval will return a cloned copy r = calc.GetInterval(); } else std::cout << "ERROR : failed to re-build distributions " << std::endl; } return r; }
void inflateTree(const char *name = "h42",
                 const char *in = "root://eospps.cern.ch///eos/ppsscratch/test/h1big.root",
                 const char *out = "/tmp/h1big.root",
                 Int_t fact = 1)
{
  TStopwatch sw;
  sw.Start();

  // Get the input tree from the input file
  TFile *fin = TFile::Open(in);
  if (!fin || fin->IsZombie()) {
    Error("inflateTree", "could not open input file: %s", in);
    return;
  }
  TTree *tin = (TTree *) fin->Get(name);
  if (!tin) {
    Error("inflateTree", "could not find tree '%s' in %s", name, in);
    delete fin;
    return;
  }
  Long64_t nin = tin->GetEntriesFast();
  Printf("Input tree '%s' has %lld entries", name, nin);

  // Create output file
  TFile *fout = TFile::Open(out, "RECREATE", 0, 1);
  if (!fout || fout->IsZombie()) {
    Error("inflateTree", "could not open output file: %s", out);
    delete fin;
    return;
  }

  // Clone the header of the initial tree
  TTree *tout = (TTree *) tin->CloneTree(0);
  tout->SetMaxTreeSize(19000000000);

  // Duplicate all entries 'fact' times
#if 0
  Int_t nc = fact;
  while (nc--) {
    Printf("Writing copy %d ...", fact - nc);
    tout->CopyEntries(tin);
  }
#else
  for (Long64_t i = 0; i < nin; ++i) {
    if (tin->LoadTree(i) < 0) {
      break;
    }
    tin->GetEntry(i);
    Int_t nc = fact;
    while (nc--) {
      tout->Fill();
    }
    if (i > 0 && !(i % 1000)) {
      Printf("%d copies of %lld entries filled ...", fact, i);
    }
  }
#endif

  // Finalize the writing out
  tout->Write();

  // print perf stats
  sw.Stop();
  std::cout << "Drawing. Realtime: " << sw.RealTime() << std::endl;
  std::cout << "Drawing. Cputime : " << sw.CpuTime() << std::endl;
  tin->PrintCacheStats();

  // Close the files
  fout->Close();
  fin->Close();

  // Cleanup
  delete fout;
  delete fin;
}
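// Example invocation of inflateTree() in batch mode (assuming the macro is saved
// as inflateTree.C; the file names below are placeholders, the defaults above
// point at an EOS test file):
//
//   root -l -b -q 'inflateTree.C("h42", "h1big.root", "/tmp/h1big.root", 10)'
//
// which writes /tmp/h1big.root containing 10 copies of every entry of tree "h42".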
void r3blandreco(Int_t nNeutrons, Int_t beamE, Int_t Erel)
{
  Int_t d;
  if (Erel == 100) {
    d = 35;
  } else {
    d = 14;
  }

  // ----- Files ---------------------------------------------------------------
  char strDir[] = ".";
  char str[100];
  char str2[100];
  sprintf(str, "%1dAMeV.%1dn.%1dkeV.%1dm.root", beamE, nNeutrons, Erel, d);
  sprintf(str2, "%1dAMeV.%1dkeV.%1dm", beamE, Erel, d);
  TString inFile     = TString(strDir) + "/r3bsim."    + TString(str);
  TString digiFile   = TString(strDir) + "/r3bcalibr." + TString(str);
  TString parFile    = TString(strDir) + "/r3bpar."    + TString(str);
  TString calibrFile = TString(strDir) + "/r3bcalibr." + TString(str2) + ".txt";
  TString outFile    = TString(strDir) + "/r3breco."   + TString(str);
  // ---------------------------------------------------------------------------

  // ----- Timer ---------------------------------------------------------------
  TStopwatch timer;
  timer.Start();
  // ---------------------------------------------------------------------------

  // ----- Digitization --------------------------------------------------------
  FairRunAna *fRun = new FairRunAna();
  fRun->SetInputFile(inFile);
  fRun->AddFriend(digiFile);
  fRun->SetOutputFile(outFile);
  // ---------------------------------------------------------------------------

  // ----- Beam parameters -----------------------------------------------------
  Double_t beamEnergy;
  Double_t beamBeta;
  if (200 == beamE) {
    beamEnergy = 200.;
    beamBeta   = 0.5676881;
  } else if (600 == beamE) {
    beamEnergy = 600.;
    beamBeta   = 0.7937626;
  } else if (1000 == beamE) {
    beamEnergy = 1000.;
    beamBeta   = 0.8760237;
  }
  // ---------------------------------------------------------------------------

  // ----- Connect the Tracking Task -------------------------------------------
  R3BNeutronTracker2D *tracker = new R3BNeutronTracker2D();
  tracker->UseBeam(beamEnergy, beamBeta);
  tracker->ReadCalibrFile(calibrFile.Data());
  fRun->AddTask(tracker);
  // ---------------------------------------------------------------------------

  // ----- Runtime DataBase info -----------------------------------------------
  FairRuntimeDb *rtdb = fRun->GetRuntimeDb();
  FairParRootFileIo *parIo1 = new FairParRootFileIo();
  parIo1->open(parFile.Data());
  rtdb->setFirstInput(parIo1);
  rtdb->setOutput(parIo1);
  rtdb->saveOutput();
  // ---------------------------------------------------------------------------

  // ----- Number of events to process -----------------------------------------
  Int_t nEvents = 10000;
  // ---------------------------------------------------------------------------

  // ----- Initialise and run --------------------------------------------------
  fRun->Init();
  fRun->Run(0, nEvents);
  // ---------------------------------------------------------------------------

  // ----- Finish --------------------------------------------------------------
  timer.Stop();
  Double_t rtime = timer.RealTime();
  Double_t ctime = timer.CpuTime();
  cout << endl << endl;
  cout << "Macro finished successfully." << endl;
  cout << "Output file written: " << outFile << endl;
  cout << "Parameter file written: " << parFile << endl;
  cout << "Real time " << rtime << " s, CPU time " << ctime << " s" << endl;
  cout << endl;
  // ---------------------------------------------------------------------------
}
void run(const Char_t *files = NULL, Bool_t mc = kFALSE, Bool_t tpid = kTRUE, Bool_t tchg = kFALSE,
         Bool_t tpp = kTRUE, Long64_t nev = 1234567890, Long64_t first = 0)
{
  TStopwatch timer;
  timer.Start();

  // VERY GENERAL SETTINGS
  //AliLog::SetGlobalLogLevel(AliLog::kError);
  if (gSystem->Load("libANALYSIS.so") < 0) return;
  if (gSystem->Load("libANALYSISalice.so") < 0) return;
  if (gSystem->Load("libTender.so") < 0) return;
  if (gSystem->Load("libTenderSupplies.so") < 0) return;
  // if(gSystem->Load("libMES.so")<0) return;
  if (gSystem->Load("libPWGLFspectra.so") < 0) return;

  // DEFINE DATA CHAIN
  TChain *chain = NULL;
  if (!files) chain = MakeChainLST();
  else        chain = MakeChainLST(files);
  if (!chain) return;
  chain->Lookup();
  chain->GetListOfFiles()->Print();
  Long64_t nfound = chain->GetEntries();
  printf("\tENTRIES FOUND [%lli] REQUESTED [%lli]\n", nfound, nev > nfound ? nfound : nev);

  // BUILD ANALYSIS MANAGER
  AliAnalysisManager *mgr = new AliAnalysisManager("Multiplicity and Event Shape");
  AliESDInputHandler *esdH = new AliESDInputHandler();
  AliMCEventHandler *mcH(NULL);
  mgr->SetInputEventHandler(esdH);
  if (mc) mgr->SetMCtruthEventHandler(mcH = new AliMCEventHandler());
  //mgr->SetDebugLevel(10);
  mgr->SetSkipTerminate(kTRUE);

  // LOAD tasks
  // ******************* PID response ******************
  gROOT->LoadMacro("$ALICE_ROOT/ANALYSIS/macros/AddTaskPIDResponse.C");
  if (!mc) AddTaskPIDResponse();
  else     AddTaskPIDResponse(kTRUE, kTRUE, kTRUE, 2);

  // ******************* Tenders ***********************
  AliTender *aliTender(NULL);
  gROOT->LoadMacro("$ALICE_PHYSICS/TENDER/TenderSupplies/AddTaskTender.C");
  if (!mc) { // for DATA
    aliTender = (AliTender*)AddTaskTender(!mc, kTRUE, kTRUE, kTRUE, kTRUE, kFALSE, kTRUE, kFALSE, kFALSE);
    // (useV0, useTPC, !!! useTOF=kFALSE for MC !!!, useTRD, usePID, useVTX, useT0, useEmc, usePtFix)
  } else {   // for MC
    aliTender = (AliTender*)AddTaskTender(!mc, kTRUE, kFALSE, kTRUE, kTRUE, kTRUE, kTRUE, kFALSE, kFALSE);
    // (useV0, useTPC, !!! useTOF=kFALSE for MC !!!, useTRD, usePID, useVTX, useT0, useEmc, usePtFix)
  }
  aliTender->SetHandleOCDB(kTRUE);
  //aliTender->SetDefaultCDBStorage(Form("alien://folder=/alice/data/2010/OCDB?cacheFolder=%s/local", gSystem->ExpandPathName("$HOME")));
  // aliTender->SetDefaultCDBStorage(Form("local://%s/local/alice/data/2010/OCDB", gSystem->ExpandPathName("$HOME")));

  // ******************* Physics Selection *************
  gROOT->LoadMacro("$ALICE_PHYSICS/OADB/macros/AddTaskPhysicsSelection.C");
  AliPhysicsSelectionTask *physSelTask = AddTaskPhysicsSelection(mc); // 0 = real data; 1 = MC

  // ******************* MES Tender *********************
  gROOT->LoadMacro("$ALICE_PHYSICS/PWGLF/SPECTRA/MultEvShape/AddMEStender.C");
  AddMEStender(mc);

  // ******************* MES PID task *******************
  if (tpid) {
    gROOT->LoadMacro("$ALICE_PHYSICS/PWGLF/SPECTRA/MultEvShape/AddMESpidTask.C");
    AddMESpidTask(mc);
  }
  // ******************* MES CHG task *******************
  if (tchg) {
    gROOT->LoadMacro("$ALICE_PHYSICS/PWGLF/SPECTRA/MultEvShape/AddMESchgTask.C");
    AddMESchgTask(mc);
  }
  // ******************* MES ppCol task *****************
  if (tpp) {
    gROOT->LoadMacro("$ALICE_PHYSICS/PWGLF/SPECTRA/MultEvShape/AddMESppColTask.C");
    AddMESppColTask(mc);
  }

  if (!mgr->InitAnalysis()) return;
  mgr->PrintStatus();
  mgr->StartAnalysis("local", chain, nev, first);
  timer.Stop();
  timer.Print();

  // verbosity
  printf("\tCLEANING TASK LIST:\n");
  mgr->GetTasks()->Delete();
  if (mcH) delete mcH;
  delete esdH;
  delete chain;
}
void test(const char *sdir = "signal", const char *bdir = "backgr")
{
  TStopwatch timer;
  timer.Start();

  TString name;

  // Signal file, tree, and branch
  name  = sdir;
  name += "/IlcESDs.root";
  TFile *fSig = TFile::Open(name.Data());
  TTree *tSig = (TTree *) fSig->Get("esdTree");
  IlcESDEvent *esdSig = new IlcESDEvent();  // The signal ESD object is put here
  esdSig->ReadFromTree(tSig);

  // Run loader (signal events)
  name  = sdir;
  name += "/gilc.root";
  IlcRunLoader *rlSig = IlcRunLoader::Open(name.Data());

  // Run loader (underlying events)
  name  = bdir;
  name += "/gilc.root";
  IlcRunLoader *rlUnd = IlcRunLoader::Open(name.Data(), "Underlying");

  // gIlc
  rlSig->LoadgIlc();
  rlUnd->LoadgIlc();
  gIlc = rlSig->GetIlcRun();

  // Now load kinematics and event header
  rlSig->LoadKinematics();
  rlSig->LoadHeader();
  rlUnd->LoadKinematics();
  rlUnd->LoadHeader();

  // Loop on events: check that MC and data contain the same number of events
  Long64_t nevSig = rlSig->GetNumberOfEvents();
  Long64_t nevUnd = rlUnd->GetNumberOfEvents();
  Long64_t nSigPerUnd = nevSig / nevUnd;

  cout << nevSig << " signal events" << endl;
  cout << nevUnd << " underlying events" << endl;
  cout << nSigPerUnd << " signal events per one underlying" << endl;

  for (Int_t iev = 0; iev < nevSig; iev++) {
    cout << "Signal event " << iev << endl;
    Int_t ievUnd = iev / nSigPerUnd;
    cout << "Underlying event " << ievUnd << endl;

    // Get signal ESD
    tSig->GetEntry(iev);
    // Get signal kinematics
    rlSig->GetEvent(iev);
    // Get underlying kinematics
    rlUnd->GetEvent(ievUnd);

    // Particle stack
    IlcStack *stackSig = rlSig->Stack();
    Int_t nPartSig = stackSig->GetNtrack();
    IlcStack *stackUnd = rlUnd->Stack();
    Int_t nPartUnd = stackUnd->GetNtrack();

    Int_t nrec = esdSig->GetNumberOfTracks();
    cout << nrec << " reconstructed tracks" << endl;
    for (Int_t irec = 0; irec < nrec; irec++) {
      IlcESDtrack *track = esdSig->GetTrack(irec);
      UInt_t label = TMath::Abs(track->GetTPCLabel());
      if (label >= 10000000) {
        // Underlying event. 10000000 is the value of fkMASKSTEP in IlcRunDigitizer
        // cout << " Track from the underlying event" << endl;
        label %= 10000000;
        if (label >= nPartUnd) continue;
        TParticle *part = stackUnd->Particle(label);
        if (part) part->Print();
      } else {
        cout << " Track " << label << " from the signal event" << endl;
        if (label >= nPartSig) {
          cout << "Strange, label outside the range " << endl;
          continue;
        }
        TParticle *part = stackSig->Particle(label);
        if (part) part->Print();
      }
    }
  }

  fSig->Close();
  timer.Stop();
  timer.Print();
}
void csv2() { TString sysname ="CSV.root"; TFile *sysinput(0); sysinput = TFile::Open( sysname ); // if not: download from ROOT server std::vector<string> variables_; TString name; variables_.push_back("BDT__zjethist"); // variables_.push_back("BDT__phjethist"); variables_.push_back("BDT__tbartchhist"); variables_.push_back("BDT__tt3hist"); variables_.push_back("BDT__ttphhist"); variables_.push_back("BDT__wwphhist"); variables_.push_back("BDT__zzhist"); variables_.push_back("BDT__zgammahist"); variables_.push_back("BDT__singleantitopphotonhist"); std::vector<TH1F*> addhists; std::vector<TH1F*> wjetandwphjet; wjetandwphjet.push_back((TH1F*) sysinput->Get((std::string("BDT__wjet").c_str()))); wjetandwphjet.push_back((TH1F*) sysinput->Get((std::string("BDT__wphjethist").c_str()))); std::vector<TH1F*> jesuphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__JES__plus"; jesuphists.push_back((TH1F*) sysinput->Get(name)); } //jesuphists.push_back((TH1F*) sysinput->Get((std::string("BDT__wjet").c_str()))); //jesuphists.push_back((TH1F*) sysinput->Get((std::string("BDT__wphjethist").c_str()))); for(unsigned int idx=1; idx<variables_.size(); ++idx){ jesuphists[idx]->Add(jesuphists[idx-1]); } addhists.push_back(jesuphists[variables_.size()-1]); std::vector<TH1F*> jesdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__JES__minus"; jesdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ jesdownhists[idx]->Add(jesdownhists[idx-1]);} addhists.push_back(jesdownhists[variables_.size()-1]); std::vector<TH1F*> jeruphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__JER__plus"; jeruphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ jeruphists[idx]->Add(jeruphists[idx-1]);} addhists.push_back(jeruphists[variables_.size()-1]); std::vector<TH1F*> jerdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__JER__minus"; jerdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ jerdownhists[idx]->Add(jerdownhists[idx-1]);} addhists.push_back(jerdownhists[variables_.size()-1]); std::vector<TH1F*> phesuphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PhES__plus"; phesuphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ phesuphists[idx]->Add(phesuphists[idx-1]);} addhists.push_back(phesuphists[variables_.size()-1]); std::vector<TH1F*> phesdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PhES__minus"; phesdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ phesdownhists[idx]->Add(phesdownhists[idx-1]);} addhists.push_back(phesdownhists[variables_.size()-1]); std::vector<TH1F*> puuphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PU__plus"; puuphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ puuphists[idx]->Add(puuphists[idx-1]);} addhists.push_back(puuphists[variables_.size()-1]); std::vector<TH1F*> pudownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PU__minus"; pudownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ pudownhists[idx]->Add(pudownhists[idx-1]);} addhists.push_back(pudownhists[variables_.size()-1]); std::vector<TH1F*> 
triguphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__TRIG__plus"; triguphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ triguphists[idx]->Add(triguphists[idx-1]);} addhists.push_back(triguphists[variables_.size()-1]); std::vector<TH1F*> trigdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__TRIG__minus"; trigdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ trigdownhists[idx]->Add(trigdownhists[idx-1]);} addhists.push_back(trigdownhists[variables_.size()-1]); std::vector<TH1F*> btaguphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__BTAG__plus"; btaguphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ btaguphists[idx]->Add(btaguphists[idx-1]);} addhists.push_back(btaguphists[variables_.size()-1]); std::vector<TH1F*> btagdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__BTAG__minus"; btagdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ btagdownhists[idx]->Add(btagdownhists[idx-1]);} addhists.push_back(btagdownhists[variables_.size()-1]); std::vector<TH1F*> misstaguphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__MISSTAG__plus"; misstaguphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ misstaguphists[idx]->Add(misstaguphists[idx-1]);} addhists.push_back(misstaguphists[variables_.size()-1]); std::vector<TH1F*> misstagdownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__MISSTAG__minus"; misstagdownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ misstagdownhists[idx]->Add(misstagdownhists[idx-1]);} addhists.push_back(misstagdownhists[variables_.size()-1]); std::vector<TH1F*> muonuphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__MUON__plus"; muonuphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ muonuphists[idx]->Add(muonuphists[idx-1]);} addhists.push_back(muonuphists[variables_.size()-1]); std::vector<TH1F*> muondownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__MUON__minus"; muondownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ muondownhists[idx]->Add(muondownhists[idx-1]);} addhists.push_back(muondownhists[variables_.size()-1]); std::vector<TH1F*> photonuphists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PHOTON__plus"; photonuphists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ photonuphists[idx]->Add(photonuphists[idx-1]);} addhists.push_back(photonuphists[variables_.size()-1]); std::vector<TH1F*> photondownhists; for(unsigned int i=0; i<variables_.size(); ++i){ name=variables_[i]+"__PHOTON__minus"; photondownhists.push_back((TH1F*) sysinput->Get(name));} for(unsigned int idx=1; idx<variables_.size(); ++idx){ photondownhists[idx]->Add(photondownhists[idx-1]);} addhists.push_back(photondownhists[variables_.size()-1]); std::vector<std::vector<double_t> > vec(photondownhists[0]->GetNbinsX(), vector<double>(18)); for(int p = 0; p <photondownhists[0]->GetNbinsX(); p++){ //loop over bins for(int m = 0; m < 18; m++){ //loop over systematics 
vec[p][m]=addhists[m]->GetBinContent(p+1)+wjetandwphjet[0]->GetBinContent(p+1)+wjetandwphjet[1]->GetBinContent(p+1); cout<<vec[p][m]<<endl; }} // Book output histograms UInt_t nbin = 20; double min=0; double max=1; // Prepare input tree (this must be replaced by your data source) // in this example, there is a toy tree with signal and one with background events // we'll later on use only the "signal" events for the test in this example. // TFile *input(0); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<string> samples_; std::vector<string> datasamples_; std::vector<TH1F*> hists; std::vector<TH1F*> datahists; std::vector<TH1F*> revDATAhists; float scales[] = {0.628,0.0978,34.01,6.133,1.04,0.32,0.02,0.002,0.0961,0.0253,0.0224,0.0145,0.0125,0.0160,0.0158,0.0341,0.0341,0.0341,0.020,0.0017,0.0055,0.0032,0.00084,0.02,0.01139,0.01139,0.049094905/19.145}; samples_.push_back("WJET.root"); samples_.push_back("ZJET.root"); samples_.push_back("G_Pt_50to80.root"); samples_.push_back("G_Pt_80to120.root"); samples_.push_back("G_Pt_120to170.root"); samples_.push_back("G_Pt_170to300.root"); samples_.push_back("G_Pt_300to470.root"); samples_.push_back("G_Pt_470to800.root"); samples_.push_back("WPHJET.root"); samples_.push_back("T-W-CH.root"); samples_.push_back("TBAR-W-CH.root"); samples_.push_back("T-S-CH.root"); samples_.push_back("TBAR-S-CH.root"); samples_.push_back("T-T-CH.root"); samples_.push_back("TBAR-T-CH.root"); samples_.push_back("TTBAR1.root"); samples_.push_back("TTBAR2.root"); samples_.push_back("TTBAR3.root"); samples_.push_back("TTG.root"); samples_.push_back("WWG.root"); samples_.push_back("WW.root"); samples_.push_back("WZ.root"); samples_.push_back("ZZ.root"); samples_.push_back("ZGAMMA.root"); samples_.push_back("SINGLE-TOP-PH.root"); samples_.push_back("SINGLE-ANTITOP-PH.root"); samples_.push_back("SIGNALtGu.root"); datasamples_.push_back("REALDATA1.root"); datasamples_.push_back("REALDATA2.root"); datasamples_.push_back("REALDATA3.root"); std::vector<string> datasamplesreverse_; datasamplesreverse_.push_back("etarev/REALDATA1.root"); datasamplesreverse_.push_back("etarev/REALDATA2.root"); datasamplesreverse_.push_back("etarev/REALDATA3.root"); TH1F *wphjethist(0), *zjethist(0) , *phjethist(0), *wjethist(0), *twchhist(0), *tbarwhist(0), *tschhist(0), *tbarschhist(0), *ttchhist(0), *tbartchhist(0), *tt1hist(0) ,*tt2hist(0), *tt3hist(0), *ttphhist(0), *wwphhist(0), *wwhist(0), *wzhist(0), *zzhist(0), *zgammahist(0),*singletopphotonhist(0), *singleantitopphotonhist(0), *signalhist(0), *G_Pt_50to80(0),*G_Pt_80to120(0), *G_Pt_120to170(0), *G_Pt_170to300(0) ,*G_Pt_300to470(0),*G_Pt_470to800(0) ; TH1F *wphjethistSB(0), *zjethistSB(0) , *phjethistSB(0), *wjethistSB(0), *twchhistSB(0), *tbarwhistSB(0), *tschhistSB(0), *tbarschhistSB(0), *ttchhistSB(0), *tbartchhistSB(0), *tt1histSB(0) ,*tt2histSB(0), *tt3histSB(0), *ttphhistSB(0), *wwphhistSB(0), *wwhistSB(0), *wzhistSB(0), *zzhistSB(0), *zgammahistSB(0),*singletopphotonhistSB(0), *singleantitopphotonhistSB(0), *signalhistSB(0), *G_Pt_50to80SB(0),*G_Pt_80to120SB(0), *G_Pt_120to170SB(0), *G_Pt_170to300SB(0) ,*G_Pt_300to470SB(0),*G_Pt_470to800SB(0) ; TH1F *data1hist(0), *data2hist(0) ,*data3hist(0) ,*datahistsideband(0); TH1F *data1histrev(0), *data2histrev(0) ,*data3histrev(0), *datahistrevsideband(0); wphjethist = new TH1F( "mu_BDT__wphjethist", "mu_BDT__wphjethist", nbin, min, max ); zjethist = new TH1F( "mu_BDT__zjethist", 
"mu_BDT__zjethist", nbin, min, max ); G_Pt_50to80= new TH1F( "mu_BDT__G_Pt_50to80", "mu_BDT__G_Pt_50to80", nbin, min, max ); G_Pt_80to120= new TH1F( "mu_BDT__G_Pt_80to120", "mu_BDT__G_Pt_80to120", nbin, min, max ); G_Pt_120to170= new TH1F( "mu_BDT__G_Pt_120to170", "mu_BDT__G_Pt_120to170", nbin, min, max ); G_Pt_170to300= new TH1F( "mu_BDT__G_Pt_170to300", "mu_BDT__G_Pt_170to300", nbin, min, max ); G_Pt_300to470= new TH1F( "mu_BDT__G_Pt_300to470", "mu_BDT__G_Pt_300to470", nbin, min, max ); G_Pt_470to800= new TH1F( "mu_BDT__G_Pt_470to800", "mu_BDT__G_Pt_470to800", nbin, min, max ); wjethist = new TH1F( "mu_BDT__wjethist", "mu_BDT__wjethist", nbin, min, max); twchhist = new TH1F( "mu_BDT__twchhist", "mu_BDT__twchhist", nbin, min, max ); tbarwhist = new TH1F( "mu_BDT__tbarwhist", "mu_BDT__tbarwhist", nbin, min, max ); tschhist = new TH1F( "mu_BDT__tschhist", "mu_BDT__tschhist", nbin, min, max ); tbarschhist = new TH1F( "mu_BDT__tbarschhist", "mu_BDT__tbarschhist", nbin, min, max ); ttchhist = new TH1F( "mu_BDT__ttchhist", "mu_BDT__ttchhist", nbin, min, max ); tbartchhist = new TH1F( "mu_BDT__tbartchhist", "mu_BDT__tbartchhist", nbin, min, max); tt1hist = new TH1F( "mu_BDT__tt1hist", "mu_BDT__tt1hist", nbin,min, max ); tt2hist = new TH1F( "mu_BDT__tt2hist", "mu_BDT__tt2hist", nbin, min, max); tt3hist = new TH1F( "mu_BDT__tt3hist", "mu_BDT__tt3hist", nbin, min, max); ttphhist = new TH1F( "mu_BDT__ttphhist", "mu_BDT__ttphhist", nbin, min, max); wwphhist = new TH1F( "mu_BDT__wwphhist", "BDT__wwphhist", nbin,min, max ); wwhist = new TH1F( "mu_BDT__wwhist", "mu_BDT__wwhist", nbin,min, max ); wzhist = new TH1F( "mu_BDT__wzhist", "mu_BDT__wzhist", nbin, min, max ); zzhist = new TH1F( "mu_BDT__zzhist", "mu_BDT__zzhist", nbin, min, max ); zgammahist = new TH1F( "mu_BDT__zgammahist", "mu_BDT__zgammahist", nbin,min, max ); singletopphotonhist = new TH1F( "mu_BDT__singletopphotonhist", "mu_BDT__singletopphotonhist", nbin, min, max); singleantitopphotonhist = new TH1F( "mu_BDT__singleantitopphotonhist", "mu_BDT__singleantitopphotonhist", nbin,min, max ); signalhist = new TH1F( "mu_BDT__signal100", "mu_BDT__signal100", nbin, min, max ); data1hist = new TH1F( "mu_BDT__data1hist", "mu_BDT__data1hist", nbin, min, max ); data2hist = new TH1F( "mu_BDT__data2hist", "mu_BDT__data2hist", nbin, min, max ); data3hist = new TH1F( "mu_BDT__DATA", "mu_BDT__DATA", nbin, min, max ); datahistsideband = new TH1F( "mu_BDT__DATA_sideband", "mu_BDT__DATA_sideband", nbin, min, max); data1histrev = new TH1F( "mu_BDT__data1histrev", "mu_BDT__data1histrev", nbin, min, max ); data2histrev = new TH1F( "mu_BDT__data2histrev", "mu_BDT__data2histrev", nbin,min, max ); data3histrev = new TH1F( "mu_BDT__DATArev", "mu_BDT__DATArev", nbin, min, max ); datahistrevsideband = new TH1F( "mu_BDT__DATArevsideband", "mu_BDT__DATArevsideband", nbin, min, max ); wphjethistSB = new TH1F( "mu_BDT__wphjethist__JES__SB", "mu_BDT__wphjethist__JES__SB", nbin,min, max ); zjethistSB = new TH1F( "mu_BDT__zjethist__JES__SB", "mu_BDT__zjethist__JES__SB", nbin, min, max ); G_Pt_50to80SB= new TH1F( "mu_BDT__G_Pt_50to80SB", "mu_BDT__G_Pt_50to80SB", nbin, min, max ); G_Pt_80to120SB= new TH1F( "mu_BDT__G_Pt_80to120SB", "mu_BDT__G_Pt_80to120SB", nbin, min, max ); G_Pt_120to170SB= new TH1F( "mu_BDT__G_Pt_120to170SB", "mu_BDT__G_Pt_120to170SB", nbin, min, max ); G_Pt_170to300SB= new TH1F( "mu_BDT__G_Pt_170to300SB", "mu_BDT__G_Pt_170to300SB", nbin, min, max ); G_Pt_300to470SB= new TH1F( "mu_BDT__G_Pt_300to470SB", "mu_BDT__G_Pt_300to470SB", nbin, min, max ); 
G_Pt_470to800SB= new TH1F( "mu_BDT__G_Pt_470to800SB", "mu_BDT__G_Pt_470to800SB", nbin, min, max ); wjethistSB = new TH1F( "mu_BDT__wjethist__JES__SB", "mu_BDT__wjethist__JES__SB", nbin, min, max ); twchhistSB = new TH1F( "mu_BDT__twchhist__JES__SB", "mu_BDT__twchhist__JES__SB", nbin,min, max); tbarwhistSB = new TH1F( "mu_BDT__tbarwhist__JES__SB", "mu_BDT__tbarwhist__JES__SB", nbin,min, max ); tschhistSB = new TH1F( "mu_BDT__tschhist__JES__SB", "mu_BDT__tschhist__JES__SB", nbin, min, max ); tbarschhistSB = new TH1F( "mu_BDT__tbarschhist__JES__SB", "mu_BDT__tbarschhist__JES__SB", nbin, min, max ); ttchhistSB = new TH1F( "mu_BDT__ttchhist__JES__SB", "mu_BDT__ttchhist__JES__SB", nbin, min, max ); tbartchhistSB = new TH1F( "mu_BDT__tbartchhist__JES__SB", "mu_BDT__tbartchhist__JES__SB", nbin, min, max); tt1histSB = new TH1F( "mu_BDT__tt1hist__JES__SB", "mu_BDT__tt1hist__JES__SB", nbin, min, max ); tt2histSB = new TH1F( "mu_BDT__tt2hist__JES__SB", "mu_BDT__tt2hist__JES__SB", nbin, min, max ); tt3histSB = new TH1F( "mu_BDT__tt3hist__JES__SB", "mu_BDT__tt3hist__JES__SB", nbin, min, max ); ttphhistSB = new TH1F( "mu_BDT__ttphhist__JES__SB", "mu_BDT__ttphhist__JES__SB", nbin,min, max ); wwphhistSB = new TH1F( "mu_BDT__wwphhist__JES__SB", "BDT__wwphhist__JES__SB", nbin,min, max ); wwhistSB = new TH1F( "mu_BDT__wwhist__JES__SB", "mu_BDT__wwhist__JES__SB", nbin, min, max ); wzhistSB = new TH1F( "mu_BDT__wzhist__JES__SB", "mu_BDT__wzhist__JES__SB", nbin, min, max ); zzhistSB = new TH1F( "mu_BDT__zzhist__JES__SB", "mu_BDT__zzhist__JES__SB", nbin, min, max); zgammahistSB = new TH1F( "mu_BDT__zgammahist__JES__SB", "mu_BDT__zgammahist__JES__SB", nbin, min, max ); singletopphotonhistSB = new TH1F( "mu_BDT__singletopphotonhistSB", "mu_BDT__singletopphotonhistSB", nbin,min, max ); singleantitopphotonhistSB = new TH1F( "mu_BDT__singleantitopphotonhistSB", "mu_BDT__singleantitopphotonhistSB", nbin, min, max); signalhistSB = new TH1F( "mu_BDT__signal100__JES__SB", "mu_BDT__signal100__JES__SB", nbin,min, max ); std::vector<TH1F*> SBhists; SBhists.push_back(wjethistSB); SBhists.push_back(zjethistSB); SBhists.push_back(G_Pt_50to80SB); SBhists.push_back(G_Pt_80to120SB); SBhists.push_back(G_Pt_120to170SB); SBhists.push_back(G_Pt_170to300SB); SBhists.push_back(G_Pt_300to470SB); SBhists.push_back(G_Pt_470to800SB); SBhists.push_back(wphjethistSB); SBhists.push_back(twchhistSB); SBhists.push_back(tbarwhistSB); SBhists.push_back(tschhistSB); SBhists.push_back(tbarschhistSB); SBhists.push_back(ttchhistSB); SBhists.push_back(tbartchhistSB); SBhists.push_back(tt1histSB); SBhists.push_back(tt2histSB); SBhists.push_back(tt3histSB); SBhists.push_back(ttphhistSB); SBhists.push_back(wwphhistSB); SBhists.push_back(wwhistSB); SBhists.push_back(wzhistSB); SBhists.push_back(zzhistSB); SBhists.push_back(zgammahistSB); SBhists.push_back(singletopphotonhistSB); SBhists.push_back(singleantitopphotonhistSB); SBhists.push_back(signalhistSB); hists.push_back(wjethist); hists.push_back(zjethist); hists.push_back(G_Pt_50to80); hists.push_back(G_Pt_80to120); hists.push_back(G_Pt_120to170); hists.push_back(G_Pt_170to300); hists.push_back(G_Pt_300to470); hists.push_back(G_Pt_470to800); hists.push_back(wphjethist); hists.push_back(twchhist); hists.push_back(tbarwhist); hists.push_back(tschhist); hists.push_back(tbarschhist); hists.push_back(ttchhist); hists.push_back(tbartchhist); hists.push_back(tt1hist); hists.push_back(tt2hist); hists.push_back(tt3hist); hists.push_back(ttphhist); hists.push_back(wwphhist); hists.push_back(wwhist); 
hists.push_back(wzhist); hists.push_back(zzhist); hists.push_back(zgammahist); hists.push_back(singletopphotonhist); hists.push_back(singleantitopphotonhist); hists.push_back(signalhist); for(unsigned int idx=0; idx<samples_.size(); ++idx){ hists[idx]->Sumw2();} datahists.push_back(data1hist); datahists.push_back(data2hist); datahists.push_back(data3hist); datahists.push_back(datahistsideband); for(unsigned int idx=0; idx<datasamples_.size(); ++idx){ datahists[idx]->Sumw2();} revDATAhists.push_back(data1histrev); revDATAhists.push_back(data2histrev); revDATAhists.push_back(data3histrev); revDATAhists.push_back(datahistrevsideband); double insidewphjet=0; double outsidewphjet=0; double insidewjet=0; double outsidewjet=0; double nsignalevent=0; double mtopup=220; double mtopdown=130; //bool SR=false; //bool SB=true; bool SR=true; bool SB=false; for(unsigned int idx=0; idx<samples_.size(); ++idx){ TString fname =samples_[idx]; if (!gSystem->AccessPathName( fname )) input = TFile::Open( fname ); // check if file in local directory exists else input = TFile::Open( "http://root.cern.ch/files/tmva_class_example.root" ); // if not: download from ROOT server if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; // --- Event loop // Prepare the event tree // - here the variable names have to corres[1]ponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // //Double_t myptphoton,myetaphoton,myptmuon,myetamuon,myptjet,myetajet,mymasstop,mymtw,mydeltaRphotonjet,mydeltaRphotonmuon,myht,mycostopphoton,mydeltaphiphotonmet,mycvsdiscriminant,myjetmultiplicity,mybjetmultiplicity,myleptoncharge; std::vector<double> *myptphoton=0; std::vector<double> *myetaphoton=0; std::vector<double> *myptmuon=0; std::vector<double> *myetamuon=0; std::vector<double> *myptjet=0; std::vector<double> *myetajet=0; std::vector<double> *mymasstop=0; //std::vector<double> *mymtw=0; std::vector<double> *mydeltaRphotonjet=0; std::vector<double> *mydeltaRphotonmuon=0; //std::vector<double> *myht=0; std::vector<double> *mycostopphoton=0; std::vector<double> *mydeltaphiphotonmet=0; std::vector<double> *mycvsdiscriminant=0; std::vector<double> *myjetmultiplicity=0; //std::vector<double> *mybjetmultiplicity=0; //std::vector<double> *myleptoncharge=0; std::vector<double> *myweight=0; std::vector<double> *myjetmatchinginfo=0; std::vector<double> *mycoswphoton=0; std::cout << "--- Select signal sample" << std::endl; TTree* theTree = (TTree*)input->Get("analyzestep2/atq"); // Int_t myjetmultiplicity, mybjetmultiplicity , myleptoncharge; // Float_t userVar1, userVar2; theTree->SetBranchAddress("ptphoton", &myptphoton ); theTree->SetBranchAddress( "etaphoton", &myetaphoton ); theTree->SetBranchAddress( "ptmuon", &myptmuon ); theTree->SetBranchAddress( "etamuon", &myetamuon ); theTree->SetBranchAddress( "ptjet", &myptjet ); theTree->SetBranchAddress( "etajet", &myetajet ); theTree->SetBranchAddress( "masstop", &mymasstop ); // theTree->SetBranchAddress( "mtw", &mymtw ); theTree->SetBranchAddress( "deltaRphotonjet", &mydeltaRphotonjet ); theTree->SetBranchAddress( "deltaRphotonmuon", &mydeltaRphotonmuon ); // theTree->SetBranchAddress( "ht", &myht ); theTree->SetBranchAddress( "costopphoton", &mycostopphoton ); theTree->SetBranchAddress( "jetmultiplicity", &myjetmultiplicity ); // 
theTree->SetBranchAddress( "bjetmultiplicity", &mybjetmultiplicity ); theTree->SetBranchAddress( "deltaphiphotonmet", &mydeltaphiphotonmet ); theTree->SetBranchAddress( "cvsdiscriminant", &mycvsdiscriminant ); // theTree->SetBranchAddress( "leptoncharge", &myleptoncharge ); theTree->SetBranchAddress( "weight", &myweight); theTree->SetBranchAddress( "coswphoton", &mycoswphoton ); theTree->SetBranchAddress( "jetmatchinginfo", &myjetmatchinginfo ); // std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl; TStopwatch sw; sw.Start(); for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { // std::cout << "--- ... Processing event: " << ievt << std::endl; double finalweight; if (ievt%1000 == 0) std::cout << "--- ... Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); //for (int l=0;l<sizeof(myptphoton);l++){ //std::cout << "--- ... reza: " << myptphoton[l] <<std::endl; //} //std::cout << "--- ......................."<< (*mycvsdiscriminant)[0]<<std::endl; // --- Return the MVA outputs and fill into histograms finalweight=(*myweight)[0]; //cout<<(*myweight)[0]<<endl; if((*mymasstop )[0]>mtopdown && (*mymasstop )[0]<mtopup){ hists[idx] ->Fill(Bmodification((*mycvsdiscriminant)[0],(*myjetmatchinginfo)[0]),finalweight ); if (samples_[idx]=="WPHJET.root")insidewphjet=insidewphjet+finalweight; if (samples_[idx]=="SIGNALtGu.root")nsignalevent=nsignalevent+1; //cout<<insidewphjet<<endl; } else { SBhists[idx] ->Fill( Bmodification((*mycvsdiscriminant)[0],(*myjetmatchinginfo)[0]),finalweight ); if (samples_[idx]=="WPHJET.root")outsidewphjet=outsidewphjet+finalweight;} // Retrieve also per-event error } delete myptphoton; delete myetaphoton; delete myptmuon; delete myetamuon; delete myptjet; delete myetajet; delete mymasstop; //delete mymtw; delete mydeltaRphotonjet; delete mydeltaRphotonmuon; //delete myht; delete mycostopphoton; delete mydeltaphiphotonmet; delete mycvsdiscriminant; delete myjetmultiplicity; //delete mybjetmultiplicity; //delete myleptoncharge; //delete myplot; } for(unsigned int idx=0; idx<datasamples_.size(); ++idx){ TString fname =datasamples_[idx]; if (!gSystem->AccessPathName( fname )) input = TFile::Open( fname ); // check if file in local directory exists else input = TFile::Open( "http://root.cern.ch/files/tmva_class_example.root" ); // if not: download from ROOT server if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; // --- Event loop // Prepare the event tree // - here the variable names have to corres[1]ponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // //Double_t myptphoton,myetaphoton,myptmuon,myetamuon,myptjet,myetajet,mymasstop,mymtw,mydeltaRphotonjet,mydeltaRphotonmuon,myht,mycostopphoton,mydeltaphiphotonmet,mycvsdiscriminant,myjetmultiplicity,mybjetmultiplicity,myleptoncharge; std::vector<double> *myptphoton=0; std::vector<double> *myetaphoton=0; std::vector<double> *myptmuon=0; std::vector<double> *myetamuon=0; std::vector<double> *myptjet=0; std::vector<double> *myetajet=0; std::vector<double> *mymasstop=0; //std::vector<double> *mymtw=0; std::vector<double> *mydeltaRphotonjet=0; std::vector<double> *mydeltaRphotonmuon=0; //std::vector<double> *myht=0; std::vector<double> *mycostopphoton=0; std::vector<double> *mydeltaphiphotonmet=0; std::vector<double> 
*mycvsdiscriminant=0; std::vector<double> *myjetmultiplicity=0; //std::vector<double> *mybjetmultiplicity=0; //std::vector<double> *myleptoncharge=0; std::vector<double> *mycoswphoton=0; std::cout << "--- Select signal sample" << std::endl; TTree* theTree = (TTree*)input->Get("analyzestep2/atq"); // Int_t myjetmultiplicity, mybjetmultiplicity , myleptoncharge; // Float_t userVar1, userVar2; theTree->SetBranchAddress("ptphoton", &myptphoton ); theTree->SetBranchAddress( "etaphoton", &myetaphoton ); theTree->SetBranchAddress( "ptmuon", &myptmuon ); theTree->SetBranchAddress( "etamuon", &myetamuon ); theTree->SetBranchAddress( "ptjet", &myptjet ); theTree->SetBranchAddress( "etajet", &myetajet ); theTree->SetBranchAddress( "masstop", &mymasstop ); // theTree->SetBranchAddress( "mtw", &mymtw ); theTree->SetBranchAddress( "deltaRphotonjet", &mydeltaRphotonjet ); theTree->SetBranchAddress( "deltaRphotonmuon", &mydeltaRphotonmuon ); // theTree->SetBranchAddress( "ht", &myht ); theTree->SetBranchAddress( "costopphoton", &mycostopphoton ); theTree->SetBranchAddress( "jetmultiplicity", &myjetmultiplicity ); // theTree->SetBranchAddress( "bjetmultiplicity", &mybjetmultiplicity ); theTree->SetBranchAddress( "deltaphiphotonmet", &mydeltaphiphotonmet ); theTree->SetBranchAddress( "cvsdiscriminant", &mycvsdiscriminant ); theTree->SetBranchAddress( "coswphoton", &mycoswphoton ); // theTree->SetBranchAddress( "leptoncharge", &myleptoncharge ); for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { // std::cout << "--- ... Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); // --- Return the MVA outputs and fill into histograms //leptoncharge=(float)(*myleptoncharge )[0]; if((*mymasstop )[0]>mtopdown && (*mymasstop )[0]<mtopup) datahists[idx] ->Fill( (*mycvsdiscriminant)[0] ); else datahists[3]->Fill( (*mycvsdiscriminant)[0] ); } delete myptphoton; delete myetaphoton; delete myptmuon; delete myetamuon; delete myptjet; delete myetajet; delete mymasstop; //delete mymtw; delete mydeltaRphotonjet; delete mydeltaRphotonmuon; //delete myht; delete mycostopphoton; delete mydeltaphiphotonmet; delete mycvsdiscriminant; delete myjetmultiplicity; //delete mybjetmultiplicity; //delete myleptoncharge; //delete myplot; } for(unsigned int idx=0; idx<datasamplesreverse_.size(); ++idx){ TString fname =datasamplesreverse_[idx]; if (!gSystem->AccessPathName( fname )) input = TFile::Open( fname ); // check if file in local directory exists else input = TFile::Open( "http://root.cern.ch/files/tmva_class_example.root" ); // if not: download from ROOT server if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; std::vector<double> *myptphoton=0; std::vector<double> *myetaphoton=0; std::vector<double> *myptmuon=0; std::vector<double> *myetamuon=0; std::vector<double> *myptjet=0; std::vector<double> *myetajet=0; std::vector<double> *mymasstop=0; //std::vector<double> *mymtw=0; std::vector<double> *mydeltaRphotonjet=0; std::vector<double> *mydeltaRphotonmuon=0; //std::vector<double> *myht=0; std::vector<double> *mycostopphoton=0; std::vector<double> *mydeltaphiphotonmet=0; std::vector<double> *mycvsdiscriminant=0; std::vector<double> *myjetmultiplicity=0; std::vector<double> *mycoswphoton=0; //std::vector<double> *mybjetmultiplicity=0; //std::vector<double> *myleptoncharge=0; TTree* theTree = (TTree*)input->Get("analyzestep2/atq"); theTree->SetBranchAddress("ptphoton", &myptphoton ); 
theTree->SetBranchAddress( "etaphoton", &myetaphoton ); theTree->SetBranchAddress( "ptmuon", &myptmuon ); theTree->SetBranchAddress( "etamuon", &myetamuon ); theTree->SetBranchAddress( "ptjet", &myptjet ); theTree->SetBranchAddress( "etajet", &myetajet ); theTree->SetBranchAddress( "masstop", &mymasstop ); // theTree->SetBranchAddress( "mtw", &mymtw ); theTree->SetBranchAddress( "deltaRphotonjet", &mydeltaRphotonjet ); theTree->SetBranchAddress( "deltaRphotonmuon", &mydeltaRphotonmuon ); // theTree->SetBranchAddress( "ht", &myht ); theTree->SetBranchAddress( "costopphoton", &mycostopphoton ); theTree->SetBranchAddress( "jetmultiplicity", &myjetmultiplicity ); // theTree->SetBranchAddress( "bjetmultiplicity", &mybjetmultiplicity ); theTree->SetBranchAddress( "deltaphiphotonmet", &mydeltaphiphotonmet ); theTree->SetBranchAddress( "cvsdiscriminant", &mycvsdiscriminant ); theTree->SetBranchAddress( "coswphoton", &mycoswphoton ); // theTree->SetBranchAddress( "leptoncharge", &myleptoncharge ); // Efficiency calculator for cut method for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { // std::cout << "--- ... Processing event: " << ievt << std::endl; if (ievt%1000 == 0) std::cout << "--- ... Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); if((*mymasstop )[0]>mtopdown && (*mymasstop )[0]<mtopup) { //revDATAhists[idx]->Fill( reader->EvaluateMVA( "BDT method" ) ); insidewjet=insidewjet+1; revDATAhists[idx]->Fill( (*mycvsdiscriminant)[0]); } else { //revDATAhists[3]->Fill( reader->EvaluateMVA( "BDT method" ) ); outsidewjet=outsidewjet+1; revDATAhists[3]->Fill( (*mycvsdiscriminant)[0]); } //cout<<insidewjet<<endl; } delete myptphoton; delete myetaphoton; delete myptmuon; delete myetamuon; delete myptjet; delete myetajet; delete mymasstop; //delete mymtw; delete mydeltaRphotonjet; delete mydeltaRphotonmuon; //delete myht; delete mycostopphoton; delete mydeltaphiphotonmet; delete mycvsdiscriminant; delete myjetmultiplicity; ////delete mybjetmultiplicity; ////delete myleptoncharge; ////delete myplot; // } double wphjetscale; wphjetscale=insidewphjet/(insidewphjet+outsidewphjet); cout<<"wphjetscale= "<<wphjetscale<<endl; double wjetscale; wjetscale=insidewjet/(insidewjet+outsidewjet); cout<<"wjetscale= "<<wjetscale<<endl; cout<<"nsignalevent= "<<nsignalevent<<endl; //cout<<insidewphjet<<"insidewphjet"<<" "<<wphjetscale<<" "<<insidewjet/(insidewjet+outsidewjet)<<endl; float lumi = 1; if (SR==true){ double ff=0; for(unsigned int idx=0; idx<samples_.size(); ++idx){ hists[idx]->Scale(lumi*scales[idx]); if (idx !=0 && idx!=3){ ff=hists[idx]->Integral()+ff; cout<<samples_[idx]<<" = "<<hists[idx]->Integral()<<" " <<ff<<endl;} } for(unsigned int idx=0; idx<samples_.size(); ++idx){ SBhists[idx]->Scale(lumi*scales[idx]);} THStack *hs1 = new THStack("hs1","BDT output"); for(unsigned int idx=1; idx<datasamplesreverse_.size(); ++idx){ revDATAhists[idx]->Add(revDATAhists[idx-1]); } //cout<<"*********************"<< datahists[3]->Integral()<<" "<<wphjetscale<<endl; //cout<<"*********************"<< revDATAhists[2]->Integral()<<" "<<wjetscale<<endl; revDATAhists[2]->Scale(219.373/revDATAhists[2]->Integral()); for(unsigned int idx=1; idx<revDATAhists[2]->GetNbinsX()+1; ++idx){ //revDATAhists[2]->SetBinError(idx,(revDATAhists[2]->GetBinContent(idx)/revDATAhists[2]->Integral())*74.84); revDATAhists[2]->SetBinError(idx,0); //if (revDATAhists[2]->GetBinError(idx)>revDATAhists[2]->GetBinContent(idx)) revDATAhists[2]->SetBinError(idx, revDATAhists[2]->GetBinContent(idx)/2); } 
//revDATAhists[2]->Scale(wjetscale); revDATAhists[3]->Scale(219.373/revDATAhists[3]->Integral()); revDATAhists[3]->Scale((1-wjetscale)/wjetscale); for(unsigned int idx=1; idx<datasamples_.size(); ++idx){ datahists[idx]->Add(datahists[idx-1]);} cout<<" " <<datahists[2]->Integral()<<endl; datahists[3]->Add(revDATAhists[3],-1); datahists[3]->Add(SBhists[1],-1); datahists[3]->Add(SBhists[2],-1); for(unsigned int idx=9; idx<samples_.size()-1; ++idx){ datahists[3]->Add(SBhists[idx],-1);} for(unsigned int idx=1; idx<nbin; ++idx){ if (datahists[3]->GetBinContent(idx)<0)datahists[3]->SetBinContent(idx,0); } datahists[3]->Scale(1112.2/datahists[3]->Integral()); for(unsigned int idx=1; idx<datahists[3]->GetNbinsX()+1; ++idx){ //datahists[3]->SetBinError(idx,(datahists[3]->GetBinContent(idx)/datahists[3]->Integral())*139.11);} datahists[3]->SetBinError(idx,0);} TH1F *datatoMC(0); //datahists[3]->Scale(wphjetscale); //hists[1]->Add(revDATAhists[2]); //hists[2]->Add(hists[1]); //datahists[3]->Add(hists[2]); //hists[4]->Add(datahists[3]); //for(unsigned int idx=5; idx<samples_.size()-1; ++idx){ // hists[idx]->Add(hists[idx-1]);} //cout<<"**********real data***********"<< datahists[2]->Integral()<<" "<<wphjetscale<<endl; //cout<<"********** mc ***********"<< hists[18]->Integral()<<" "<<wjetscale<<endl; // setup the canvas and draw the histograms TH1F *sum_h= new TH1F ( *hists[1] ) ; sum_h->Sumw2(); for(unsigned int idx=2; idx<samples_.size()-1; ++idx){ if (idx!=8)sum_h->Add(hists[idx],1); } sum_h->Add(revDATAhists[2],1); sum_h->Add(datahists[3],1); std::vector<std::vector<double_t> > vecplus(photondownhists[0]->GetNbinsX(), vector<double>(18)); std::vector<std::vector<double_t> > vecminus(photondownhists[0]->GetNbinsX(), vector<double>(18)); for(int p = 0; p <photondownhists[0]->GetNbinsX(); p++){ //loop over bins for(int m = 0; m < 18; m++){ //loop over systematics vecplus[p][m]=0; vecminus[p][m]=0; if (vec[p][m]>sum_h->GetBinContent(p+1)) vecplus[p][m] = vec[p][m]-sum_h->GetBinContent(p+1); else if (vec[p][m]<sum_h->GetBinContent(p+1)) vecminus[p][m] = sum_h->GetBinContent(p+1)-vec[p][m]; cout<<vecplus[p][m]<<endl; }} TCanvas *c1 = new TCanvas("c1","signal region",50,50,865,780); c1->cd(); TPad *pad1 = new TPad("pad1","pad1",0,0.25,1,1); pad1->SetFillStyle(0); pad1->SetFrameFillStyle(0); pad1->SetBottomMargin(0); TPad *pad2 = new TPad("pad2","pad2",0,0,1,0.25); pad2->SetFillStyle(0); pad2->SetFrameFillStyle(0); pad2->SetTopMargin(0); pad2->SetBottomMargin(0.12/0.46); pad2->Draw(); pad1->Draw(); pad1->cd(); //W+jet revDATAhists[2]->SetFillColor(kBlue-2); revDATAhists[2]->SetLineColor(kBlack); hs1->Add(revDATAhists[2]); //Z+jet hists[1]->SetFillColor(kOrange-4); hists[1]->SetLineColor(kBlack); hs1->Add(hists[1]); //photon+jet hists[3]->Add(hists[2]); hists[4]->Add(hists[3]); hists[5]->Add(hists[4]); hists[6]->Add(hists[5]); hists[7]->Add(hists[6]); hists[7]->SetFillColor(19); //hs1->Add(hists[7]); //W+photon+jet datahists[3]->SetFillColor(kGreen-3); datahists[3]->SetLineColor(kBlack); hs1->Add(datahists[3]); //single top+singletop photon hists[5+5]->Add(hists[4+5]); hists[6+5]->Add(hists[5+5]); hists[7+5]->Add(hists[6+5]); hists[8+5]->Add(hists[7+5]); hists[9+5]->Add(hists[8+5]); hists[19+5]->Add(hists[9+5]); hists[20+5]->Add(hists[19+5]); hists[20+5]->SetFillColor(kRed+3); hists[20+5]->SetLineColor(kBlack); hs1->Add(hists[20+5]); //hists[9+5]->SetFillColor(kAzure+10); //hs1->Add(hists[9+5]); hists[11+5]->Add(hists[10+5]); hists[12+5]->Add(hists[11+5]); hists[13+5]->Add(hists[12+5]); 
hists[13+5]->SetFillColor(kPink+1); hists[13+5]->SetLineColor(kBlack); hs1->Add(hists[13+5]); //hists[13+5]->SetFillColor(17); //hs1->Add(hists[13+5]); //hists[14+5]->SetFillColor(kSpring-9); //hs1->Add(hists[14+5]); hists[15+5]->Add(hists[14+5]); hists[16+5]->Add(hists[15+5]); hists[17+5]->Add(hists[16+5]); hists[17+5]->SetFillColor(kViolet-7); hists[17+5]->SetLineColor(kBlack); hs1->Add(hists[17+5]); hists[18+5]->SetFillColor(kAzure+10); hists[18+5]->SetLineColor(kBlack); hs1->Add(hists[18+5]); //hists[20+5]->Add(hists[19+5]); //hists[20+5]->SetFillColor(kYellow+3); //hs1->Add(hists[20+5]); hs1->Draw("hist"); hs1->SetMaximum(1.6*datahists[2]->GetMaximum()); //hs1->GetXaxis()->SetTitle("BDT output"); hs1->GetYaxis()->SetTitle("Events / 0.05"); hs1->GetYaxis()->SetTitleSize(0.045); hs1->GetYaxis()->SetTitleFont(22); hs1->GetYaxis()->SetTitleOffset(0.8); hs1->GetYaxis()->SetLabelSize(0.044); hists[21+5]->SetLineColor(kRed+3); hists[21+5]->SetLineWidth(3); hists[21+5]->Draw("histsame"); datahists[2]->SetLineWidth(3.); datahists[2]->SetLineColor(kBlack); datahists[2]->SetMarkerColor(kBlack); datahists[2]->SetMarkerStyle(20.); datahists[2]->SetMarkerSize(1.35); datahists[2]->Draw("esame"); sum_h->SetLineColor(kBlack); sum_h->SetFillColor(1); sum_h->SetFillStyle(3001); sum_h->Draw("e2same"); TPaveText *pt = new TPaveText(0.1,0.95,0.4,0.95, "NDC"); // NDC sets coords pt->SetLineColor(10); // relative to pad dimensions pt->SetFillColor(10); // text is black on white pt->SetTextSize(0.045); pt->SetTextAlign(12); pt->AddText("CMS Preliminary, 19.1 fb^{-1}, #sqrt{s} = 8 TeV"); pt->SetShadowColor(10); pt->Draw("same"); std::vector<double_t> errorup(photondownhists[0]->GetNbinsX()); std::vector<double_t> errordown(photondownhists[0]->GetNbinsX()); for(int p = 0; p <photondownhists[0]->GetNbinsX(); p++){ //loop over bins for(int m = 0; m < 18; m++){ //loop over systematics if (m==0) {errorup[p]=0; errordown[p]=0;} errorup[p]=pow(vecplus[p][m],2)+errorup[p]; errordown[p]=pow(vecminus[p][m],2)+errordown[p]; } errorup[p]=pow(0.024*sum_h->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.024*sum_h->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.4*wjetandwphjet[0]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.4*wjetandwphjet[0]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*wjetandwphjet[1]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*wjetandwphjet[1]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*hists[1]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*hists[1]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*hists[20+5]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*hists[20+5]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*hists[13+5]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*hists[13+5]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*hists[17+5]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*hists[17+5]->GetBinContent(p+1),2)+errordown[p]; errorup[p]=pow(0.3*hists[18+5]->GetBinContent(p+1),2)+errorup[p]; errordown[p]=pow(0.3*hists[18+5]->GetBinContent(p+1),2)+errordown[p]; cout<<errorup[p]<<endl; cout<<errordown[p]<<endl; } double ax[photondownhists[0]->GetNbinsX()]; double ay[photondownhists[0]->GetNbinsX()]; double aexl[photondownhists[0]->GetNbinsX()]; double aexh[photondownhists[0]->GetNbinsX()]; double aeyl[photondownhists[0]->GetNbinsX()]; double aeyh[photondownhists[0]->GetNbinsX()]; for(int p = 0; p <photondownhists[0]->GetNbinsX(); p++){ //loop over bins 
ax[p]=min+(max-min)/(2*nbin)+p*((max-min)/nbin); ay[p]=sum_h->GetBinContent(p+1); aexl[p]=(max-min)/(2*nbin); aexh[p]=(max-min)/(2*nbin); aeyl[p]=sqrt(errordown[p]); aeyh[p]=sqrt(errorup[p]); } TGraphAsymmErrors* gae = new TGraphAsymmErrors(photondownhists[0]->GetNbinsX(), ax, ay, aexl, aexh, aeyl, aeyh); gae->SetFillColor(1); gae->SetFillStyle(3003); gae->Draw("e2same"); TLegend* leg = new TLegend(0.60,0.40,0.89,0.87); leg->SetFillStyle ( 0); leg->SetFillColor ( 0); leg->SetBorderSize( 0); leg->AddEntry( datahists[2], "Data" , "PL"); // leg->AddEntry( hists[25], "Single top+#gamma" , "F"); leg->AddEntry( hists[23], "Z#gamma" , "F"); leg->AddEntry( hists[22], "WW,WZ,ZZ,WW#gamma " , "F"); // leg->AddEntry( hists[19], "WW#gamma" , "F"); leg->AddEntry( hists[18], "t#bar{t}, t#bar{t}#gamma" , "F"); // leg->AddEntry( hists[17], "t#bar{t}" , "F"); leg->AddEntry( hists[25], "Single top, Single top+#gamma" , "F"); // leg->AddEntry( hists[14], "Single top" , "F"); leg->AddEntry( datahists[3], "W#gamma" , "F"); // leg->AddEntry( hists[7], "#gamma+jets" , "F"); leg->AddEntry( hists[1], "Z+jets" , "F"); leg->AddEntry(revDATAhists[2], "W+jets" , "F"); leg->AddEntry( hists[26], "Signal(tu#gamma) 1 pb" , "L"); leg->AddEntry(sum_h, "Stat uncertainty" , "F"); leg->AddEntry(gae, "Syst uncertainty" , "F"); // leg->AddEntry( datahists[2], "CMS Data 2012(19.145/fb)" , "PL"); leg->Draw("same"); sum_h->Draw("AXISSAMEY+"); sum_h->Draw("AXISSAMEX+"); pad1->Draw(); TCanvas *c22 = new TCanvas("c22","signal region22",50,50,865,780); c22->cd(); gae->Draw("a2"); gae->Draw("psame"); TH1F *h_ratio = (TH1F*)datahists[2]->Clone("h_copy"); h_ratio->Sumw2(); pad2->cd(); pad2->SetGridy(); datatoMC = new TH1F( "datatoMC", "datatoMC", nbin, min, max ); datatoMC->Sumw2(); datatoMC->Divide(datahists[2],sum_h); h_ratio->Divide(sum_h); h_ratio->SetFillStyle(3004); h_ratio->GetXaxis()->SetTitle("CSV discriminator"); h_ratio->GetYaxis()->SetTitle("DATA/MC"); h_ratio->GetXaxis()->SetTitleSize(0.12); h_ratio->GetYaxis()->SetTitleSize(0.12); h_ratio->GetXaxis()->SetTitleFont(22); h_ratio->GetYaxis()->SetTitleFont(22); h_ratio->GetXaxis()->SetTickLength(0.05); h_ratio->GetYaxis()->SetTickLength(0.05); h_ratio->GetXaxis()->SetLabelSize(0.14); h_ratio->GetYaxis()->SetLabelSize(0.14); h_ratio->GetYaxis()->SetTitleOffset(0.25); h_ratio->GetYaxis()->SetNdivisions(504); h_ratio->SetLineWidth(2); //h_ratio->SetStats(0); //h_ratio->SetMarkerStyle(20); h_ratio->SetMinimum(0); h_ratio->SetMaximum(2); h_ratio->Draw("E"); //datatoMC->Draw(""); TLine *l3 = new TLine(h_ratio->GetXaxis()->GetXmin(), 1.00, h_ratio->GetXaxis()->GetXmax(), 1.00); l3->SetLineWidth(1); //l3->SetLineStyle(7); //l3->Draw(); h_ratio->Draw("AXISSAMEY+"); h_ratio->Draw("AXISSAMEX+"); c1->Update(); for(unsigned int idx=1; idx<nbin+1; ++idx){ cout<<"MC "<<"nbin= "<<idx<<" content= "<<sum_h->GetBinContent(idx)<<endl; cout<<"signal "<<"nbin= "<<idx<<" content= "<<hists[21+5]->GetBinContent(idx)<<endl; cout<<"Data "<<"nbin= "<<idx<<" content= "<<datahists[2]->GetBinContent(idx)<<endl; } cout<<"signal Integral "<<hists[21+5]->Integral()<<endl; } if (SB==true){ for(unsigned int idx=0; idx<samples_.size(); ++idx){ SBhists[idx]->Scale(lumi*scales[idx]);} revDATAhists[3]->Scale(620.32/revDATAhists[3]->Integral()); revDATAhists[3]->Scale(1-wjetscale); SBhists[1]->Add(revDATAhists[3]); SBhists[2]->Add(SBhists[1]); SBhists[4]->Add(SBhists[2]); for(unsigned int idx=5; idx<samples_.size()-1; ++idx){ SBhists[idx]->Add(SBhists[idx-1]);} SBhists[20]->SetMaximum(1.5*datahists[3]->GetMaximum()); 
SBhists[20]->SetFillColor(kMagenta+2); SBhists[20]->Draw(); SBhists[18]->SetFillColor(kOrange+4); SBhists[18]->Draw("same"); SBhists[17]->SetFillColor(kOrange-2); SBhists[17]->Draw("same"); SBhists[16]->SetFillColor(kRed); SBhists[16]->Draw("same"); SBhists[15]->SetFillColor(kViolet+1); SBhists[15]->Draw("same"); SBhists[14]->SetFillColor(kSpring-9); SBhists[14]->Draw("same"); SBhists[13]->SetFillColor(32); SBhists[13]->Draw("same"); SBhists[12]->SetFillColor(6); SBhists[12]->Draw("same"); SBhists[9]->SetFillColor(4); SBhists[9]->Draw("same"); //hists[8]->SetFillColor(4); //hists[8]->Draw("same"); //hists[7]->SetFillColor(3); //hists[7]->Draw("same"); //hists[6]->SetFillColor(3); //hists[6]->Draw("same"); //hists[5]->SetFillColor(2); //hists[5]->Draw("same"); //hists[4]->SetFillColor(2); //hists[4]->Draw("same"); //hists[3]->SetFillColor(5); //hists[3]->Draw("same"); //datahists[3]->SetFillColor(5); //datahists[3]->Draw("same"); SBhists[2]->SetFillColor(8); SBhists[2]->Draw("same"); SBhists[1]->SetFillColor(kOrange+7); SBhists[1]->Draw("same"); revDATAhists[3]->SetFillColor(7); revDATAhists[3]->Draw("same"); //hists[0]->SetFillColor(7); //hists[0]->Draw("same"); SBhists[21]->SetFillColor(1); SBhists[21]->SetFillStyle(3004); SBhists[21]->Draw("same"); // plot data points datahists[3]->SetLineWidth(3.); datahists[3]->SetLineColor(kBlack); datahists[3]->SetMarkerColor(kBlack); datahists[3]->SetMarkerStyle(20.); datahists[3]->Draw("esame"); //conv->RedrawAxis(); TLegend* leg = new TLegend(0.60,0.40,0.89,0.87); leg->SetFillStyle ( 0); leg->SetFillColor ( 0); leg->SetBorderSize( 0); leg->AddEntry( revDATAhists[2], "W JET" , "F"); leg->AddEntry( SBhists[1], "Z JET" , "F"); leg->AddEntry( SBhists[2], "PH JET" , "F"); // leg->AddEntry( datahists[3], "W PH JET" , "F"); // leg->AddEntry( hists[5], "TOP-W-CH" , "F"); // leg->AddEntry( hists[5], "T-S-CH" , "F"); // leg->AddEntry( hists[7], "TOP-S-CH" , "F"); // leg->AddEntry( hists[7], "TTBAR-CH" , "F"); // leg->AddEntry( hists[8], "TBAR-W-CH" , "F"); leg->AddEntry( SBhists[9], "SINGLE TOP " , "F"); leg->AddEntry( SBhists[12], "TTBAR" , "F"); leg->AddEntry( SBhists[13], "TTG" , "F"); leg->AddEntry( SBhists[14], "WWG" , "F"); leg->AddEntry( SBhists[15], "WW" , "F"); leg->AddEntry( SBhists[16], "WZ" , "F"); leg->AddEntry( SBhists[17], "ZZ" , "F"); leg->AddEntry( SBhists[18], "ZGAMMA" , "F"); leg->AddEntry( SBhists[20], "SINGLE TOP+PHOTON" , "F"); leg->AddEntry( SBhists[21], "SIGNAL" , "F"); leg->AddEntry( datahists[3], "CMS Data 2012(19.145/fb)" , "PL"); leg->Draw("same"); } }
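// ---------------------------------------------------------------------------
// Note on the signal-region / sideband split used above: every sample is
// divided at fill time by the reconstructed top-mass window
// [mtopdown, mtopup] = [130, 220] GeV, and the in-window fraction
// (wphjetscale, wjetscale) is later used to transfer sideband-derived
// templates into the signal region.  The helper below is only a minimal,
// hypothetical sketch of that bookkeeping for a single input tree; the
// function name countMassWindowFraction is not part of the original macro,
// and the "masstop" branch layout (one vector<double> per event) is assumed
// from the event loops above.
#include "TTree.h"
#include <vector>

double countMassWindowFraction(TTree* tree, double mtopdown = 130., double mtopup = 220.)
{
   std::vector<double>* masstop = 0;
   tree->SetBranchAddress("masstop", &masstop);
   double inside = 0., outside = 0.;
   for (Long64_t ievt = 0; ievt < tree->GetEntries(); ++ievt) {
      tree->GetEntry(ievt);
      if (!masstop || masstop->empty()) continue;   // skip events without a reconstructed top
      if ((*masstop)[0] > mtopdown && (*masstop)[0] < mtopup) inside  += 1.;
      else                                                    outside += 1.;
   }
   return (inside + outside > 0.) ? inside / (inside + outside) : 0.;
}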
// Generate pseudo-random (Mersenne-Twister) and quasi-random (Sobol, Niederreiter)
// 2-d sequences and compare how uniformly they fill the unit square.
#include "Math/Random.h"
#include "Math/QuasiRandom.h"
#include "TH2.h"
#include "TCanvas.h"
#include "TStopwatch.h"
#include <iostream>

using namespace ROOT::Math;

int quasirandom(int n = 10000, int skip = 0)
{
   TH2D * h0 = new TH2D("h0","Pseudo-random Sequence",200,0,1,200,0,1);
   TH2D * h1 = new TH2D("h1","Sobol Sequence",200,0,1,200,0,1);
   TH2D * h2 = new TH2D("h2","Niederreiter Sequence",200,0,1,200,0,1);

   RandomMT r0;
   // quasi-random numbers need to be created giving the dimension of the sequence
   // in this case we generate a 2-d sequence
   QuasiRandomSobol r1(2);
   QuasiRandomNiederreiter r2(2);

   // generate n random points
   double x[2];
   TStopwatch w;
   w.Start();
   for (int i = 0; i < n; ++i) {
      r0.RndmArray(2,x);
      h0->Fill(x[0],x[1]);
   }
   std::cout << "Time for RandomMT ";
   w.Print();

   w.Start();
   if (skip > 0) r1.Skip(skip);
   for (int i = 0; i < n; ++i) {
      r1.Next(x);
      h1->Fill(x[0],x[1]);
   }
   std::cout << "Time for Sobol ";
   w.Print();

   w.Start();
   if (skip > 0) r2.Skip(skip);
   for (int i = 0; i < n; ++i) {
      r2.Next(x);
      h2->Fill(x[0],x[1]);
   }
   std::cout << "Time for Niederreiter ";
   w.Print();

   TCanvas * c1 = new TCanvas("c1","Random sequence",600,1200);
   c1->Divide(1,3);
   c1->cd(1); h0->Draw("COLZ");
   c1->cd(2); h1->Draw("COLZ");   // check uniformity
   c1->cd(3); h2->Draw("COLZ");
   gPad->Update();

   // test number of empty bins
   int nzerobins0 = 0;
   int nzerobins1 = 0;
   int nzerobins2 = 0;
   for (int i = 1; i <= h1->GetNbinsX(); ++i) {
      for (int j = 1; j <= h1->GetNbinsY(); ++j) {
         if (h0->GetBinContent(i,j) == 0) nzerobins0++;
         if (h1->GetBinContent(i,j) == 0) nzerobins1++;
         if (h2->GetBinContent(i,j) == 0) nzerobins2++;
      }
   }
   std::cout << "number of empty bins for pseudo-random = " << nzerobins0 << std::endl;
   std::cout << "number of empty bins for " << r1.Name() << "\t= " << nzerobins1 << std::endl;
   std::cout << "number of empty bins for " << r2.Name() << "\t= " << nzerobins2 << std::endl;

   int iret = 0;
   if (nzerobins1 >= nzerobins0) iret += 1;
   if (nzerobins2 >= nzerobins0) iret += 2;
   return iret;
}
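// ---------------------------------------------------------------------------
// A minimal, standalone sketch (not part of the macro above) illustrating why
// the low-discrepancy sequences compared above leave fewer empty regions: the
// same 2-d points can be used to estimate pi/4 as the fraction falling inside
// the unit quarter-circle, and the Sobol estimate typically converges faster
// than the pseudo-random one.  The function name quasirandom_pi is only an
// illustrative choice.
#include "Math/QuasiRandom.h"
#include "TRandom3.h"
#include "TMath.h"
#include <iostream>

void quasirandom_pi(int n = 100000)
{
   TRandom3 rng;                             // pseudo-random generator
   ROOT::Math::QuasiRandomSobol sobol(2);    // 2-dimensional Sobol sequence
   double x[2];
   int inPseudo = 0, inSobol = 0;
   for (int i = 0; i < n; ++i) {
      rng.RndmArray(2, x);
      if (x[0]*x[0] + x[1]*x[1] < 1.) ++inPseudo;
      sobol.Next(x);
      if (x[0]*x[0] + x[1]*x[1] < 1.) ++inSobol;
   }
   std::cout << "exact pi/4             = " << TMath::Pi()/4.     << std::endl;
   std::cout << "pseudo-random estimate = " << double(inPseudo)/n << std::endl;
   std::cout << "Sobol estimate         = " << double(inSobol)/n  << std::endl;
}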
// Unbinned maximum-likelihood fit of the Z -> mu mu gamma invariant-mass
// spectrum with a Gaussian signal plus polynomial background model.
using namespace RooFit;

RooGaussian fitZToMuMuGammaMassUnbinned(const char *filename = "ZToMuMuGammaMass.txt",
                                        const char* plotOpt = "NEU",
                                        const int nbins = 25)
{
   gROOT->ProcessLine(".L tdrstyle.C");
   setTDRStyle();
   gStyle->SetPadRightMargin(0.05);

   double minMass = 60;
   double maxMass = 120;
   RooRealVar mass("mass","M(#mu#mu#gamma)", minMass, maxMass,"GeV/c^{2}");

   // Read data set
   RooDataSet *data = RooDataSet::read(filename,RooArgSet(mass));
   // RooDataSet *dataB = RooDataSet::read(filenameB,RooArgSet(mass));

   // Build p.d.f.
   ////////////////////////////////////////////////
   //               Parameters                    //
   ////////////////////////////////////////////////
   // Signal p.d.f. parameters
   // Parameters for a Gaussian and a Crystal Ball Lineshape
   RooRealVar m0 ("m_{0}", "Bias", 91.19, minMass, maxMass,"GeV/c^{2}");
   RooRealVar sigma("#sigma","Width", 3.0,1.0,10.0,"GeV/c^{2}");
   RooRealVar cut ("#alpha","Cut", 0.6,0.6,2.0);
   RooRealVar power("power","Power", 10.0, 0.5, 20.0);

   // Background p.d.f. parameters
   // Parameters for a polynomial lineshape
   RooRealVar c0("c_{0}", "c0", 0., -10, 10);
   RooRealVar c1("c_{1}", "c1", 0., -100, 0);
   RooRealVar c2("c_{2}", "c2", 0., -100, 100);
   // c0.setConstant();

   // fraction of signal
   // RooRealVar frac("frac", "Signal Fraction", 0.1,0.,0.3.);
   RooRealVar nsig("N_{S}", "#signal events", 9000, 0.,10000.);
   RooRealVar nbkg("N_{B}", "#background events", 1000,2,10000.);

   ////////////////////////////////////////////////
   //                 P.D.F.s                     //
   ////////////////////////////////////////////////
   // #mu#mu#gamma mass signal p.d.f.
   RooGaussian signal("signal", "A Gaussian Lineshape", mass, m0, sigma);
   // RooCBShape signal("signal", "A Crystal Ball Lineshape", mass, m0,sigma, cut, power);

   // #mu#mu#gamma mass background p.d.f.
   RooPolynomial bg("bg", "Background Distribution", mass, RooArgList(c0,c1));

   // #mu#mu#gamma mass model p.d.f.
   // RooAddPdf model("model", "#mu#mu#gamma mass model", signal, bg, frac);
   RooAddPdf model("model", "#mu#mu#gamma mass model", RooArgList(signal, bg), RooArgList(nsig, nbkg));

   TStopwatch t;
   t.Start();
   // model.fitTo(*data,FitOptions("mh"),Optimize(0),Timer(1));
   signal.fitTo(*data,FitOptions("mh"),Optimize(0),Timer(1));
   t.Print();

   TCanvas *c = new TCanvas("c","Unbinned Invariant Mass Fit", 0,0,800,600);

   // Plot the fit results
   RooPlot* plot = mass.frame(Range(minMass,maxMass),Bins(nbins));
   // Plot 1
   // dataB->plotOn(plot, MarkerColor(kRed), LineColor(kRed));
   data->plotOn(plot);
   model.plotOn(plot);
   //model.paramOn(plot, Format(plotOpt, AutoPrecision(1)), Parameters(RooArgSet(nsig, nbkg, m0, sigma)));
   model.paramOn(plot, Format(plotOpt, AutoPrecision(1)), Parameters(RooArgSet(m0, sigma)));
   // model.plotOn(plot, Components("signal"), LineStyle(kDashed), LineColor(kRed));
   // model.plotOn(plot, Components("bg"), LineStyle(kDashed), LineColor(kRed));
   plot->Draw();

   TLatex * tex = new TLatex(0.2,0.8,"CMS preliminary");
   tex->SetNDC();
   tex->SetTextFont(42);
   tex->SetLineWidth(2);
   tex->Draw();
   tex->DrawLatex(0.2, 0.725, "7 TeV Data, L = 258 pb^{-1}");

   float fsig_peak = NormalizedIntegral(signal, mass,
                                        m0.getVal() - 2.5*sigma.getVal(),
                                        m0.getVal() + 2.5*sigma.getVal());
   // float fbkg_peak = NormalizedIntegral(bg, mass,
   //                                      m0.getVal() - 2.5*sigma.getVal(),
   //                                      m0.getVal() + 2.5*sigma.getVal());

   double nsigVal = fsig_peak * nsig.getVal();
   double nsigErr = fsig_peak * nsig.getError();
   double nsigErrRel = nsigErr / nsigVal;
   // double nbkgVal = fbkg_peak * nbkg.getVal();
   // double nbkgErr = fbkg_peak * nbkg.getError();
   // double nbkgErrRel = nbkgErr / nbkgVal;
   cout << "nsig " << nsigVal << " +/- " << nsigErr << endl;
   // cout << "S/B_{#pm2.5#sigma} " << nsigVal/nbkgVal << " +/- "
   //      << (nsigVal/nbkgVal)*sqrt(nsigErrRel*nsigErrRel + nbkgErrRel*nbkgErrRel)
   //      << endl;
   // tex->DrawLatex(0.2, 0.6, Form("N_{S} = %.0f#pm%.0f", nsigVal, nsigErr) );
   // tex->DrawLatex(0.2, 0.525, Form("S/B_{#pm2.5#sigma} = %.1f", nsigVal/nbkgVal) );
   // tex->DrawLatex(0.2, 0.45, Form("#frac{S}{#sqrt{B}}_{#pm2.5#sigma} = %.1f", nsigVal/sqrt(nbkgVal)));

   TLegend *leg = new TLegend(0.65,0.6,0.9,0.75);
   leg->SetFillColor(kWhite);
   leg->SetLineColor(kWhite);
   leg->SetShadowColor(kWhite);
   leg->SetTextFont(42);
   // TLegendEntry * ldata  = leg->AddEntry(data,  "Opposite Sign");
   // TLegendEntry * ldataB = leg->AddEntry(dataB, "Same Sign");
   // ldata->SetMarkerStyle(20);
   // ldataB->SetMarkerStyle(20);
   // ldataB->SetMarkerColor(kRed);
   leg->Draw();

   return signal;
}
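// ---------------------------------------------------------------------------
// The fit macro above calls NormalizedIntegral(...), which is defined elsewhere
// in the original code.  The sketch below is a plausible minimal implementation
// under the assumption that it returns the fraction of a p.d.f. contained in
// [lo, hi] relative to its full normalization range; the exact original
// implementation may differ.  If kept in the same file, it should be declared
// before fitZToMuMuGammaMassUnbinned (or forward-declared).
#include "RooAbsPdf.h"
#include "RooRealVar.h"
#include "RooAbsReal.h"
#include "RooArgSet.h"
#include "RooGlobalFunc.h"

double NormalizedIntegral(RooAbsPdf& pdf, RooRealVar& x, double lo, double hi)
{
   x.setRange("window", lo, hi);   // named sub-range for the numerator
   RooAbsReal* frac = pdf.createIntegral(RooArgSet(x), RooFit::NormSet(x), RooFit::Range("window"));
   double result = frac->getVal(); // fraction of the p.d.f. in [lo, hi]
   delete frac;
   return result;
}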
void run_trac_its(Int_t nEvents = 10, TString mcEngine = "TGeant3")
{
   // Initialize logger
   FairLogger *logger = FairLogger::GetLogger();
   logger->SetLogVerbosityLevel("LOW");
   logger->SetLogScreenLevel("INFO");

   // Input and output file names
   std::stringstream inputfile, outputfile, paramfile;
   inputfile  << "AliceO2_" << mcEngine << ".clus_"   << nEvents << "_event.root";
   paramfile  << "AliceO2_" << mcEngine << ".params_" << nEvents << ".root";
   outputfile << "AliceO2_" << mcEngine << ".trac_"   << nEvents << "_event.root";

   // Setup timer
   TStopwatch timer;

   // Setup FairRoot analysis manager
   FairRunAna * fRun = new FairRunAna();
   FairFileSource *fFileSource = new FairFileSource(inputfile.str().c_str());
   fRun->SetSource(fFileSource);
   fRun->SetOutputFile(outputfile.str().c_str());

   // Setup Runtime DB
   FairRuntimeDb* rtdb = fRun->GetRuntimeDb();
   FairParRootFileIo* parInput1 = new FairParRootFileIo();
   parInput1->open(paramfile.str().c_str());
   rtdb->setFirstInput(parInput1);

   // Setup tracker
   // To run with n threads call AliceO2::ITS::CookedTrackerTask(n)
   AliceO2::ITS::CookedTrackerTask *trac = new AliceO2::ITS::CookedTrackerTask;
   fRun->AddTask(trac);

   fRun->Init();

   AliceO2::Field::MagneticField* fld = (AliceO2::Field::MagneticField*)fRun->GetField();
   if (!fld) {
      std::cout << "Failed to get field instance from FairRunAna" << std::endl;
      return;
   }
   trac->setBz(fld->solenoidField()); // in kG

   timer.Start();
   fRun->Run();
   std::cout << std::endl << std::endl;

   // Extract the maximal used memory and add it as a Dart measurement
   // This line is filtered by CTest and the value sent to CDash
   FairSystemInfo sysInfo;
   Float_t maxMemory = sysInfo.GetMaxMemory();
   std::cout << "<DartMeasurement name=\"MaxMemory\" type=\"numeric/double\">";
   std::cout << maxMemory;
   std::cout << "</DartMeasurement>" << std::endl;

   timer.Stop();
   Double_t rtime = timer.RealTime();
   Double_t ctime = timer.CpuTime();
   Float_t cpuUsage = ctime / rtime;
   std::cout << "<DartMeasurement name=\"CpuLoad\" type=\"numeric/double\">";
   std::cout << cpuUsage;
   std::cout << "</DartMeasurement>" << std::endl;

   std::cout << std::endl << std::endl;
   std::cout << "Macro finished successfully." << std::endl;
   std::cout << std::endl << std::endl;
   std::cout << "Output file is " << outputfile.str() << std::endl;
   //std::cout << "Parameter file is " << parFile << std::endl;
   std::cout << "Real time " << rtime << " s, CPU time " << ctime << " s" << std::endl << std::endl;
}
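// ---------------------------------------------------------------------------
// Typical invocation of the tracking macro above (an assumption about the
// surrounding workflow, not part of the macro itself): the clusterization step
// is expected to have produced AliceO2_<mcEngine>.clus_<nEvents>_event.root
// and the matching parameter file in the working directory, after which the
// macro can be run from the shell as, e.g.,
//   root -l -q 'run_trac_its.C(10, "TGeant3")'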
void Classify_HWW( TString myMethodList = "" ) { #ifdef __CINT__ gROOT->ProcessLine( ".O0" ); // turn off optimization in CINT #endif //-------------------------------------------------------------------- // path to weights dir (this is where MVA training info is stored) // output root file will be stored at [path]/output //-------------------------------------------------------------------- TString path = "Trainings/v5/H160_WW_10vars_dphi10/"; //TString path = "./"; //----------------------------------- // select samples to run over //----------------------------------- char* babyPath = "/tas/cerati/HtoWWmvaBabies/latest"; int mH = 160; // choose Higgs mass vector<char*> samples; samples.push_back("WWTo2L2Nu"); samples.push_back("GluGluToWWTo4L"); samples.push_back("WZ"); samples.push_back("ZZ"); samples.push_back("TTJets"); samples.push_back("tW"); samples.push_back("WJetsToLNu"); samples.push_back("DY"); //samples.push_back("WJetsFO3"); if ( mH == 130 ) samples.push_back("Higgs130"); else if( mH == 160 ) samples.push_back("Higgs160"); else if( mH == 200 ) samples.push_back("Higgs200"); else{ cout << "Error, unrecognized Higgs mass " << mH << " GeV, quitting" << endl; exit(0); } //-------------------------------------------------------------------------------- // IMPORTANT: set the following variables to the same set used for MVA training!!! //-------------------------------------------------------------------------------- std::map<std::string,int> mvaVar; mvaVar[ "lephard_pt" ] = 1; mvaVar[ "lepsoft_pt" ] = 1; mvaVar[ "dil_dphi" ] = 1; mvaVar[ "dil_mass" ] = 1; mvaVar[ "event_type" ] = 0; mvaVar[ "met_projpt" ] = 1; mvaVar[ "met_pt" ] = 0; mvaVar[ "mt_lephardmet" ] = 1; mvaVar[ "mt_lepsoftmet" ] = 1; mvaVar[ "mthiggs" ] = 1; mvaVar[ "dphi_lephardmet" ] = 1; mvaVar[ "dphi_lepsoftmet" ] = 1; mvaVar[ "lepsoft_fbrem" ] = 0; mvaVar[ "lepsoft_eOverPIn" ] = 0; mvaVar[ "lepsoft_qdphi" ] = 0; //--------------------------------------------------------------- // This loads the library TMVA::Tools::Instance(); // Default MVA methods to be trained + tested std::map<std::string,int> Use; // --- Cut optimisation Use["Cuts"] = 1; Use["CutsD"] = 1; Use["CutsPCA"] = 0; Use["CutsGA"] = 0; Use["CutsSA"] = 0; // // --- 1-dimensional likelihood ("naive Bayes estimator") Use["Likelihood"] = 1; Use["LikelihoodD"] = 0; // the "D" extension indicates decorrelated input variables (see option strings) Use["LikelihoodPCA"] = 1; // the "PCA" extension indicates PCA-transformed input variables (see option strings) Use["LikelihoodKDE"] = 0; Use["LikelihoodMIX"] = 0; // // --- Mutidimensional likelihood and Nearest-Neighbour methods Use["PDERS"] = 1; Use["PDERSD"] = 0; Use["PDERSPCA"] = 0; Use["PDEFoam"] = 1; Use["PDEFoamBoost"] = 0; // uses generalised MVA method boosting Use["KNN"] = 1; // k-nearest neighbour method // // --- Linear Discriminant Analysis Use["LD"] = 1; // Linear Discriminant identical to Fisher Use["Fisher"] = 0; Use["FisherG"] = 0; Use["BoostedFisher"] = 0; // uses generalised MVA method boosting Use["HMatrix"] = 0; // // --- Function Discriminant analysis Use["FDA_GA"] = 1; // minimisation of user-defined function using Genetics Algorithm Use["FDA_SA"] = 0; Use["FDA_MC"] = 0; Use["FDA_MT"] = 0; Use["FDA_GAMT"] = 0; Use["FDA_MCMT"] = 0; // // --- Neural Networks (all are feed-forward Multilayer Perceptrons) Use["MLP"] = 0; // Recommended ANN Use["MLPBFGS"] = 0; // Recommended ANN with optional training method Use["MLPBNN"] = 1; // Recommended ANN with BFGS training method and bayesian regulator 
Use["CFMlpANN"] = 0; // Depreciated ANN from ALEPH Use["TMlpANN"] = 0; // ROOT's own ANN // // --- Support Vector Machine Use["SVM"] = 1; // // --- Boosted Decision Trees Use["BDT"] = 1; // uses Adaptive Boost Use["BDTG"] = 0; // uses Gradient Boost Use["BDTB"] = 0; // uses Bagging Use["BDTD"] = 0; // decorrelation + Adaptive Boost // // --- Friedman's RuleFit method, ie, an optimised series of cuts ("rules") Use["RuleFit"] = 1; // --------------------------------------------------------------- Use["Plugin"] = 0; Use["Category"] = 0; Use["SVM_Gauss"] = 0; Use["SVM_Poly"] = 0; Use["SVM_Lin"] = 0; std::cout << std::endl; std::cout << "==> Start TMVAClassificationApplication" << std::endl; // Select methods (don't look at this code - not of interest) if (myMethodList != "") { for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) it->second = 0; std::vector<TString> mlist = gTools().SplitString( myMethodList, ',' ); for (UInt_t i=0; i<mlist.size(); i++) { std::string regMethod(mlist[i]); if (Use.find(regMethod) == Use.end()) { std::cout << "Method \"" << regMethod << "\" not known in TMVA under this name. Choose among the following:" << std::endl; for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { std::cout << it->first << " "; } std::cout << std::endl; return; } Use[regMethod] = 1; } } // -------------------------------------------------------------------------------------------------- const unsigned int nsamples = samples.size(); for( unsigned int i = 0 ; i < nsamples ; ++i ){ // --- Create the Reader object TMVA::Reader *reader = new TMVA::Reader( "!Color:!Silent" ); // Create a set of variables and declare them to the reader // - the variable names MUST corresponds in name and type to those given in the weight file(s) used // Float_t var1, var2; // Float_t var3, var4; // reader->AddVariable( "myvar1 := var1+var2", &var1 ); // reader->AddVariable( "myvar2 := var1-var2", &var2 ); // reader->AddVariable( "var3", &var3 ); // reader->AddVariable( "var4", &var4 ); Float_t lephard_pt; Float_t lepsoft_pt; Float_t dil_dphi; Float_t dil_mass; Float_t event_type; Float_t met_projpt; Float_t met_pt; Float_t mt_lephardmet; Float_t mt_lepsoftmet; Float_t mthiggs; Float_t dphi_lephardmet; Float_t dphi_lepsoftmet; Float_t lepsoft_fbrem; Float_t lepsoft_eOverPIn; Float_t lepsoft_qdphi; if( mvaVar["lephard_pt"]) reader->AddVariable( "lephard_pt" , &lephard_pt ); if( mvaVar["lepsoft_pt"]) reader->AddVariable( "lepsoft_pt" , &lepsoft_pt ); if( mvaVar["dil_dphi"]) reader->AddVariable( "dil_dphi" , &dil_dphi ); if( mvaVar["dil_mass"]) reader->AddVariable( "dil_mass" , &dil_mass ); if( mvaVar["event_type"]) reader->AddVariable( "event_type" , &event_type ); if( mvaVar["met_projpt"]) reader->AddVariable( "met_projpt" , &met_pt ); if( mvaVar["met_pt"]) reader->AddVariable( "met_pt" , &met_pt ); if( mvaVar["mt_lephardmet"]) reader->AddVariable( "mt_lephardmet" , &mt_lephardmet ); if( mvaVar["mt_lepsoftmet"]) reader->AddVariable( "mt_lepsoftmet" , &mt_lepsoftmet ); if( mvaVar["mthiggs"]) reader->AddVariable( "mthiggs" , &mthiggs ); if( mvaVar["dphi_lephardmet"]) reader->AddVariable( "dphi_lephardmet" , &dphi_lephardmet ); if( mvaVar["dphi_lepsoftmet"]) reader->AddVariable( "dphi_lepsoftmet" , &dphi_lepsoftmet ); if( mvaVar["lepsoft_fbrem"]) reader->AddVariable( "lepsoft_fbrem" , &lepsoft_fbrem ); if( mvaVar["lepsoft_eOverPIn"]) reader->AddVariable( "lepsoft_eOverPIn" , &lepsoft_eOverPIn ); if( mvaVar["lepsoft_qdphi"]) reader->AddVariable( "lepsoft_q * 
lepsoft_dPhiIn" , &lepsoft_qdphi ); // Spectator variables declared in the training have to be added to the reader, too // Float_t spec1,spec2; // reader->AddSpectator( "spec1 := var1*2", &spec1 ); // reader->AddSpectator( "spec2 := var1*3", &spec2 ); Float_t Category_cat1, Category_cat2, Category_cat3; if (Use["Category"]){ // Add artificial spectators for distinguishing categories // reader->AddSpectator( "Category_cat1 := var3<=0", &Category_cat1 ); // reader->AddSpectator( "Category_cat2 := (var3>0)&&(var4<0)", &Category_cat2 ); // reader->AddSpectator( "Category_cat3 := (var3>0)&&(var4>=0)", &Category_cat3 ); } // --- Book the MVA methods //-------------------------------------------------------------------------------------- // tell Classify_HWW where to find the weights dir, which contains the trained MVA's. // In this example, the weights dir is located at [path]/[dir] // and the output root file is written to [path]/[output] //-------------------------------------------------------------------------------------- TString dir = path + "weights/"; TString outdir = path + "output/"; TString prefix = "TMVAClassification"; // Book method(s) for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { if (it->second) { TString methodName = TString(it->first) + TString(" method"); TString weightfile = dir + prefix + TString("_") + TString(it->first) + TString(".weights.xml"); reader->BookMVA( methodName, weightfile ); } } // Book output histograms UInt_t nbin = 1000; TH1F *histLk(0), *histLkD(0), *histLkPCA(0), *histLkKDE(0), *histLkMIX(0), *histPD(0), *histPDD(0); TH1F *histPDPCA(0), *histPDEFoam(0), *histPDEFoamErr(0), *histPDEFoamSig(0), *histKNN(0), *histHm(0); TH1F *histFi(0), *histFiG(0), *histFiB(0), *histLD(0), *histNn(0),*histNnbfgs(0),*histNnbnn(0); TH1F *histNnC(0), *histNnT(0), *histBdt(0), *histBdtG(0), *histBdtD(0), *histRf(0), *histSVMG(0); TH1F *histSVMP(0), *histSVML(0), *histFDAMT(0), *histFDAGA(0), *histCat(0), *histPBdt(0); if (Use["Likelihood"]) histLk = new TH1F( "MVA_Likelihood", "MVA_Likelihood", nbin, -1, 1 ); if (Use["LikelihoodD"]) histLkD = new TH1F( "MVA_LikelihoodD", "MVA_LikelihoodD", nbin, -1, 0.9999 ); if (Use["LikelihoodPCA"]) histLkPCA = new TH1F( "MVA_LikelihoodPCA", "MVA_LikelihoodPCA", nbin, -1, 1 ); if (Use["LikelihoodKDE"]) histLkKDE = new TH1F( "MVA_LikelihoodKDE", "MVA_LikelihoodKDE", nbin, -0.00001, 0.99999 ); if (Use["LikelihoodMIX"]) histLkMIX = new TH1F( "MVA_LikelihoodMIX", "MVA_LikelihoodMIX", nbin, 0, 1 ); if (Use["PDERS"]) histPD = new TH1F( "MVA_PDERS", "MVA_PDERS", nbin, 0, 1 ); if (Use["PDERSD"]) histPDD = new TH1F( "MVA_PDERSD", "MVA_PDERSD", nbin, 0, 1 ); if (Use["PDERSPCA"]) histPDPCA = new TH1F( "MVA_PDERSPCA", "MVA_PDERSPCA", nbin, 0, 1 ); if (Use["KNN"]) histKNN = new TH1F( "MVA_KNN", "MVA_KNN", nbin, 0, 1 ); if (Use["HMatrix"]) histHm = new TH1F( "MVA_HMatrix", "MVA_HMatrix", nbin, -0.95, 1.55 ); if (Use["Fisher"]) histFi = new TH1F( "MVA_Fisher", "MVA_Fisher", nbin, -4, 4 ); if (Use["FisherG"]) histFiG = new TH1F( "MVA_FisherG", "MVA_FisherG", nbin, -1, 1 ); if (Use["BoostedFisher"]) histFiB = new TH1F( "MVA_BoostedFisher", "MVA_BoostedFisher", nbin, -2, 2 ); if (Use["LD"]) histLD = new TH1F( "MVA_LD", "MVA_LD", nbin, -2, 2 ); if (Use["MLP"]) histNn = new TH1F( "MVA_MLP", "MVA_MLP", nbin, -1.25, 1.5 ); if (Use["MLPBFGS"]) histNnbfgs = new TH1F( "MVA_MLPBFGS", "MVA_MLPBFGS", nbin, -1.25, 1.5 ); if (Use["MLPBNN"]) histNnbnn = new TH1F( "MVA_MLPBNN", "MVA_MLPBNN", nbin, -1.25, 1.5 ); if (Use["CFMlpANN"]) 
histNnC = new TH1F( "MVA_CFMlpANN", "MVA_CFMlpANN", nbin, 0, 1 ); if (Use["TMlpANN"]) histNnT = new TH1F( "MVA_TMlpANN", "MVA_TMlpANN", nbin, -1.3, 1.3 ); if (Use["BDT"]) histBdt = new TH1F( "MVA_BDT", "MVA_BDT", nbin, -1. , 1. ); if (Use["BDTD"]) histBdtD = new TH1F( "MVA_BDTD", "MVA_BDTD", nbin, -0.8, 0.8 ); if (Use["BDTG"]) histBdtG = new TH1F( "MVA_BDTG", "MVA_BDTG", nbin, -1.0, 1.0 ); if (Use["RuleFit"]) histRf = new TH1F( "MVA_RuleFit", "MVA_RuleFit", nbin, -2.0, 2.0 ); if (Use["SVM_Gauss"]) histSVMG = new TH1F( "MVA_SVM_Gauss", "MVA_SVM_Gauss", nbin, 0.0, 1.0 ); if (Use["SVM_Poly"]) histSVMP = new TH1F( "MVA_SVM_Poly", "MVA_SVM_Poly", nbin, 0.0, 1.0 ); if (Use["SVM_Lin"]) histSVML = new TH1F( "MVA_SVM_Lin", "MVA_SVM_Lin", nbin, 0.0, 1.0 ); if (Use["FDA_MT"]) histFDAMT = new TH1F( "MVA_FDA_MT", "MVA_FDA_MT", nbin, -2.0, 3.0 ); if (Use["FDA_GA"]) histFDAGA = new TH1F( "MVA_FDA_GA", "MVA_FDA_GA", nbin, -2.0, 3.0 ); if (Use["Category"]) histCat = new TH1F( "MVA_Category", "MVA_Category", nbin, -2., 2. ); if (Use["Plugin"]) histPBdt = new TH1F( "MVA_PBDT", "MVA_BDT", nbin, -0.8, 0.8 ); if (Use["Likelihood"]) histLk ->Sumw2(); if (Use["LikelihoodD"]) histLkD ->Sumw2(); if (Use["LikelihoodPCA"]) histLkPCA ->Sumw2(); if (Use["LikelihoodKDE"]) histLkKDE ->Sumw2(); if (Use["LikelihoodMIX"]) histLkMIX ->Sumw2(); if (Use["PDERS"]) histPD ->Sumw2(); if (Use["PDERSD"]) histPDD ->Sumw2(); if (Use["PDERSPCA"]) histPDPCA ->Sumw2(); if (Use["KNN"]) histKNN ->Sumw2(); if (Use["HMatrix"]) histHm ->Sumw2(); if (Use["Fisher"]) histFi ->Sumw2(); if (Use["FisherG"]) histFiG ->Sumw2(); if (Use["BoostedFisher"]) histFiB ->Sumw2(); if (Use["LD"]) histLD ->Sumw2(); if (Use["MLP"]) histNn ->Sumw2(); if (Use["MLPBFGS"]) histNnbfgs ->Sumw2(); if (Use["MLPBNN"]) histNnbnn ->Sumw2(); if (Use["CFMlpANN"]) histNnC ->Sumw2(); if (Use["TMlpANN"]) histNnT ->Sumw2(); if (Use["BDT"]) histBdt ->Sumw2(); if (Use["BDTD"]) histBdtD ->Sumw2(); if (Use["BDTG"]) histBdtG ->Sumw2(); if (Use["RuleFit"]) histRf ->Sumw2(); if (Use["SVM_Gauss"]) histSVMG ->Sumw2(); if (Use["SVM_Poly"]) histSVMP ->Sumw2(); if (Use["SVM_Lin"]) histSVML ->Sumw2(); if (Use["FDA_MT"]) histFDAMT ->Sumw2(); if (Use["FDA_GA"]) histFDAGA ->Sumw2(); if (Use["Category"]) histCat ->Sumw2(); if (Use["Plugin"]) histPBdt ->Sumw2(); // PDEFoam also returns per-event error, fill in histogram, and also fill significance if (Use["PDEFoam"]) { histPDEFoam = new TH1F( "MVA_PDEFoam", "MVA_PDEFoam", nbin, 0, 1 ); histPDEFoamErr = new TH1F( "MVA_PDEFoamErr", "MVA_PDEFoam error", nbin, 0, 1 ); histPDEFoamSig = new TH1F( "MVA_PDEFoamSig", "MVA_PDEFoam significance", nbin, 0, 10 ); } // Book example histogram for probability (the other methods are done similarly) TH1F *probHistFi(0), *rarityHistFi(0); if (Use["Fisher"]) { probHistFi = new TH1F( "MVA_Fisher_Proba", "MVA_Fisher_Proba", nbin, 0, 1 ); rarityHistFi = new TH1F( "MVA_Fisher_Rarity", "MVA_Fisher_Rarity", nbin, 0, 1 ); } // Prepare input tree (this must be replaced by your data source) // in this example, there is a toy tree with signal and one with background events // we'll later on use only the "signal" events for the test in this example. 
// TChain *ch = new TChain("Events"); if( strcmp( samples.at(i) , "DY" ) == 0 ){ ch -> Add( Form("%s/DYToMuMuM20_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/DYToMuMuM10To20_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/DYToEEM20_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/DYToEEM10To20_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/DYToTauTauM20_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/DYToTauTauM10To20_PU_testFinal_baby.root",babyPath) ); } if( strcmp( samples.at(i) , "WJetsFO3" ) == 0 ){ ch -> Add( Form("%s/WJetsToLNu_FOv3_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/WToLNu_FOv3_testFinal_baby.root",babyPath) ); } else if( strcmp( samples.at(i) , "Higgs130" ) == 0 ){ ch -> Add( Form("%s/HToWWTo2L2NuM130_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWToLNuTauNuM130_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWTo2Tau2NuM130_PU_testFinal_baby.root",babyPath) ); } else if( strcmp( samples.at(i) , "Higgs160" ) == 0 ){ ch -> Add( Form("%s/HToWWTo2L2NuM160_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWToLNuTauNuM160_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWTo2Tau2NuM160_PU_testFinal_baby.root",babyPath) ); } else if( strcmp( samples.at(i) , "Higgs200" ) == 0 ){ ch -> Add( Form("%s/HToWWTo2L2NuM200_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWToLNuTauNuM200_PU_testFinal_baby.root",babyPath) ); ch -> Add( Form("%s/HToWWTo2Tau2NuM200_PU_testFinal_baby.root",babyPath) ); } else{ ch -> Add( Form("%s/%s_PU_testFinal_baby.root",babyPath,samples.at(i)) ); } // --- Event loop // Prepare the event tree // - here the variable names have to corresponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // TTree *theTree = (TTree*) ch; std::cout << "--- Using input files: -------------------" << std::endl; TObjArray *listOfFiles = ch->GetListOfFiles(); TIter fileIter(listOfFiles); TChainElement* currentFile = 0; while((currentFile = (TChainElement*)fileIter.Next())) { std::cout << currentFile->GetTitle() << std::endl; } Float_t lephard_pt_; Float_t lepsoft_pt_; Float_t lepsoft_fr_; Float_t dil_dphi_; Float_t dil_mass_; Float_t event_type_; Float_t met_projpt_; Int_t jets_num_; Int_t extralep_num_; Int_t lowptbtags_num_; Int_t softmu_num_; Float_t event_scale1fb_; Float_t met_pt_; Int_t lepsoft_passTighterId_; Float_t mt_lephardmet_; Float_t mt_lepsoftmet_; Float_t mthiggs_; Float_t dphi_lephardmet_; Float_t dphi_lepsoftmet_; Float_t lepsoft_fbrem_; Float_t lepsoft_eOverPIn_; Float_t lepsoft_q_; Float_t lepsoft_dPhiIn_; theTree->SetBranchAddress( "lephard_pt_" , &lephard_pt_ ); theTree->SetBranchAddress( "lepsoft_pt_" , &lepsoft_pt_ ); theTree->SetBranchAddress( "lepsoft_fr_" , &lepsoft_fr_ ); theTree->SetBranchAddress( "dil_dphi_" , &dil_dphi_ ); theTree->SetBranchAddress( "dil_mass_" , &dil_mass_ ); theTree->SetBranchAddress( "event_type_" , &event_type_ ); theTree->SetBranchAddress( "met_projpt_" , &met_projpt_ ); theTree->SetBranchAddress( "jets_num_" , &jets_num_ ); theTree->SetBranchAddress( "extralep_num_" , &extralep_num_ ); theTree->SetBranchAddress( "lowptbtags_num_" , &lowptbtags_num_ ); theTree->SetBranchAddress( "softmu_num_" , &softmu_num_ ); theTree->SetBranchAddress( "event_scale1fb_" , &event_scale1fb_ ); theTree->SetBranchAddress( "lepsoft_passTighterId_" , &lepsoft_passTighterId_ ); theTree->SetBranchAddress( 
"met_pt_" , &met_pt_ ); theTree->SetBranchAddress( "mt_lephardmet_" , &mt_lephardmet_ ); theTree->SetBranchAddress( "mt_lepsoftmet_" , &mt_lepsoftmet_ ); theTree->SetBranchAddress( "mthiggs_" , &mthiggs_ ); theTree->SetBranchAddress( "dphi_lephardmet_" , &dphi_lephardmet_ ); theTree->SetBranchAddress( "dphi_lepsoftmet_" , &dphi_lepsoftmet_ ); theTree->SetBranchAddress( "lepsoft_fbrem_" , &lepsoft_fbrem_ ); theTree->SetBranchAddress( "lepsoft_eOverPIn_" , &lepsoft_eOverPIn_ ); theTree->SetBranchAddress( "lepsoft_q_" , &lepsoft_q_ ); theTree->SetBranchAddress( "lepsoft_dPhiIn_" , &lepsoft_dPhiIn_ ); // Efficiency calculator for cut method Int_t nSelCutsGA = 0; Double_t effS = 0.7; std::vector<Float_t> vecVar(4); // vector for EvaluateMVA tests std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl; TStopwatch sw; sw.Start(); int npass = 0; float yield = 0.; for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { if (ievt%1000 == 0) std::cout << "--- ... Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); //------------------------------------------------------- // event selection //------------------------------------------------------- if( dil_dphi_ > 1. ) continue; //em if( event_type_ > 0.5 && event_type_ < 2.5 ){ if( met_projpt_ < 20. ) continue; } //ee/mm if( event_type_ < 0.5 || event_type_ > 2.5 ){ if( met_projpt_ < 35. ) continue; } if( lephard_pt_ < 20. ) continue; if( jets_num_ > 0 ) continue; if( extralep_num_ > 0 ) continue; if( lowptbtags_num_ > 0 ) continue; if( softmu_num_ > 0 ) continue; if( dil_mass_ < 12. ) continue; if( lepsoft_passTighterId_ == 0 ) continue; //if( event_type_ < 1.5 ) continue; //if( event_type > 1.5 && lepsoft_pt_ < 15. ) continue; //mH-dependent selection if( mH == 130 ){ if( lepsoft_pt_ < 10. ) continue; if( dil_mass_ > 90. ) continue; } else if( mH == 160 ){ if( lepsoft_pt_ < 20. ) continue; if( dil_mass_ > 100. ) continue; } else if( mH == 200 ){ if( lepsoft_pt_ < 20. ) continue; if( dil_mass_ > 130. 
) continue; } float weight = event_scale1fb_ * lepsoft_fr_ * 0.5; //-------------------------------------------------------- // important: here we associate branches to MVA variables //-------------------------------------------------------- lephard_pt = lephard_pt_; lepsoft_pt = lepsoft_pt_; dil_mass = dil_mass_; dil_dphi = dil_dphi_; event_type = event_type_; met_pt = met_pt_; met_projpt = met_projpt_; mt_lephardmet = mt_lephardmet_; mt_lepsoftmet = mt_lepsoftmet_; mthiggs = mthiggs_; dphi_lephardmet = dphi_lephardmet_; dphi_lepsoftmet = dphi_lepsoftmet_; lepsoft_fbrem = lepsoft_fbrem_; lepsoft_eOverPIn = lepsoft_eOverPIn_; lepsoft_qdphi = lepsoft_q_ * lepsoft_dPhiIn_; npass++; yield+=weight; // var1 = userVar1 + userVar2; // var2 = userVar1 - userVar2; // --- Return the MVA outputs and fill into histograms if (Use["CutsGA"]) { // Cuts is a special case: give the desired signal efficienciy Bool_t passed = reader->EvaluateMVA( "CutsGA method", effS ); if (passed) nSelCutsGA++; } if (Use["Likelihood" ]) histLk ->Fill( reader->EvaluateMVA( "Likelihood method" ) , weight); if (Use["LikelihoodD" ]) histLkD ->Fill( reader->EvaluateMVA( "LikelihoodD method" ) , weight); if (Use["LikelihoodPCA"]) histLkPCA ->Fill( reader->EvaluateMVA( "LikelihoodPCA method" ) , weight); if (Use["LikelihoodKDE"]) histLkKDE ->Fill( reader->EvaluateMVA( "LikelihoodKDE method" ) , weight); if (Use["LikelihoodMIX"]) histLkMIX ->Fill( reader->EvaluateMVA( "LikelihoodMIX method" ) , weight); if (Use["PDERS" ]) histPD ->Fill( reader->EvaluateMVA( "PDERS method" ) , weight); if (Use["PDERSD" ]) histPDD ->Fill( reader->EvaluateMVA( "PDERSD method" ) , weight); if (Use["PDERSPCA" ]) histPDPCA ->Fill( reader->EvaluateMVA( "PDERSPCA method" ) , weight); if (Use["KNN" ]) histKNN ->Fill( reader->EvaluateMVA( "KNN method" ) , weight); if (Use["HMatrix" ]) histHm ->Fill( reader->EvaluateMVA( "HMatrix method" ) , weight); if (Use["Fisher" ]) histFi ->Fill( reader->EvaluateMVA( "Fisher method" ) , weight); if (Use["FisherG" ]) histFiG ->Fill( reader->EvaluateMVA( "FisherG method" ) , weight); if (Use["BoostedFisher"]) histFiB ->Fill( reader->EvaluateMVA( "BoostedFisher method" ) , weight); if (Use["LD" ]) histLD ->Fill( reader->EvaluateMVA( "LD method" ) , weight); if (Use["MLP" ]) histNn ->Fill( reader->EvaluateMVA( "MLP method" ) , weight); if (Use["MLPBFGS" ]) histNnbfgs ->Fill( reader->EvaluateMVA( "MLPBFGS method" ) , weight); if (Use["MLPBNN" ]) histNnbnn ->Fill( reader->EvaluateMVA( "MLPBNN method" ) , weight); if (Use["CFMlpANN" ]) histNnC ->Fill( reader->EvaluateMVA( "CFMlpANN method" ) , weight); if (Use["TMlpANN" ]) histNnT ->Fill( reader->EvaluateMVA( "TMlpANN method" ) , weight); if (Use["BDT" ]) histBdt ->Fill( reader->EvaluateMVA( "BDT method" ) , weight); if (Use["BDTD" ]) histBdtD ->Fill( reader->EvaluateMVA( "BDTD method" ) , weight); if (Use["BDTG" ]) histBdtG ->Fill( reader->EvaluateMVA( "BDTG method" ) , weight); if (Use["RuleFit" ]) histRf ->Fill( reader->EvaluateMVA( "RuleFit method" ) , weight); if (Use["SVM_Gauss" ]) histSVMG ->Fill( reader->EvaluateMVA( "SVM_Gauss method" ) , weight); if (Use["SVM_Poly" ]) histSVMP ->Fill( reader->EvaluateMVA( "SVM_Poly method" ) , weight); if (Use["SVM_Lin" ]) histSVML ->Fill( reader->EvaluateMVA( "SVM_Lin method" ) , weight); if (Use["FDA_MT" ]) histFDAMT ->Fill( reader->EvaluateMVA( "FDA_MT method" ) , weight); if (Use["FDA_GA" ]) histFDAGA ->Fill( reader->EvaluateMVA( "FDA_GA method" ) , weight); if (Use["Category" ]) histCat ->Fill( reader->EvaluateMVA( "Category 
method" ) , weight); if (Use["Plugin" ]) histPBdt ->Fill( reader->EvaluateMVA( "P_BDT method" ) , weight); // Retrieve also per-event error if (Use["PDEFoam"]) { Double_t val = reader->EvaluateMVA( "PDEFoam method" ); Double_t err = reader->GetMVAError(); histPDEFoam ->Fill( val ); histPDEFoamErr->Fill( err ); if (err>1.e-50) histPDEFoamSig->Fill( val/err , weight); } // Retrieve probability instead of MVA output if (Use["Fisher"]) { probHistFi ->Fill( reader->GetProba ( "Fisher method" ) , weight); rarityHistFi->Fill( reader->GetRarity( "Fisher method" ) , weight); } } std::cout << npass << " events passing selection, yield " << yield << std::endl; // Get elapsed time sw.Stop(); std::cout << "--- End of event loop: "; sw.Print(); // Get efficiency for cuts classifier if (Use["CutsGA"]) std::cout << "--- Efficiency for CutsGA method: " << double(nSelCutsGA)/theTree->GetEntries() << " (for a required signal efficiency of " << effS << ")" << std::endl; if (Use["CutsGA"]) { // test: retrieve cuts for particular signal efficiency // CINT ignores dynamic_casts so we have to use a cuts-secific Reader function to acces the pointer TMVA::MethodCuts* mcuts = reader->FindCutsMVA( "CutsGA method" ) ; if (mcuts) { std::vector<Double_t> cutsMin; std::vector<Double_t> cutsMax; mcuts->GetCuts( 0.7, cutsMin, cutsMax ); std::cout << "--- -------------------------------------------------------------" << std::endl; std::cout << "--- Retrieve cut values for signal efficiency of 0.7 from Reader" << std::endl; for (UInt_t ivar=0; ivar<cutsMin.size(); ivar++) { std::cout << "... Cut: " << cutsMin[ivar] << " < \"" << mcuts->GetInputVar(ivar) << "\" <= " << cutsMax[ivar] << std::endl; } std::cout << "--- -------------------------------------------------------------" << std::endl; } } // --- Write histograms cout << "dir " << dir << endl; char* mydir = outdir; TFile *target = new TFile( Form("%s/%s.root",mydir,samples.at(i) ) ,"RECREATE" ); cout << "Writing to file " << Form("%s/%s.root",mydir,samples.at(i) ) << endl; if (Use["Likelihood" ]) histLk ->Write(); if (Use["LikelihoodD" ]) histLkD ->Write(); if (Use["LikelihoodPCA"]) histLkPCA ->Write(); if (Use["LikelihoodKDE"]) histLkKDE ->Write(); if (Use["LikelihoodMIX"]) histLkMIX ->Write(); if (Use["PDERS" ]) histPD ->Write(); if (Use["PDERSD" ]) histPDD ->Write(); if (Use["PDERSPCA" ]) histPDPCA ->Write(); if (Use["KNN" ]) histKNN ->Write(); if (Use["HMatrix" ]) histHm ->Write(); if (Use["Fisher" ]) histFi ->Write(); if (Use["FisherG" ]) histFiG ->Write(); if (Use["BoostedFisher"]) histFiB ->Write(); if (Use["LD" ]) histLD ->Write(); if (Use["MLP" ]) histNn ->Write(); if (Use["MLPBFGS" ]) histNnbfgs ->Write(); if (Use["MLPBNN" ]) histNnbnn ->Write(); if (Use["CFMlpANN" ]) histNnC ->Write(); if (Use["TMlpANN" ]) histNnT ->Write(); if (Use["BDT" ]) histBdt ->Write(); if (Use["BDTD" ]) histBdtD ->Write(); if (Use["BDTG" ]) histBdtG ->Write(); if (Use["RuleFit" ]) histRf ->Write(); if (Use["SVM_Gauss" ]) histSVMG ->Write(); if (Use["SVM_Poly" ]) histSVMP ->Write(); if (Use["SVM_Lin" ]) histSVML ->Write(); if (Use["FDA_MT" ]) histFDAMT ->Write(); if (Use["FDA_GA" ]) histFDAGA ->Write(); if (Use["Category" ]) histCat ->Write(); if (Use["Plugin" ]) histPBdt ->Write(); // Write also error and significance histos if (Use["PDEFoam"]) { histPDEFoam->Write(); histPDEFoamErr->Write(); histPDEFoamSig->Write(); } // Write also probability hists if (Use["Fisher"]) { if (probHistFi != 0) probHistFi->Write(); if (rarityHistFi != 0) rarityHistFi->Write(); } target->Close(); delete 
reader; std::cout << "==> TMVAClassificationApplication is done with sample " << samples.at(i) << endl << std::endl; } }
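// ------------------------------------------------------------------------
// Hedged sketch (not part of the macro above): the bare TMVA::Reader
// pattern that the application macro relies on -- register the input
// variables, book a weight file, evaluate per event.  The variable names
// echo the macro above, but the weight-file path and the hand-set values
// are placeholders for illustration only.
// ------------------------------------------------------------------------
void TMVAReaderSketch() {
  TMVA::Tools::Instance();
  TMVA::Reader *reader = new TMVA::Reader("!Color:!Silent");
  // Variables must match, in name and type, those used in the training.
  Float_t lephard_pt, lepsoft_pt, dil_mass;
  reader->AddVariable("lephard_pt", &lephard_pt);
  reader->AddVariable("lepsoft_pt", &lepsoft_pt);
  reader->AddVariable("dil_mass",   &dil_mass);
  // Hypothetical weight file from an earlier TMVA training.
  reader->BookMVA("BDT method", "weights/TMVAClassification_BDT.weights.xml");
  TH1F *histBdt = new TH1F("MVA_BDT_sketch", "MVA_BDT_sketch", 100, -1., 1.);
  // In a real macro these values come from TTree branches inside the event
  // loop; dummy values are used here just to show the call sequence.
  lephard_pt = 25.; lepsoft_pt = 12.; dil_mass = 45.;
  histBdt->Fill(reader->EvaluateMVA("BDT method"));
  delete reader;
}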
void califaAna_batch(Int_t nEvents=1, Int_t fGeoVer=1, Double_t fThres=0.000050, Double_t fExpRes=5., Double_t fDelPolar=3.2, Double_t fDelAzimuthal=3.2) {
  cout << "Running califaAna_batch with arguments:" << endl;
  cout << "Number of events: " << nEvents << endl;
  cout << "CALIFA geo version: " << fGeoVer << endl;
  cout << "Threshold: " << fThres << endl;
  cout << "Experimental resolution: " << fExpRes << endl << endl;
  // In general, the following parts need not be touched
  // ========================================================================
  // ---- Debug option -------------------------------------------------
  gDebug = 0;
  // ------------------------------------------------------------------------
  // ----- Timer --------------------------------------------------------
  TStopwatch timer;
  timer.Start();
  // ------------------------------------------------------------------------
  // ----- Create analysis run ----------------------------------------
  FairRunAna* fRun = new FairRunAna();
  FairRuntimeDb* rtdb = fRun->GetRuntimeDb();
  FairParRootFileIo* parIo1 = new FairParRootFileIo();
  parIo1->open("r3bpar.root");
  rtdb->setFirstInput(parIo1);
  rtdb->print();
  fRun->SetInputFile("r3bsim.root");
  fRun->SetOutputFile("califaAna.root");
  // ----- Analysis routines for CALIFA
  R3BCaloHitFinder* caloHF = new R3BCaloHitFinder();
  // Selecting the geometry version
  //  0- CALIFA 5.0, including BARREL and ENDCAP.
  //  1- CALIFA 7.05, only BARREL
  //  2- CALIFA 7.07, only BARREL
  //  3- CALIFA 7.09, only BARREL (ongoing work)
  //  4- CALIFA 7.17, only ENDCAP (in CsI[Tl])
  //  5- CALIFA 7.07+7.17,
  //  6- CALIFA 7.09+7.17, (ongoing work)
  // 10- CALIFA 8.11, only BARREL (ongoing work)
  // ...
  caloHF->SelectGeometryVersion(fGeoVer);
  //caloHF->SelectGeometryVersion(10);
  caloHF->SetDetectionThreshold(fThres);              // 50 keV [fThres in GeV]
  caloHF->SetExperimentalResolution(fExpRes);         // 5% at 1 MeV
  caloHF->SetAngularWindow(fDelPolar,fDelAzimuthal);  // [0.25 around 14.3 degrees, 3.2 for the complete calorimeter]
  fRun->AddTask(caloHF);
  fRun->Init();
  fRun->Run(0, nEvents);
  // ----- Finish -------------------------------------------------------
  timer.Stop();
  Double_t rtime = timer.RealTime();
  Double_t ctime = timer.CpuTime();
  cout << endl << endl;
  cout << "Macro finished successfully." << endl;
  cout << "Real time " << rtime << " s, CPU time " << ctime << " s" << endl;
  cout << endl;
  // ------------------------------------------------------------------------
}
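// ------------------------------------------------------------------------
// Hedged convenience wrapper (not in the original): runs califaAna_batch
// above with its own default settings for a chosen number of events.
// Assumes the macro above is loaded in the same ROOT session; the argument
// values simply repeat its defaults (geometry version 1, 50 keV threshold,
// 5% resolution at 1 MeV, full angular window).
// ------------------------------------------------------------------------
void califaAna_run(Int_t nEvents = 10000) {
  califaAna_batch(nEvents, 1, 0.000050, 5., 3.2, 3.2);
}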
void TMVAClassificationApplication_TX(TString myMethodList = "" , TString iFileName = "", TString sampleLocation = "", TString outputLocation = "") { #ifdef __CINT__ gROOT->ProcessLine( ".O0" ); // turn off optimization in CINT #endif //--------------------------------------------------------------- // This loads the library TMVA::Tools::Instance(); // Default MVA methods to be trained + tested std::map<std::string,int> Use; // --- Cut optimisation Use["Cuts"] = 0; Use["CutsD"] = 0; Use["CutsPCA"] = 0; Use["CutsGA"] = 0; Use["CutsSA"] = 0; // // // --- Boosted Decision Trees Use["BDT"] = 1; // uses Adaptive Boost std::cout << std::endl; std::cout << "==> Start TMVAClassificationApplication" << std::endl; // Select methods (don't look at this code - not of interest) if (myMethodList != "") { for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) it->second = 0; std::vector<TString> mlist = gTools().SplitString( myMethodList, ',' ); for (UInt_t i=0; i<mlist.size(); i++) { std::string regMethod(mlist[i]); if (Use.find(regMethod) == Use.end()) { std::cout << "Method \"" << regMethod << "\" not known in TMVA under this name. Choose among the following:" << std::endl; for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { std::cout << it->first << " "; } std::cout << std::endl; return; } Use[regMethod] = 1; } } // -------------------------------------------------------------------------------------------------- // --- Create the Reader object TMVA::Reader *reader = new TMVA::Reader("!Color:!Silent" ); // Create a set of variables and declare them to the reader // - the variable names MUST corresponds in name and type to those given in the weight file(s) used Float_t var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, var11, var12, var13, var14, var15, var16, var17, var18, var19, var20, var21, var22, var23, var24, var25, var26, var27, var28, var29; //reader->AddVariable( "Alt$(jet_pt_singleLepCalc[0],0)", &var1); //reader->AddVariable( "Alt$(jet_pt_singleLepCalc[1],0)", &var2 ); //reader->AddVariable( "Alt$(jet_pt_singleLepCalc[2],0)", &var3 ); reader->AddVariable( "Alt$(bJetPt_CATopoCalc[0],0)", &var4 ); reader->AddVariable( "Alt$(bJetPt_CATopoCalc[1],0)", &var5 ); //reader->AddVariable( "corr_met_singleLepCalc", &var6 ); //reader->AddVariable( "muon_1_pt_singleLepCalc", &var7 ); //reader->AddVariable( "nBJets_CATopoCalc", &var8 ); //reader->AddVariable( "nSelJets_CommonCalc", &var9 ); //reader->AddVariable( "LeptonJet_DeltaR_LjetsTopoCalcNew", &var10); reader->AddVariable( "Mevent_LjetsTopoCalcNew", &var11); //reader->AddVariable( "W_Pt_LjetsTopoCalcNew", &var12 ); reader->AddVariable( "Jet1Jet2_Pt_LjetsTopoCalcNew", &var13 ); //reader->AddVariable( "BestTop_LjetsTopoCalcNew", &var14 ); //reader->AddVariable( "BTagTopMass_LjetsTopoCalcNew", &var15 ); //reader->AddVariable( "Alt$(CAHEPTopJetMass_JetSubCalc[0],0)", &var16 ); //reader->AddVariable( "Alt$(CAWCSVMSubJets_JetSubCalc[0],0)", &var17 ); //reader->AddVariable( "Alt$(CAWCSVLSubJets_JetSubCalc[0],0)", &var18 ); reader->AddVariable( "Alt$(CAWJetPt_JetSubCalc[0],0)", &var19 ); reader->AddVariable( "Alt$(CAWJetMass_JetSubCalc[0],0)", &var20 ); //reader->AddVariable( "Alt$(CAHEPTopJetMass_JetSubCalc[1],0)", &var21 ); //reader->AddVariable( "Hz_LjetsTopoCalcNew", &var22 ); //reader->AddVariable( "Centrality_LjetsTopoCalcNew", &var23 ); reader->AddVariable( "SqrtsT_LjetsTopoCalcNew", &var24 ); reader->AddVariable( "CAMindrBMass_CATopoCalc", &var28 ); reader->AddVariable( 
"minDRCAtoB_CATopoCalc", &var29 ); //reader->AddVariable( "HT2prime_LjetsTopoCalcNew", &var25 ); reader->AddVariable( "HT2_LjetsTopoCalcNew", &var26 ); //reader->AddVariable( "dphiLepMet_LjetsTopoCalcNew", &var27 ); // Spectator variables declared in the training have to be added to the reader, too // Float_t spec1,spec2; // reader->AddSpectator( "spec1 := var1*2", &spec1 ); // reader->AddSpectator( "spec2 := var1*3", &spec2 ); // Float_t Category_cat1, Category_cat2, Category_cat3; // if (Use["Category"]){ // // Add artificial spectators for distinguishing categories // reader->AddSpectator( "Category_cat1 := var3<=0", &Category_cat1 ); // reader->AddSpectator( "Category_cat2 := (var3>0)&&(var4<0)", &Category_cat2 ); // reader->AddSpectator( "Category_cat3 := (var3>0)&&(var4>=0)", &Category_cat3 ); // } // --- Book the MVA methods // Book method(s) TString weightFileName = "weights/TMVAClassification_BDT.weights"; reader->BookMVA("BDT method", weightFileName+".xml" ); // Book output histograms UInt_t nbin = 100; TH1F *histBdt(0); histBdt = new TH1F( "MVA_BDT_TX", "MVA_BDT_TX", nbin, -1.0, 1.0); // Prepare input tree (this must be replaced by your data source) // in this example, there is a toy tree with signal and one with background events // we'll later on use only the "signal" events for the test in this example. // TFile *input(0); TString fileName = iFileName; TString fname = sampleLocation+"/"; fname += fileName; TString oFileName = fileName; if (!gSystem->AccessPathName( fname )) input = TFile::Open( fname ); // check if file in local directory exists if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; // --- Event loop // Prepare the event tree // - here the variable names have to corresponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // std::cout << "--- Select signal sample" << std::endl; TTree* theTree = (TTree*)input->Get("ljmet"); gSystem->mkdir( outputLocation ); TFile *target = new TFile( outputLocation+"/"+oFileName,"RECREATE" ); TTree *newTree = theTree->CloneTree(); Float_t BDT; TBranch *branchBDT = newTree->Branch("__BDT_TX__",&BDT,"__BDT_TX__/F"); std::vector<Double_t> *vecVar1; std::vector<Double_t> *vecVar4; std::vector<Double_t> *vecVar16; std::vector<Int_t> *vecVar17; std::vector<Int_t> *vecVar18; std::vector<Double_t> *vecVar19; std::vector<Double_t> *vecVar20; Int_t *intVar5, *intVar8, *intVar9; Double_t *dVar2, *dVar3, *dVar6, *dVar7, *dVar10, *dVar11, *dVar12, *dVar13, *dVar14, *dVar15, *dVar22, *dVar23, *dVar24, dVar25, *dVar26, *dVar27, *dVar28, *dVar29; theTree->SetBranchAddress( "jet_pt_singleLepCalc", &vecVar1); theTree->SetBranchAddress( "bJetPt_CATopoCalc", &vecVar4 ); theTree->SetBranchAddress( "corr_met_singleLepCalc", &dVar6 ); theTree->SetBranchAddress( "muon_1_pt_singleLepCalc", &dVar7 ); theTree->SetBranchAddress( "nBJets_CATopoCalc", &intVar8 ); theTree->SetBranchAddress( "nSelJets_CommonCalc", &intVar9 ); theTree->SetBranchAddress( "LeptonJet_DeltaR_LjetsTopoCalcNew", &dVar10); theTree->SetBranchAddress( "Mevent_LjetsTopoCalcNew", &dVar11); theTree->SetBranchAddress( "W_Pt_LjetsTopoCalcNew", &dVar12 ); theTree->SetBranchAddress( "Jet1Jet2_Pt_LjetsTopoCalcNew", &dVar13 ); theTree->SetBranchAddress( "BestTop_LjetsTopoCalcNew", &dVar14 ); theTree->SetBranchAddress( "BTagTopMass_LjetsTopoCalcNew", 
&dVar15 ); theTree->SetBranchAddress( "CAHEPTopJetMass_JetSubCalc", &vecVar16 ); theTree->SetBranchAddress( "CAWCSVMSubJets_JetSubCalc", &vecVar17 ); theTree->SetBranchAddress( "CAWCSVLSubJets_JetSubCalc", &vecVar18 ); theTree->SetBranchAddress( "CAWJetPt_JetSubCalc", &vecVar19 ); theTree->SetBranchAddress( "CAWJetMass_JetSubCalc", &vecVar20 ); theTree->SetBranchAddress( "Hz_LjetsTopoCalcNew", &dVar22 ); theTree->SetBranchAddress( "Centrality_LjetsTopoCalcNew", &dVar23 ); theTree->SetBranchAddress( "SqrtsT_LjetsTopoCalcNew", &dVar24 ); theTree->SetBranchAddress( "HT2prime_LjetsTopoCalcNew", &dVar25 ); theTree->SetBranchAddress( "HT2_LjetsTopoCalcNew", &dVar26 ); theTree->SetBranchAddress( "dphiLepMet_LjetsTopoCalcNew", &dVar27 ); theTree->SetBranchAddress( "CAMindrBMass_CATopoCalc", &dVar28 ); theTree->SetBranchAddress( "minDRCAtoB_CATopoCalc", &dVar29 ); // Efficiency calculator for cut method Int_t nSelCutsGA = 0; Double_t effS = 0.7; std::vector<Float_t> vecVar(4); // vector for EvaluateMVA tests std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl; TStopwatch sw; sw.Start(); for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { if (ievt%1000 == 0) std::cout << "--- ... Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); if(vecVar1->size()>0){ var1 = vecVar1->at(0); } if(vecVar1->size()>1){ var2 = vecVar1->at(1); } if(vecVar1->size()>2){ var3 = vecVar1->at(2); } if(vecVar4->size()>0){ var4 = vecVar4->at(0); } if(vecVar4->size()>1){ var5 = vecVar4->at(1); } var6 = dVar6; var7 = dVar7; var8 = intVar8; var9 = intVar9; var10 = dVar10; var11 = dVar11; var12 = dVar12; var13 = dVar13; var14 = dVar14; var15 = dVar15; if(vecVar16->size()>0){ var16 = vecVar16->at(0); } else{ var16 = 0; } if(vecVar17->size()>0){ var17 = vecVar17->at(0); } else{ var18 = 0; } if(vecVar19->size()>0){ var19 = vecVar19->at(0); } else{ var19 = 0; } if(vecVar20->size()>0){ var20 = vecVar20->at(0); } else{ var20 = 0; } if(vecVar16->size()>1){ var21 = vecVar16->at(1); } else{ var21 = 0; } var22 = dVar22; var23 = dVar23; var24 = dVar24; var25 = dVar25; var26 = dVar26; var27 = dVar27; var28 = dVar28; var29 = dVar29; // --- Return the MVA outputs and fill into histograms if (Use["CutsGA"]) { // Cuts is a special case: give the desired signal efficienciy Bool_t passed = reader->EvaluateMVA( "CutsGA method", effS ); if (passed) nSelCutsGA++; } BDT = reader->EvaluateMVA( "BDT method"); histBdt->Fill(BDT); branchBDT->Fill(); } // Get elapsed time sw.Stop(); std::cout << "--- End of event loop: "; sw.Print(); // Get efficiency for cuts classifier if (Use["CutsGA"]) std::cout << "--- Efficiency for CutsGA method: " << double(nSelCutsGA)/theTree->GetEntries() << " (for a required signal efficiency of " << effS << ")" << std::endl; if (Use["CutsGA"]) { // test: retrieve cuts for particular signal efficiency // CINT ignores dynamic_casts so we have to use a cuts-secific Reader function to acces the pointer TMVA::MethodCuts* mcuts = reader->FindCutsMVA( "CutsGA method" ) ; if (mcuts) { std::vector<Double_t> cutsMin; std::vector<Double_t> cutsMax; mcuts->GetCuts( 0.7, cutsMin, cutsMax ); std::cout << "--- -------------------------------------------------------------" << std::endl; std::cout << "--- Retrieve cut values for signal efficiency of 0.7 from Reader" << std::endl; for (UInt_t ivar=0; ivar<cutsMin.size(); ivar++) { std::cout << "... 
Cut: " << cutsMin[ivar] << " < \"" << mcuts->GetInputVar(ivar) << "\" <= " << cutsMax[ivar] << std::endl; } std::cout << "--- -------------------------------------------------------------" << std::endl; } } // --- Write histograms newTree->Write("",TObject::kOverwrite); target->Close(); std::cout << "--- Created root file: \""<<oFileName<<"\" containing the MVA output histograms" << std::endl; delete reader; std::cout << "==> TMVAClassificationApplication is done!" << endl << std::endl; }
void TMVAClassificationApplication_new(TString myMethodList = "" , TString iFileName = "", TString bkgSample = "", TString sampleLocation = "", TString massPoint = "", TString oFileLocation = "") { #ifdef __CINT__ gROOT->ProcessLine( ".O0" ); // turn off optimization in CINT #endif //--------------------------------------------------------------- // This loads the library TMVA::Tools::Instance(); // Default MVA methods to be trained + tested std::map<std::string,int> Use; // --- Cut optimisation Use["Cuts"] = 0; Use["CutsD"] = 0; Use["CutsPCA"] = 0; Use["CutsGA"] = 0; Use["CutsSA"] = 0; // // // --- Boosted Decision Trees Use["BDT"] = 1; // uses Adaptive Boost std::cout << std::endl; std::cout << "==> Start TMVAClassificationApplication" << std::endl; // Select methods (don't look at this code - not of interest) if (myMethodList != "") { for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) it->second = 0; std::vector<TString> mlist = gTools().SplitString( myMethodList, ',' ); for (UInt_t i=0; i<mlist.size(); i++) { std::string regMethod(mlist[i]); if (Use.find(regMethod) == Use.end()) { std::cout << "Method \"" << regMethod << "\" not known in TMVA under this name. Choose among the following:" << std::endl; for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { std::cout << it->first << " "; } std::cout << std::endl; return; } Use[regMethod] = 1; } } // -------------------------------------------------------------------------------------------------- // --- Create the Reader object TMVA::Reader *reader = new TMVA::Reader("!Color:!Silent" ); TString weightTail = "_"; weightTail = weightTail + massPoint; // Create a set of variables and declare them to the reader // - the variable names MUST corresponds in name and type to those given in the weight file(s) used Float_t var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, var11, var12, var13, var14, var15, var16, var17, var18; reader->AddVariable( "svMass", &var1); reader->AddVariable( "dRTauTau", &var3 ); reader->AddVariable( "dRJJ", &var4 ); // reader->AddVariable( "svPt", &var5 ); // reader->AddVariable( "dRhh", &var6 ); reader->AddVariable( "met", &var7 ); reader->AddVariable( "mJJ", &var8 ); // reader->AddVariable( "metTau1DPhi", &var9 ); // reader->AddVariable( "metTau2DPhi", &var10); // reader->AddVariable( "metJ1DPhi", &var11); // reader->AddVariable( "metJ2DPhi", &var12 ); // reader->AddVariable( "metTauPairDPhi", &var13 ); // reader->AddVariable( "metSvTauPairDPhi", &var14 ); // reader->AddVariable( "metJetPairDPhi", &var15 ); // reader->AddVariable( "CSVJ1", &var16 ); // reader->AddVariable( "CSVJ2", &var17 ); reader->AddVariable( "fMassKinFit", &var2 ); reader->AddVariable( "chi2KinFit2", &var18 ); // Spectator variables declared in the training have to be added to the reader, too // Float_t spec1,spec2; // reader->AddSpectator( "spec1 := var1*2", &spec1 ); // reader->AddSpectator( "spec2 := var1*3", &spec2 ); // Float_t Category_cat1, Category_cat2, Category_cat3; // if (Use["Category"]){ // // Add artificial spectators for distinguishing categories // reader->AddSpectator( "Category_cat1 := var3<=0", &Category_cat1 ); // reader->AddSpectator( "Category_cat2 := (var3>0)&&(var4<0)", &Category_cat2 ); // reader->AddSpectator( "Category_cat3 := (var3>0)&&(var4>=0)", &Category_cat3 ); // } // --- Book the MVA methods // Book method(s) TString weightFileName = "/nfs_scratch/zmao/test/CMSSW_5_3_15/src/TMVA-v4.2.0/test/weights/TMVAClassification_BDT.weights_"; 
weightFileName += bkgSample; weightFileName += weightTail; reader->BookMVA("BDT method", weightFileName+".xml" ); // Book output histograms UInt_t nbin = 200; TH1F *histBdt(0); histBdt = new TH1F( "MVA_BDT", "MVA_BDT", nbin, -1.0, 1.0); // Prepare input tree (this must be replaced by your data source) // in this example, there is a toy tree with signal and one with background events // we'll later on use only the "signal" events for the test in this example. // TFile *input(0); TString fileName = iFileName; TString fname = sampleLocation; fname += fileName; TString oFileName = oFileLocation; oFileName += "ClassApp_" + bkgSample; oFileName += "_"; oFileName += fileName; if (!gSystem->AccessPathName( fname )) input = TFile::Open(fname); // check if file in local directory exists if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; // --- Event loop // Prepare the event tree // - here the variable names have to corresponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // std::cout << "--- Select signal sample" << std::endl; TTree* theTree = (TTree*)input->Get("eventTree"); TFile *target = new TFile( oFileName,"RECREATE" ); TTree *newTree = theTree->CloneTree(); Float_t BDT; TBranch *branchBDT = newTree->Branch("BDT_"+bkgSample,&BDT,"BDT/F"); std::vector<Double_t> *vecVar1; std::vector<Double_t> *vecVar5; std::vector<Double_t> *vecVar7; theTree->SetBranchAddress( "svMass", &vecVar1); theTree->SetBranchAddress( "dRTauTau", &var3); theTree->SetBranchAddress( "dRJJ", &var4 ); // theTree->SetBranchAddress( "svPt", &vecVar5 ); // theTree->SetBranchAddress( "dRhh", &var6 ); theTree->SetBranchAddress( "met", &vecVar7 ); theTree->SetBranchAddress( "mJJ", &var8 ); // theTree->SetBranchAddress( "metTau1DPhi", &var9 ); // theTree->SetBranchAddress( "metTau2DPhi", &var10); // theTree->SetBranchAddress( "metJ1DPhi", &var11); // theTree->SetBranchAddress( "metJ2DPhi", &var12 ); // theTree->SetBranchAddress( "metTauPairDPhi", &var13 ); // theTree->SetBranchAddress( "metSvTauPairDPhi", &var14 ); // theTree->SetBranchAddress( "metJetPairDPhi", &var15 ); // theTree->SetBranchAddress( "CSVJ1", &var16 ); // theTree->SetBranchAddress( "CSVJ2", &var17 ); theTree->SetBranchAddress( "fMassKinFit", &var2); theTree->SetBranchAddress( "chi2KinFit2", &var18); //to get initial pre-processed events TH1F* cutFlow = (TH1F*)input->Get("preselection"); // Efficiency calculator for cut method Int_t nSelCutsGA = 0; Double_t effS = 0.7; std::vector<Float_t> vecVar(4); // vector for EvaluateMVA tests std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl; TStopwatch sw; sw.Start(); for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { if (ievt%1000 == 0) std::cout << "--- ... 
Processing event: " << ievt << std::endl; theTree->GetEntry(ievt); var1 = vecVar1->at(0); // var5 = vecVar5->at(0); var7 = vecVar7->at(0); // --- Return the MVA outputs and fill into histograms if (Use["CutsGA"]) { // Cuts is a special case: give the desired signal efficienciy Bool_t passed = reader->EvaluateMVA( "CutsGA method", effS ); if (passed) nSelCutsGA++; } BDT = reader->EvaluateMVA( "BDT method"); histBdt->Fill(BDT); branchBDT->Fill(); } // Get elapsed time sw.Stop(); std::cout << "--- End of event loop: "; sw.Print(); // Get efficiency for cuts classifier if (Use["CutsGA"]) std::cout << "--- Efficiency for CutsGA method: " << double(nSelCutsGA)/theTree->GetEntries() << " (for a required signal efficiency of " << effS << ")" << std::endl; if (Use["CutsGA"]) { // test: retrieve cuts for particular signal efficiency // CINT ignores dynamic_casts so we have to use a cuts-secific Reader function to acces the pointer TMVA::MethodCuts* mcuts = reader->FindCutsMVA( "CutsGA method" ) ; if (mcuts) { std::vector<Double_t> cutsMin; std::vector<Double_t> cutsMax; mcuts->GetCuts( 0.7, cutsMin, cutsMax ); std::cout << "--- -------------------------------------------------------------" << std::endl; std::cout << "--- Retrieve cut values for signal efficiency of 0.7 from Reader" << std::endl; for (UInt_t ivar=0; ivar<cutsMin.size(); ivar++) { std::cout << "... Cut: " << cutsMin[ivar] << " < \"" << mcuts->GetInputVar(ivar) << "\" <= " << cutsMax[ivar] << std::endl; } std::cout << "--- -------------------------------------------------------------" << std::endl; } } // --- Write histograms histBdt->Write(); cutFlow->Write(); newTree->Write(); target->Close(); std::cout << "--- Created root file: \""<<oFileName<<"\" containing the MVA output histograms" << std::endl; delete reader; std::cout << "==> TMVAClassificationApplication is done!" << endl << std::endl; }
void TMVAClassificationApplication( TString myMethodList = "" ) { //--------------------------------------------------------------- // default MVA methods to be trained + tested // this loads the library TMVA::Tools::Instance(); std::map<std::string,int> Use; Use["CutsGA"] = 0; // other "Cuts" methods work identically // --- Use["Likelihood"] = 1; Use["LikelihoodD"] = 0; // the "D" extension indicates decorrelated input variables (see option strings) Use["LikelihoodPCA"] = 0; // the "PCA" extension indicates PCA-transformed input variables (see option strings) Use["LikelihoodKDE"] = 0; Use["LikelihoodMIX"] = 0; // --- Use["PDERS"] = 0; Use["PDERSD"] = 0; Use["PDERSPCA"] = 0; Use["PDERSkNN"] = 0; // depreciated until further notice Use["PDEFoam"] = 0; // -- Use["KNN"] = 0; // --- Use["HMatrix"] = 0; Use["Fisher"] = 0; Use["FisherG"] = 0; Use["BoostedFisher"] = 0; Use["LD"] = 0; // --- Use["FDA_GA"] = 0; Use["FDA_SA"] = 0; Use["FDA_MC"] = 0; Use["FDA_MT"] = 0; Use["FDA_GAMT"] = 0; Use["FDA_MCMT"] = 0; // --- Use["MLP"] = 0; // this is the recommended ANN Use["MLPBFGS"] = 0; // recommended ANN with optional training method Use["MLPBNN"] = 0; // Use["CFMlpANN"] = 0; // *** missing Use["TMlpANN"] = 0; // --- Use["SVM"] = 0; // --- Use["BDT"] = 1; Use["BDTD"] = 0; Use["BDTG"] = 0; Use["BDTB"] = 0; // --- Use["RuleFit"] = 0; // --- Use["Category"] = 0; // --- Use["Plugin"] = 0; // --------------------------------------------------------------- std::cout << std::endl; std::cout << "==> Start TMVAClassificationApplication" << std::endl; if (myMethodList != "") { for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) it->second = 0; std::vector<TString> mlist = gTools().SplitString( myMethodList, ',' ); for (UInt_t i=0; i<mlist.size(); i++) { std::string regMethod(mlist[i]); if (Use.find(regMethod) == Use.end()) { std::cout << "Method \"" << regMethod << "\" not known in TMVA under this name. 
Choose among the following:" << std::endl;
      for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) std::cout << it->first << " ";
      std::cout << std::endl;
      return;
    }
    Use[regMethod] = 1;
  }
}
//
// create the Reader object
//
TMVA::Reader *reader = new TMVA::Reader( "!Color:!Silent" );
Float_t Z_rapidity_z; reader->AddVariable("Z_rapidity_z",&Z_rapidity_z);
Float_t THRUST_2D; reader->AddVariable("THRUST_2D",&THRUST_2D);
Float_t L1_L2_cosangle; reader->AddVariable("L1_L2_cosangle",&L1_L2_cosangle);
Float_t TransMass_ZH150_uncl; reader->AddVariable("TransMass_ZH150_uncl",&TransMass_ZH150_uncl);
Float_t TransMass_ZH150; reader->AddVariable("TransMass_ZH150",&TransMass_ZH150);
Float_t DeltaPhi_ZH; reader->AddVariable("DeltaPhi_ZH",&DeltaPhi_ZH);
Float_t DeltaPhi_ZH_uncl; reader->AddVariable("DeltaPhi_ZH_uncl",&DeltaPhi_ZH_uncl);
Float_t CMAngle; reader->AddVariable("CMAngle",&CMAngle);
Float_t CS_cosangle; reader->AddVariable("CS_cosangle",&CS_cosangle);
// create a set of variables and declare them to the reader
// - the variable names must correspond in name and type to
//   those given in the weight file(s) that you use
Float_t var1, var2;
Float_t var3, var4;
// reader->AddVariable( "myvar1 := var1+var2", &var1 );
// reader->AddVariable( "myvar2 := var1-var2", &var2 );
// reader->AddVariable( "var3", &var3 );
// reader->AddVariable( "var4", &var4 );
// Spectator variables declared in the training have to be added to the reader, too
Float_t spec1,spec2;
// reader->AddSpectator( "spec1 := var1*2", &spec1 ); float nonsense =0;
// reader->AddSpectator( "spec2 := var1*3", &spec2 ); float nonsense =0;
Float_t Category_cat1, Category_cat2, Category_cat3;
if (Use["Category"]){
  // add artificial spectators for distinguishing categories
  // reader->AddSpectator( "Category_cat1 := var3<=0", &Category_cat1 ); float nonsense =0;
  // reader->AddSpectator( "Category_cat2 := (var3>0)&&(var4<0)", &Category_cat2 ); float nonsense =0;
  // reader->AddSpectator( "Category_cat3 := (var3>0)&&(var4>=0)", &Category_cat3 ); float nonsense =0;
}
//
// book the MVA methods
//
TString dir = "weights/";
TString prefix = "TMVAClassification";
// book method(s)
for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) {
  if (it->second) {
    TString methodName = it->first + " method";
    TString weightfile = dir + prefix + "_" + TString(it->first) + ".weights.xml";
    reader->BookMVA( methodName, weightfile );
  }
}
// example how to use your own method as plugin
if (Use["Plugin"]) {
  // the weight file contains a line
  // Method : MethodName::InstanceName
  // if MethodName is not a known TMVA method, it is assumed to be
  // a user implemented method which has to be loaded via the
  // plugin mechanism
  // for user implemented methods the line in the weight file can be
  // Method : PluginName::InstanceName
  // where PluginName can be anything
  // before usage the plugin has to be defined, which can happen
  // either through the following line in .rootrc:
  // # plugin handler          plugin class     library      constructor format
  // Plugin.TMVA@@MethodBase:  PluginName  MethodClassName  UserPackage  "MethodName(DataSet&,TString)"
  //
  // or by telling the global plugin manager directly
  gPluginMgr->AddHandler("TMVA@@MethodBase", "PluginName", "MethodClassName", "UserPackage", "MethodName(DataSet&,TString)");
  // the class is then looked for in libUserPackage.so
  // now the method can be booked like any other
  reader->BookMVA( "User method", dir + prefix + "_User.weights.txt" );
}
// book output histograms
UInt_t nbin = 100;
TH1F
*histLk(0), *histLkD(0), *histLkPCA(0), *histLkKDE(0), *histLkMIX(0), *histPD(0), *histPDD(0); TH1F *histPDPCA(0), *histPDEFoam(0), *histPDEFoamErr(0), *histPDEFoamSig(0), *histKNN(0), *histHm(0); TH1F *histFi(0), *histFiG(0), *histFiB(0), *histLD(0), *histNn(0),*histNnbfgs(0),*histNnbnn(0), *histNnC(0), *histNnT(0), *histBdt(0), *histBdtG(0), *histBdtD(0); TH1F *histRf(0), *histSVMG(0), *histSVMP(0), *histSVML(0), *histFDAMT(0), *histFDAGA(0), *histCat(0), *histPBdt(0); if (Use["Likelihood"]) histLk = new TH1F( "MVA_Likelihood", "MVA_Likelihood", nbin, -1, 1 ); if (Use["LikelihoodD"]) histLkD = new TH1F( "MVA_LikelihoodD", "MVA_LikelihoodD", nbin, -1, 0.9999 ); if (Use["LikelihoodPCA"]) histLkPCA = new TH1F( "MVA_LikelihoodPCA", "MVA_LikelihoodPCA", nbin, -1, 1 ); if (Use["LikelihoodKDE"]) histLkKDE = new TH1F( "MVA_LikelihoodKDE", "MVA_LikelihoodKDE", nbin, -0.00001, 0.99999 ); if (Use["LikelihoodMIX"]) histLkMIX = new TH1F( "MVA_LikelihoodMIX", "MVA_LikelihoodMIX", nbin, 0, 1 ); if (Use["PDERS"]) histPD = new TH1F( "MVA_PDERS", "MVA_PDERS", nbin, 0, 1 ); if (Use["PDERSD"]) histPDD = new TH1F( "MVA_PDERSD", "MVA_PDERSD", nbin, 0, 1 ); if (Use["PDERSPCA"]) histPDPCA = new TH1F( "MVA_PDERSPCA", "MVA_PDERSPCA", nbin, 0, 1 ); if (Use["KNN"]) histKNN = new TH1F( "MVA_KNN", "MVA_KNN", nbin, 0, 1 ); if (Use["HMatrix"]) histHm = new TH1F( "MVA_HMatrix", "MVA_HMatrix", nbin, -0.95, 1.55 ); if (Use["Fisher"]) histFi = new TH1F( "MVA_Fisher", "MVA_Fisher", nbin, -4, 4 ); if (Use["FisherG"]) histFiG = new TH1F( "MVA_FisherG", "MVA_FisherG", nbin, -1, 1 ); if (Use["BoostedFisher"]) histFiB = new TH1F( "MVA_BoostedFisher", "MVA_BoostedFisher", nbin, -2, 2 ); if (Use["LD"]) histLD = new TH1F( "MVA_LD", "MVA_LD", nbin, -2, 2 ); if (Use["MLP"]) histNn = new TH1F( "MVA_MLP", "MVA_MLP", nbin, -1.25, 1.5 ); if (Use["MLPBFGS"]) histNnbfgs = new TH1F( "MVA_MLPBFGS", "MVA_MLPBFGS", nbin, -1.25, 1.5 ); if (Use["MLPBNN"]) histNnbnn = new TH1F( "MVA_MLPBNN", "MVA_MLPBNN", nbin, -1.25, 1.5 ); if (Use["CFMlpANN"]) histNnC = new TH1F( "MVA_CFMlpANN", "MVA_CFMlpANN", nbin, 0, 1 ); if (Use["TMlpANN"]) histNnT = new TH1F( "MVA_TMlpANN", "MVA_TMlpANN", nbin, -1.3, 1.3 ); if (Use["BDT"]) histBdt = new TH1F( "MVA_BDT", "MVA_BDT", nbin, -0.8, 0.8 ); if (Use["BDTD"]) histBdtD = new TH1F( "MVA_BDTD", "MVA_BDTD", nbin, -0.8, 0.8 ); if (Use["BDTG"]) histBdtG = new TH1F( "MVA_BDTG", "MVA_BDTG", nbin, -1.0, 1.0 ); if (Use["RuleFit"]) histRf = new TH1F( "MVA_RuleFit", "MVA_RuleFit", nbin, -2.0, 2.0 ); if (Use["SVM_Gauss"]) histSVMG = new TH1F( "MVA_SVM_Gauss", "MVA_SVM_Gauss", nbin, 0.0, 1.0 ); if (Use["SVM_Poly"]) histSVMP = new TH1F( "MVA_SVM_Poly", "MVA_SVM_Poly", nbin, 0.0, 1.0 ); if (Use["SVM_Lin"]) histSVML = new TH1F( "MVA_SVM_Lin", "MVA_SVM_Lin", nbin, 0.0, 1.0 ); if (Use["FDA_MT"]) histFDAMT = new TH1F( "MVA_FDA_MT", "MVA_FDA_MT", nbin, -2.0, 3.0 ); if (Use["FDA_GA"]) histFDAGA = new TH1F( "MVA_FDA_GA", "MVA_FDA_GA", nbin, -2.0, 3.0 ); if (Use["Category"]) histCat = new TH1F( "MVA_Category", "MVA_Category", nbin, -2., 2. 
); if (Use["Plugin"]) histPBdt = new TH1F( "MVA_PBDT", "MVA_BDT", nbin, -0.8, 0.8 ); // PDEFoam also returns per-event error, fill in histogram, and also fill significance if (Use["PDEFoam"]) { histPDEFoam = new TH1F( "MVA_PDEFoam", "MVA_PDEFoam", nbin, 0, 1 ); histPDEFoamErr = new TH1F( "MVA_PDEFoamErr", "MVA_PDEFoam error", nbin, 0, 1 ); histPDEFoamSig = new TH1F( "MVA_PDEFoamSig", "MVA_PDEFoam significance", nbin, 0, 10 ); } // book example histogram for probability (the other methods are done similarly) TH1F *probHistFi(0), *rarityHistFi(0); if (Use["Fisher"]) { probHistFi = new TH1F( "MVA_Fisher_Proba", "MVA_Fisher_Proba", nbin, 0, 1 ); rarityHistFi = new TH1F( "MVA_Fisher_Rarity", "MVA_Fisher_Rarity", nbin, 0, 1 ); } // Prepare input tree (this must be replaced by your data source) // in this example, there is a toy tree with signal and one with background events // we'll later on use only the "signal" events for the test in this example. // TFile *input(0); TString fname = "/tmp/chasco/ORIGINAL//Data_MuEG2011B_1.root"; if (!gSystem->AccessPathName( fname )) { input = TFile::Open( fname ); // check if file in local directory exists } else { input = TFile::Open( "http://root.cern.ch/files/tmva_class_example.root" ); // if not: download from ROOT server } if (!input) { std::cout << "ERROR: could not open data file" << std::endl; exit(1); } std::cout << "--- TMVAClassificationApp : Using input file: " << input->GetName() << std::endl; // // prepare the tree // - here the variable names have to corresponds to your tree // - you can use the same variables as above which is slightly faster, // but of course you can use different ones and copy the values inside the event loop // TTree* BigTree = (TTree*)input->Get("data"); TFile *tmp = new TFile( "tmp.root","RECREATE" ); TTree* theTree = BigTree->CopyTree("((cat == 1) + (cat == 2))*(ln==0)*(Cosmic==0)*(fabs(Mass_Z - 91.18)<10)*(Pt_Z>30)*(DeltaPhi_metjet>0.5)*(Pt_J1 < 30)*(pfMEToverPt_Z > 0.4)*(pfMEToverPt_Z < 1.8)*((Pt_Jet_btag_CSV_max > 20)*(btag_CSV_max < 0.244) + (1-(Pt_Jet_btag_CSV_max > 20)))*(sqrt(pow(dilepPROJLong + 1.25*recoilPROJLong + 0.0*uncertPROJLong,2)*(dilepPROJLong + 1.25*recoilPROJLong + 0.0*uncertPROJLong > 0) + 1.0*pow(dilepPROJPerp + 1.25*recoilPROJPerp + 0.0*uncertPROJPerp,2)*(dilepPROJPerp + 1.25*recoilPROJPerp + 0.0*uncertPROJPerp > 0)) > 45.0)"); std::cout << "--- Select signal sample" << std::endl; Float_t userVar1, userVar2; // theTree->SetBranchAddress( "var1", &userVar1 ); // theTree->SetBranchAddress( "var2", &userVar2 ); // theTree->SetBranchAddress( "var3", &var3 ); // theTree->SetBranchAddress( "var4", &var4 ); theTree->SetBranchAddress( " Z_rapidity_z", &Z_rapidity_z); theTree->SetBranchAddress( " THRUST_2D", &THRUST_2D); theTree->SetBranchAddress( " L1_L2_cosangle", &L1_L2_cosangle); theTree->SetBranchAddress( " TransMass_ZH150_uncl", &TransMass_ZH150_uncl); theTree->SetBranchAddress( " TransMass_ZH150", &TransMass_ZH150); theTree->SetBranchAddress( " DeltaPhi_ZH", &DeltaPhi_ZH); theTree->SetBranchAddress( " DeltaPhi_ZH_uncl", &DeltaPhi_ZH_uncl); theTree->SetBranchAddress( " CMAngle", &CMAngle); theTree->SetBranchAddress( " CS_cosangle", &CS_cosangle); // efficiency calculator for cut method Int_t nSelCutsGA = 0; Double_t effS = 0.7; std::vector<Float_t> vecVar(9); // vector for EvaluateMVA tests std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl; TStopwatch sw; sw.Start(); for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { if (ievt%1000 == 0){ std::cout << "--- ... 
Processing event: " << ievt << std::endl; } theTree->GetEntry(ievt); var1 = userVar1 + userVar2; var2 = userVar1 - userVar2; if (ievt <20){ // test the twodifferent Reader::EvaluateMVA functions // access via registered variables compared to access via vector<float> // vecVar[0]=var1; // vecVar[1]=var2; // vecVar[2]=var3; // vecVar[3]=var4; vecVar[0]=Z_rapidity_z; vecVar[1]=THRUST_2D; vecVar[2]=L1_L2_cosangle; vecVar[3]=TransMass_ZH150_uncl; vecVar[4]=TransMass_ZH150; vecVar[5]=DeltaPhi_ZH; vecVar[6]=DeltaPhi_ZH_uncl; vecVar[7]=CMAngle; vecVar[8]=CS_cosangle; for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { if (it->second) { TString mName = it->first + " method"; Double_t mva1 = reader->EvaluateMVA( mName); Double_t mva2 = reader->EvaluateMVA( vecVar, mName); if (mva1 != mva2) { std::cout << "++++++++++++++ ERROR in "<< mName <<", comparing different EvaluateMVA results val1=" << mva1 << " val2="<<mva2<<std::endl; } } } // now test that the inputs do matter TRandom3 rand(0); // vecVar[0]=rand.Rndm(); // vecVar[1]=rand.Rndm(); // vecVar[2]=rand.Rndm(); // vecVar[3]=rand.Rndm(); vecVar[0]=rand.Rndm(); vecVar[1]=rand.Rndm(); vecVar[2]=rand.Rndm(); vecVar[3]=rand.Rndm(); vecVar[4]=rand.Rndm(); vecVar[5]=rand.Rndm(); vecVar[6]=rand.Rndm(); vecVar[7]=rand.Rndm(); vecVar[8]=rand.Rndm(); for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) { if (it->second) { TString mName = it->first + " method"; Double_t mva1 = reader->EvaluateMVA( mName); Double_t mva2 = reader->EvaluateMVA( vecVar, mName); if (mva1 == mva2) { std::cout << "++++++++++++++ ERROR in "<< mName <<", obtaining idnetical output for different inputs" <<std::endl; } } } } // // return the MVAs and fill to histograms // if (Use["CutsGA"]) { // Cuts is a special case: give the desired signal efficienciy Bool_t passed = reader->EvaluateMVA( "CutsGA method", effS ); if (passed) nSelCutsGA++; } if (Use["Likelihood" ]) histLk ->Fill( reader->EvaluateMVA( "Likelihood method" ) ); if (Use["LikelihoodD" ]) histLkD ->Fill( reader->EvaluateMVA( "LikelihoodD method" ) ); if (Use["LikelihoodPCA"]) histLkPCA ->Fill( reader->EvaluateMVA( "LikelihoodPCA method" ) ); if (Use["LikelihoodKDE"]) histLkKDE ->Fill( reader->EvaluateMVA( "LikelihoodKDE method" ) ); if (Use["LikelihoodMIX"]) histLkMIX ->Fill( reader->EvaluateMVA( "LikelihoodMIX method" ) ); if (Use["PDERS" ]) histPD ->Fill( reader->EvaluateMVA( "PDERS method" ) ); if (Use["PDERSD" ]) histPDD ->Fill( reader->EvaluateMVA( "PDERSD method" ) ); if (Use["PDERSPCA" ]) histPDPCA ->Fill( reader->EvaluateMVA( "PDERSPCA method" ) ); if (Use["KNN" ]) histKNN ->Fill( reader->EvaluateMVA( "KNN method" ) ); if (Use["HMatrix" ]) histHm ->Fill( reader->EvaluateMVA( "HMatrix method" ) ); if (Use["Fisher" ]) histFi ->Fill( reader->EvaluateMVA( "Fisher method" ) ); if (Use["FisherG" ]) histFiG ->Fill( reader->EvaluateMVA( "FisherG method" ) ); if (Use["BoostedFisher"]) histFiB ->Fill( reader->EvaluateMVA( "BoostedFisher method" ) ); if (Use["LD" ]) histLD ->Fill( reader->EvaluateMVA( "LD method" ) ); if (Use["MLP" ]) histNn ->Fill( reader->EvaluateMVA( "MLP method" ) ); if (Use["MLPBFGS" ]) histNnbfgs ->Fill( reader->EvaluateMVA( "MLPBFGS method" ) ); if (Use["MLPBNN" ]) histNnbnn ->Fill( reader->EvaluateMVA( "MLPBNN method" ) ); if (Use["CFMlpANN" ]) histNnC ->Fill( reader->EvaluateMVA( "CFMlpANN method" ) ); if (Use["TMlpANN" ]) histNnT ->Fill( reader->EvaluateMVA( "TMlpANN method" ) ); if (Use["BDT" ]) histBdt ->Fill( reader->EvaluateMVA( "BDT 
method" ) ); if (Use["BDTD" ]) histBdtD ->Fill( reader->EvaluateMVA( "BDTD method" ) ); if (Use["BDTG" ]) histBdtG ->Fill( reader->EvaluateMVA( "BDTG method" ) ); if (Use["RuleFit" ]) histRf ->Fill( reader->EvaluateMVA( "RuleFit method" ) ); if (Use["SVM_Gauss" ]) histSVMG ->Fill( reader->EvaluateMVA( "SVM_Gauss method" ) ); if (Use["SVM_Poly" ]) histSVMP ->Fill( reader->EvaluateMVA( "SVM_Poly method" ) ); if (Use["SVM_Lin" ]) histSVML ->Fill( reader->EvaluateMVA( "SVM_Lin method" ) ); if (Use["FDA_MT" ]) histFDAMT ->Fill( reader->EvaluateMVA( "FDA_MT method" ) ); if (Use["FDA_GA" ]) histFDAGA ->Fill( reader->EvaluateMVA( "FDA_GA method" ) ); if (Use["Category" ]) histCat ->Fill( reader->EvaluateMVA( "Category method" ) ); if (Use["Plugin" ]) histPBdt ->Fill( reader->EvaluateMVA( "P_BDT method" ) ); // retrieve also per-event error if (Use["PDEFoam"]) { Double_t val = reader->EvaluateMVA( "PDEFoam method" ); Double_t err = reader->GetMVAError(); histPDEFoam ->Fill( val ); histPDEFoamErr->Fill( err ); histPDEFoamSig->Fill( val/err ); } // retrieve probability instead of MVA output if (Use["Fisher"]) { probHistFi ->Fill( reader->GetProba ( "Fisher method" ) ); rarityHistFi->Fill( reader->GetRarity( "Fisher method" ) ); } } // get elapsed time sw.Stop(); std::cout << "--- End of event loop: "; sw.Print(); // get efficiency for cuts classifier if (Use["CutsGA"]) std::cout << "--- Efficiency for CutsGA method: " << double(nSelCutsGA)/theTree->GetEntries() << " (for a required signal efficiency of " << effS << ")" << std::endl; if (Use["CutsGA"]) { // test: retrieve cuts for particular signal efficiency // CINT ignores dynamic_casts so we have to use a cuts-secific Reader function to acces the pointer TMVA::MethodCuts* mcuts = reader->FindCutsMVA( "CutsGA method" ) ; if (mcuts) { std::vector<Double_t> cutsMin; std::vector<Double_t> cutsMax; mcuts->GetCuts( 0.7, cutsMin, cutsMax ); std::cout << "--- -------------------------------------------------------------" << std::endl; std::cout << "--- Retrieve cut values for signal efficiency of 0.7 from Reader" << std::endl; for (UInt_t ivar=0; ivar<cutsMin.size(); ivar++) { std::cout << "... 
Cut: " << cutsMin[ivar] << " < \"" << mcuts->GetInputVar(ivar) << "\" <= " << cutsMax[ivar] << std::endl; } std::cout << "--- -------------------------------------------------------------" << std::endl; } } // // write histograms // TFile *target = new TFile( "TMVApp.root","RECREATE" ); if (Use["Likelihood" ]) histLk ->Write(); if (Use["LikelihoodD" ]) histLkD ->Write(); if (Use["LikelihoodPCA"]) histLkPCA ->Write(); if (Use["LikelihoodKDE"]) histLkKDE ->Write(); if (Use["LikelihoodMIX"]) histLkMIX ->Write(); if (Use["PDERS" ]) histPD ->Write(); if (Use["PDERSD" ]) histPDD ->Write(); if (Use["PDERSPCA" ]) histPDPCA ->Write(); if (Use["KNN" ]) histKNN ->Write(); if (Use["HMatrix" ]) histHm ->Write(); if (Use["Fisher" ]) histFi ->Write(); if (Use["FisherG" ]) histFiG ->Write(); if (Use["BoostedFisher"]) histFiB ->Write(); if (Use["LD" ]) histLD ->Write(); if (Use["MLP" ]) histNn ->Write(); if (Use["MLPBFGS" ]) histNnbfgs ->Write(); if (Use["MLPBNN" ]) histNnbnn ->Write(); if (Use["CFMlpANN" ]) histNnC ->Write(); if (Use["TMlpANN" ]) histNnT ->Write(); if (Use["BDT" ]) histBdt ->Write(); if (Use["BDTD" ]) histBdtD ->Write(); if (Use["BDTG" ]) histBdtG ->Write(); if (Use["RuleFit" ]) histRf ->Write(); if (Use["SVM_Gauss" ]) histSVMG ->Write(); if (Use["SVM_Poly" ]) histSVMP ->Write(); if (Use["SVM_Lin" ]) histSVML ->Write(); if (Use["FDA_MT" ]) histFDAMT ->Write(); if (Use["FDA_GA" ]) histFDAGA ->Write(); if (Use["Category" ]) histCat ->Write(); if (Use["Plugin" ]) histPBdt ->Write(); // write also error and significance histos if (Use["PDEFoam"]) { histPDEFoam->Write(); histPDEFoamErr->Write(); histPDEFoamSig->Write(); } // write also probability hists if (Use["Fisher"]) { if (probHistFi != 0) probHistFi->Write(); if (rarityHistFi != 0) rarityHistFi->Write(); } target->Close(); std::cout << "--- Created root file: \"TMVApp.root\" containing the MVA output histograms" << std::endl; delete reader; std::cout << "==> TMVAClassificationApplication is done!" << endl << std::endl; }
Int_t MergeV1(TString fileNameDigits="digits.root", TString fileNameSDigitsSig="sig.sdigits.root", TString fileNameSDigitsBgr="bgr.sdigits.root", Int_t nEvents = 1, Int_t iITS = 2, Int_t iTPC = 0, Int_t iTRD = 0, Int_t iPHOS = 0, Int_t iMUON = 0, Int_t iRICH = 0, Int_t iCopy = 1) { // delete the current gAlice object, the one from input file // will be used if(gAlice){ delete gAlice; gAlice = 0; } // end if gAlice // Connect the Root Galice file containing Geometry, Kine and Hits TFile *file = (TFile*)gROOT->GetListOfFiles()->FindObject(fileNameSDigitsSig.Data()); if(!file) file = new TFile(fileNameSDigitsSig.Data()); TDatime *ct0 = new TDatime(2002,04,26,00,00,00), ct = file->GetCreationDate(); // Get AliRun object from file or create it if not on file if(!gAlice) { gAlice = (AliRun*)file->Get("gAlice"); if(gAlice) printf("AliRun object found on file\n"); if(!gAlice) gAlice = new AliRun("gAlice","Alice test program"); } // end if !gAlice AliRunDigitizer * manager = new AliRunDigitizer(2,1); manager->SetInputStream(0,fileNameSDigitsSig.Data()); manager->SetInputStream(1,fileNameSDigitsBgr.Data()); if (fileNameDigits != "") { // if (iCopy) { // AliCopyN(fileNameSDigitsSig,fileNameDigits); // } manager->SetOutputFile(fileNameDigits); } manager->SetNrOfEventsToWrite(nEvents); if (iITS) { AliITSDigitizer *dITS = new AliITSDigitizer(manager); if (iITS == 2) dITS->SetByRegionOfInterestFlag(1); if(ct0->GetDate()>ct.GetDate()){ // For old files, must change SDD noise. AliITS *ITS = (AliITS*) gAlice->GetDetector("ITS"); AliITSresponseSDD *resp1 = ITS->DetType(1)->GetResponseModel(); resp1->SetNoiseParam(); resp1->SetNoiseAfterElectronics(); Float_t n,b; Int_t cPar[8]; resp1->GetNoiseParam(n,b); n = resp1->GetNoiseAfterElectronics(); cPar[0]=0; cPar[1]=0; cPar[2]=(Int_t)(b + 2.*n + 0.5); cPar[3]=(Int_t)(b + 2.*n + 0.5); cPar[4]=0; cPar[5]=0; cPar[6]=0; cPar[7]=0; resp1->SetCompressParam(cPar); } // end if } if (iTPC) AliTPCDigitizer *dTPC = new AliTPCDigitizer(manager); if (iTRD) AliTRDdigitizer *dTRD = new AliTRDdigitizer(manager); if (iPHOS) AliPHOSDigitizer *dPHOS = new AliPHOSDigitizer(manager); if (iMUON) AliMUONDigitizer *dMUON = new AliMUONDigitizer(manager); if (iRICH) AliRICHDigitizer *dRICH = new AliRICHDigitizer(manager); TStopwatch timer; timer.Start(); manager->Exec("deb all"); timer.Stop(); timer.Print(); // delete gAlice; // gAlice = 0; delete manager; }
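// ------------------------------------------------------------------------
// Hedged sketch of the merging flow in MergeV1 above, stripped of the
// detector-specific setup: two SDigit streams (signal and background) feed
// one AliRunDigitizer, each detector digitizer registers itself with the
// manager on construction, and Exec() produces the merged digits.  File
// names are placeholders; only calls already used above are assumed.
// ------------------------------------------------------------------------
void MergeSketch(Int_t nEvents = 1) {
  AliRunDigitizer* manager = new AliRunDigitizer(2, 1);  // 2 input streams, 1 output
  manager->SetInputStream(0, "sig.sdigits.root");
  manager->SetInputStream(1, "bgr.sdigits.root");
  manager->SetOutputFile("digits.root");
  manager->SetNrOfEventsToWrite(nEvents);
  new AliITSDigitizer(manager);   // the digitizer attaches itself to the manager
  manager->Exec("deb all");       // run digitization / merging
  delete manager;
}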
void varsig0196_4() {
  TStopwatch timer;
  timer.Start();
  // Define histograms
  gStyle->SetOptFit(1111);
  gStyle->SetOptStat(111111);
  TGraphErrors *g1 = new TGraphErrors(20);
  g1->SetName("Dilvsdm");
  g1->SetTitle("Dilution vs. dm");
  TGraph *g2 = new TGraph(20);
  g2->SetName("DilErrvsdm");
  g2->SetTitle("Dil Err vs. dm");
  for(Int_t i=0; i<20; i++){
    oscpar1_init = 0.5+(0.33*i);
    cout << "============== dm fixed at "<< oscpar1_init <<" ===================="<<endl;
    //TH1F *h1 = new TH1F("h1","Gaussian Dist",50, 4.8, 5.8);
    TH1F *h2 = new TH1F("h2","Lifetime Dist",nBins, min, max);
    TH1F *h3 = new TH1F("h3","Lifetime Dist tag=1",nBins, min, max);
    TH1F *h4 = new TH1F("h4","Lifetime Dist tag=-1",nBins, min, max);
    // Get data
    // Generate events
    mixmasta_mc();
    for (Int_t ja=0; ja<nEvts; ja++){
      h2->Fill(lifetime[ja]);
      if (tag[ja] == 1){
        h3->Fill(lifetime[ja]);
      }else if(tag[ja] == -1){
        h4->Fill(lifetime[ja]);
      }else{
        cout << "Tag value "<<tag[ja]<< " out of range, should be -1 or 1" << endl;
        break;
      }
    }
    // Do unbinned likelihood fit
    TF1 *f3 = new TF1("f3", lftmosc_plt_d, min, max, 5);
    TF1 *f4 = new TF1("f4", lftmosc_plt_d, min, max, 5);
    unbinFitosc_d();
    for (Int_t j=0; j<4; j++){
      f3->SetParameter(j,fitpar[j]);
      f4->SetParameter(j,fitpar[j]);
    }
    f3->SetParameter(4,1);
    f4->SetParameter(4,-1);
    g1->SetPoint(i,fitpar[1],-(1-2*fitpar[3])/dilfit);
    g1->SetPointError(i,fiterr[1],2*fiterr[3]/dilfit);
    g2->SetPoint(i,fitpar[1],1.65*2*fiterr[3]/dilfit);
    delete h2; delete h3; delete h4; delete f3; delete f4;
  }
  TCanvas *vardm = new TCanvas("vardm","varsig0196_4",800,400);
  vardm->Divide(2,1);
  vardm->cd(1);
  g1->GetXaxis()->SetTitle("dm");
  g1->GetXaxis()->CenterTitle();
  g1->GetYaxis()->SetTitle("Dilution (1-2alpha)");
  g1->GetYaxis()->CenterTitle();
  //g1->SetMarkerStyle(21);
  //g1->SetMarkerSize(1);
  g1->Draw("AP*");
  vardm->cd(2);
  gStyle->SetPadColor(10);
  gStyle->SetCanvasColor(10);
  vardm->SetGrid();
  g2->GetXaxis()->SetTitle("dm");
  g2->GetXaxis()->CenterTitle();
  g2->GetYaxis()->SetTitle("Dil. Err");
  g2->GetYaxis()->CenterTitle();
  //g2->SetMarkerStyle(21);
  //g2->SetMarkerSize(1);
  g2->Draw("AP*");
  TObjArray a1(0);
  a1.Add(g1);
  a1.Add(g2);
  a1.Add(vardm);
  TFile var_dm("varsig0196_4.root", "recreate");
  a1.Write();
  var_dm.Close();
  timer.Stop();
  Double_t rtime = timer.RealTime();
  Double_t ctime = timer.CpuTime();
  cout << "Real time " << rtime << endl;
  cout << "CPU time " << ctime << endl;
}
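// ------------------------------------------------------------------------
// Hedged sketch of the scan-and-plot pattern used in varsig0196_4 above:
// one point per scan step is pushed into a TGraphErrors and everything is
// saved through a TObjArray at the end.  The y values and errors below are
// dummies standing in for the fit results.
// ------------------------------------------------------------------------
void scanGraphSketch() {
  TGraphErrors *g = new TGraphErrors(20);
  for (Int_t i = 0; i < 20; i++) {
    Double_t x  = 0.5 + 0.33*i;   // scan variable, as in the macro above
    Double_t y  = 1.0;            // placeholder fit result
    Double_t ey = 0.1;            // placeholder fit error
    g->SetPoint(i, x, y);
    g->SetPointError(i, 0., ey);
  }
  TObjArray a(0);
  a.Add(g);
  TFile f("scan_sketch.root", "recreate");
  a.Write();
  f.Close();
}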
void RAA_dataDrivenUnfoldingErrorCheck(int radius = 4, int radiusPP = 4, char* algo = (char*) "Pu", char *jet_type = (char*) "PF", int unfoldingCut = 30, char* etaWidth = (char*) "n20_eta_p20", double deltaEta = 4.0){ TStopwatch timer; timer.Start(); TH1::SetDefaultSumw2(); TH2::SetDefaultSumw2(); bool printDebug = true; // get the data and mc histograms from the output of the read macro. TDatime date;//this is just here to get them to run optimized. // Raghav's files: //TFile * fPbPb_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/PbPb_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); // //TFile * fPP_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/Pp_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); //TFile * fPP_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/Pp_CutEfficiency_noJetID_exclusionhighertriggers_A_R0p%d.root",radius)); // Pawan's files: TFile * fPbPb_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/Pawan_ntuplehistograms/PbPb_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); //TFile * fPP_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/Pp_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); TFile * fPP_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/Pawan_ntuplehistograms/Pp_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); TFile * fPbPb_MB_in = TFile::Open(Form("/afs/cern.ch/work/r/rkunnawa/WORK/RAA/CMSSW_5_3_18/src/Output/PbPb_MinBiasUPC_CutEfficiency_YetkinCuts_matched_slantedlinecalopfpt_addingunmatched_exclusionhighertriggers_eMaxSumcand_A_R0p%d.root",radius)); //TH1F * htest = new TH1F("htest","",nbins_pt, boundaries_pt); //Int_t unfoldingCutBin = htest->FindBin(unfoldingCut); cout<<"after input file declaration"<<endl; // need to make sure that the file names are in prefect order so that i can run them one after another. // for the above condition, i might have to play with the date stamp. const int nbins_cent = 6; double boundaries_cent[nbins_cent+1] = {0,2,4,12,20,28,36}; double ncoll[nbins_cent+1] = {1660,1310,745,251,62.8,10.8,362.24}; // histogram declarations with the following initial appendage: d - Data, m - MC, u- Unfolded // for the MC closure test, ive kept separate // setup the radius and the eta bin loop here later. not for the time being. Aug 20th. 
only run the -2 < eta < 2 with the differenent centrality bins TH1F *dPbPb_TrgComb[nbins_cent+1], *dPbPb_Comb[nbins_cent+1], *dPbPb_Trg80[nbins_cent+1], *dPbPb_Trg65[nbins_cent+1], *dPbPb_Trg55[nbins_cent+1], *dPbPb_1[nbins_cent+1], *dPbPb_2[nbins_cent+1], *dPbPb_3[nbins_cent+1], *dPbPb_80[nbins_cent+1], *dPbPb_65[nbins_cent+1], *dPbPb_55[nbins_cent+1]; TH1F *mPbPb_Gen[nbins_cent+1], *mPbPb_Reco[nbins_cent+1]; TH2F *mPbPb_Matrix[nbins_cent+1], *mPbPb_Response[nbins_cent+1], *mPbPb_ResponseNorm[nbins_cent+1]; TH1F *mPbPb_mcclosure_data[nbins_cent+1]; TH2F *mPbPb_mcclosure_Matrix[nbins_cent+1],*mPbPb_mcclosure_Response[nbins_cent+1], *mPbPb_mcclosure_ResponseNorm[nbins_cent+1]; TH1F *mPbPb_mcclosure_gen[nbins_cent+1]; const int Iterations = 20; //for unfolding systematics. const int BayesIter = 4; TH1F *uPbPb_Bayes[nbins_cent+1], *uPbPb_BinByBin[nbins_cent+1], *uPbPb_SVD[nbins_cent+1]; TH1F *uPbPb_BayesianIter[nbins_cent+1][Iterations]; TH1F *dPbPb_MinBias[nbins_cent]; TH1F *dPP_1, *dPP_2, *dPP_3, *dPP_Comb; TH1F *mPP_Gen, *mPP_Reco; TH2F *mPP_Matrix, *mPP_Response,*mPP_ResponseNorm; TH1F *mPP_mcclosure_data; TH2F *mPP_mcclosure_Matrix, *mPP_mcclosure_Response,*mPP_mcclosure_ResponseNorm; TH1F *mPP_mcclosure_Gen; TH1F *uPP_Bayes, *uPP_BinByBin, *uPP_SVD; TH1F *uPP_BayesianIter[Iterations]; // would be better to read in the histograms and rebin them. come to think of it, it would be better to have them already rebinned (and properly scaled - to the level of differential cross section in what ever barns (inverse micro barns) but keep it consistent) from the read macro. // get PbPb data for(int i = 0;i<nbins_cent;i++){ if(printDebug) cout<<"cent_"<<i<<endl; dPbPb_TrgComb[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_HLTComb_R%d_n20_eta_p20_cent%d",radius,i)); //dPbPb_TrgComb[i]->Scale(4*145.156*1e6); dPbPb_TrgComb[i]->Print("base"); dPbPb_Trg80[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_HLT80_R%d_n20_eta_p20_cent%d",radius,i)); //dPbPb_Trg80[i]->Scale(4*145.156*1e6); dPbPb_Trg80[i]->Print("base"); dPbPb_Trg65[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_HLT65_R%d_n20_eta_p20_cent%d",radius,i)); //dPbPb_Trg65[i]->Scale(4*145.156*1e6); dPbPb_Trg65[i]->Print("base"); dPbPb_Trg55[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_HLT55_R%d_n20_eta_p20_cent%d",radius,i)); //dPbPb_Trg55[i]->Scale(4*145.156*1e6); dPbPb_Trg55[i]->Print("base"); //dPbPb_TrgComb[i] = (TH1F*)dPbPb_Trg80[i]->Clone(Form("Jet_80_triggered_spectra_data_PbPb_cent%d",i)); //dPbPb_MinBias[i] = (TH1F*)fPbPb_MB_in->Get(Form("hpbpb_HLTComb_R%d_n20_eta_p20_cent%d",radius,i)); //dPbPb_MinBias[i]->Print("base"); dPbPb_TrgComb[i]->Scale(1./(145.156 * 1e9)); //dPbPb_MinBias[i]->Scale(1./(161.939 * 1e9)); //dPbPb_TrgComb[i]->Add(dPbPb_MinBias[i]); for(int k = 1;k<=unfoldingCut;k++) { dPbPb_TrgComb[i]->SetBinContent(k,0); dPbPb_Trg80[i]->SetBinContent(k,0); dPbPb_Trg65[i]->SetBinContent(k,0); dPbPb_Trg55[i]->SetBinContent(k,0); } } //Int_t nSVDIter = 4; if(printDebug)cout<<"loaded the data histograms PbPb"<<endl; // get PbPb MC for(int i = 0;i<nbins_cent;i++){ mPbPb_Gen[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_JetComb_gen_R%d_n20_eta_p20_cent%d",radius,i)); //mPbPb_Gen[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_gen_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_Gen[i]->Print("base"); mPbPb_Reco[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_JetComb_reco_R%d_n20_eta_p20_cent%d",radius,i)); //mPbPb_Reco[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_reco_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_Reco[i]->Print("base"); mPbPb_Matrix[i] = 
(TH2F*)fPbPb_in->Get(Form("hpbpb_matrix_HLT_R%d_n20_eta_p20_cent%d",radius,i)); //mPbPb_Matrix[i] = (TH2F*)fPbPb_in->Get(Form("hpbpb_matrix_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_Matrix[i]->Print("base"); mPbPb_mcclosure_data[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_mcclosure_JetComb_data_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_mcclosure_data[i]->Print("base"); mPbPb_mcclosure_gen[i] = (TH1F*)fPbPb_in->Get(Form("hpbpb_mcclosure_gen_JetComb_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_mcclosure_gen[i]->Print("base"); mPbPb_mcclosure_Matrix[i] = (TH2F*)fPbPb_in->Get(Form("hpbpb_mcclosure_matrix_HLT_R%d_n20_eta_p20_cent%d",radius,i)); mPbPb_mcclosure_Matrix[i]->Print("base"); //since SVD is very straight forward, lets do it rignt here: //get the SVD response matrix: //RooUnfoldResponse ruResponse(mPbPb_Matrix[i]->ProjectionY(),mPbPb_Matrix[i]->ProjectionX(), mPbPb_Matrix[i],"",""); //regularization parameter definition: //RooUnfoldSvd unfoldSvd(&ruResponse, dPbPb_TrgComb[i], nSVDIter); //uPbPb_SVD[i] = (TH1F*)unfoldSvd.Hreco(); // for(int k = 1;k<=unfoldingCut;k++){ // mPbPb_Gen[i]->SetBinContent(k,0); // mPbPb_Reco[i]->SetBinContent(k,0); // mPbPb_mcclosure_data[i]->SetBinContent(k,0); // mPbPb_mcclosure_gen[i]->SetBinContent(k,0); // for(int l = 1;l<=1000;l++){ // mPbPb_Matrix[i]->SetBinContent(k,l,0); // mPbPb_mcclosure_Matrix[i]->SetBinContent(k,l,0); // mPbPb_Matrix[i]->SetBinContent(l,k,0); // mPbPb_mcclosure_Matrix[i]->SetBinContent(l,k,0); // } // } //mPbPb_Response[i] = new TH2F(Form("mPbPb_Response_cent%d",i),"Response Matrix",nbins_pt,boundaries_pt,nbins_pt,boundaries_pt); //mPbPb_ResponseNorm[i] = new TH2F(Form("mPbPb_ResponseNorm_cent%d",i),"Normalized Response Matrix",nbins_pt,boundaries_pt,nbins_pt,boundaries_pt); } if(printDebug) cout<<"loaded the data and mc PbPb histograms from the files"<<endl; // get PP data if(printDebug) cout<<"Getting PP data and MC"<<endl; dPP_1 = (TH1F*)fPP_in->Get(Form("hpp_HLT80_R%d_%s",radiusPP,etaWidth)); dPP_1->Print("base"); dPP_2 = (TH1F*)fPP_in->Get(Form("hpp_HLT60_R%d_%s",radiusPP,etaWidth)); dPP_2->Print("base"); dPP_3 = (TH1F*)fPP_in->Get(Form("hpp_HLT40_R%d_%s",radiusPP,etaWidth)); dPP_3->Print("base"); dPP_Comb = (TH1F*)fPP_in->Get(Form("hpp_HLTComb_R%d_%s",radiusPP,etaWidth)); //dPP_Comb = (TH1F*)dPP_1->Clone(Form("hpp_TrgComb_R%d_n20_eta_p20",radiusPP,etaWidth)); dPP_Comb->Print("base"); dPP_Comb->Scale(1./(5.3 * 1e9)); for(int k = 1;k<=unfoldingCut;k++) { dPP_Comb->SetBinContent(k,0); dPP_1->SetBinContent(k,0); dPP_2->SetBinContent(k,0); dPP_3->SetBinContent(k,0); } // get PP MC mPP_Gen = (TH1F*)fPP_in->Get(Form("hpp_JetComb_gen_R%d_%s",radiusPP,etaWidth)); mPP_Gen->Print("base"); mPP_Reco = (TH1F*)fPP_in->Get(Form("hpp_JetComb_reco_R%d_%s",radiusPP,etaWidth)); mPP_Reco->Print("base"); mPP_Matrix = (TH2F*)fPP_in->Get(Form("hpp_matrix_HLT_R%d_%s",radiusPP,etaWidth)); mPP_Matrix->Print("base"); mPP_mcclosure_data = (TH1F*)fPP_in->Get(Form("hpp_mcclosure_JetComb_data_R%d_%s",radiusPP,etaWidth)); mPP_mcclosure_data->Print("base"); mPP_mcclosure_Matrix = (TH2F*)fPP_in->Get(Form("hpp_mcclosure_matrix_HLT_R%d_%s",radiusPP,etaWidth)); mPP_mcclosure_Matrix->Print("base"); //RooUnfoldResponse ruResponsePP(mPP_Matrix->ProjectionY(),mPP_Matrix->ProjectionX(), mPP_Matrix,"",""); //regularization parameter definition: //RooUnfoldSvd unfoldSvdPP(&ruResponsePP, dPP_Comb, nSVDIter); //uPP_SVD = (TH1F*)unfoldSvdPP.Hreco(); // for(int k = 1;k<=unfoldingCut;k++){ // mPP_Gen->SetBinContent(k,0); // mPP_Reco->SetBinContent(k,0); // 
mPP_mcclosure_data->SetBinContent(k,0); // for(int l = 1;l<=1000;l++){ // mPP_Matrix->SetBinContent(k,l,0); // mPP_mcclosure_Matrix->SetBinContent(k,l,0); // mPP_Matrix->SetBinContent(l,k,0); // mPP_mcclosure_Matrix->SetBinContent(l,k,0); // } // } if(printDebug) cout<<"Filling the PbPb response Matrix"<<endl; // response matrix and unfolding for PbPb // going to try it the way Kurt has it. for(int i = 0;i<nbins_cent;i++){ if(printDebug) cout<<"centrality bin iteration = "<<i<<endl; TF1 *f = new TF1("f","[0]*pow(x+[2],[1])"); f->SetParameters(1e10,-8.8,40); // TH1F *hGenSpectraCorr = (TH1F*)mPbPb_Matrix[i]->ProjectionX()->Clone(Form("hGenSpectraCorr_cent%d",i)); // hGenSpectraCorr->Fit("f"," "); // hGenSpectraCorr->Fit("f","",""); // hGenSpectraCorr->Fit("f","LL"); // TH1F *fHist = functionHist(f,hGenSpectraCorr,Form("fHist_cent%d",i));// function that you get from the fitting // hGenSpectraCorr->Divide(fHist); for (int y=1;y<=mPbPb_Matrix[i]->GetNbinsY();y++) { double sum=0; for (int x=1;x<=mPbPb_Matrix[i]->GetNbinsX();x++) { if (mPbPb_Matrix[i]->GetBinContent(x,y)<=1*mPbPb_Matrix[i]->GetBinError(x,y)) { //in the above line mine had 0*GetBinError while Kurt's had 1*. mPbPb_Matrix[i]->SetBinContent(x,y,0); mPbPb_Matrix[i]->SetBinError(x,y,0); } sum+=mPbPb_Matrix[i]->GetBinContent(x,y); } for (int x=1;x<=mPbPb_Matrix[i]->GetNbinsX();x++) { double ratio = 1; // if (hGenSpectraCorr->GetBinContent(x)!=0) ratio = 1e5/hGenSpectraCorr->GetBinContent(x); mPbPb_Matrix[i]->SetBinContent(x,y,mPbPb_Matrix[i]->GetBinContent(x,y)*ratio); mPbPb_Matrix[i]->SetBinError(x,y,mPbPb_Matrix[i]->GetBinError(x,y)*ratio); } } //mPbPb_Matrix[i]->Smooth(0); // Ok major differences here between my code and Kurt's in b-jet Tools under Unfold - lines 469 and above. mPbPb_Response[i] = (TH2F*)mPbPb_Matrix[i]->Clone(Form("mPbPb_Response_cent%d",i)); TH1F *hProj = (TH1F*)mPbPb_Response[i]->ProjectionY()->Clone(Form("hProj_cent%d",i)); for (int y=1;y<=mPbPb_Response[i]->GetNbinsY();y++) { double sum=0; for (int x=1;x<=mPbPb_Response[i]->GetNbinsX();x++) { if (mPbPb_Response[i]->GetBinContent(x,y)<=1*mPbPb_Response[i]->GetBinError(x,y)) { // in the above if statement, Kurt has 1*error and my old code had 0*error mPbPb_Response[i]->SetBinContent(x,y,0); mPbPb_Response[i]->SetBinError(x,y,0); } sum+=mPbPb_Response[i]->GetBinContent(x,y); } for (int x=1;x<=mPbPb_Response[i]->GetNbinsX();x++) { if (sum==0) continue; double ratio = 1; //if(dPbPb_TrgComb[i]->GetBinContent(y)==0) ratio = 1e-100/sum; // else ratio = dPbPb_TrgComb[i]->GetBinContent(y)/sum ratio = 1./sum; if (hProj->GetBinContent(y)==0) ratio = 1e-100/sum; else ratio = hProj->GetBinContent(y)/sum; mPbPb_Response[i]->SetBinContent(x,y,mPbPb_Response[i]->GetBinContent(x,y)*ratio); mPbPb_Response[i]->SetBinError(x,y,mPbPb_Response[i]->GetBinError(x,y)*ratio); } } mPbPb_ResponseNorm[i] = (TH2F*)mPbPb_Matrix[i]->Clone(Form("mPbPb_ResponseNorm_cent%d",i)); for (int x=1;x<=mPbPb_ResponseNorm[i]->GetNbinsX();x++) { double sum=0; for (int y=1;y<=mPbPb_ResponseNorm[i]->GetNbinsY();y++) { if (mPbPb_ResponseNorm[i]->GetBinContent(x,y)<=1*mPbPb_ResponseNorm[i]->GetBinError(x,y)) { mPbPb_ResponseNorm[i]->SetBinContent(x,y,0); mPbPb_ResponseNorm[i]->SetBinError(x,y,0); } sum+=mPbPb_ResponseNorm[i]->GetBinContent(x,y); } for (int y=1;y<=mPbPb_ResponseNorm[i]->GetNbinsY();y++) { if (sum==0) continue; double ratio = 1./sum; mPbPb_ResponseNorm[i]->SetBinContent(x,y,mPbPb_ResponseNorm[i]->GetBinContent(x,y)*ratio);
mPbPb_ResponseNorm[i]->SetBinError(x,y,mPbPb_ResponseNorm[i]->GetBinError(x,y)*ratio); } } } if(printDebug) cout<<"Filling PP response Matrix"<<endl; // response matrix for pp. // Kurt doesn't have this whole hGenSpectraCorr thing in his macro. Need to check why the difference exists between our codes TF1 *fpp = new TF1("fpp","[0]*pow(x+[2],[1])"); fpp->SetParameters(1e10,-8.8,40); // if(printDebug) cout<<"before getting the gen spectra corr matrix"<<endl; // TH1F *hGenSpectraCorrPP = (TH1F*)mPP_Matrix->ProjectionX()->Clone("hGenSpectraCorrPP"); // if(printDebug) cout<<"after getting the gen spectra corr matrix"<<endl; // hGenSpectraCorrPP->Fit("fpp"," "); // hGenSpectraCorrPP->Fit("fpp","",""); // hGenSpectraCorrPP->Fit("fpp","LL"); // TH1F *fHistPP = functionHist(fpp,hGenSpectraCorrPP,"fHistPP");// the function that you get from the fitting // hGenSpectraCorrPP->Divide(fHistPP); for (int y=1;y<=mPP_Matrix->GetNbinsY();y++) { double sum=0; for (int x=1;x<=mPP_Matrix->GetNbinsX();x++) { if (mPP_Matrix->GetBinContent(x,y)<=1*mPP_Matrix->GetBinError(x,y)) { mPP_Matrix->SetBinContent(x,y,0); mPP_Matrix->SetBinError(x,y,0); } sum+=mPP_Matrix->GetBinContent(x,y); } for (int x=1;x<=mPP_Matrix->GetNbinsX();x++) { double ratio = 1; // if (hGenSpectraCorrPP->GetBinContent(x)!=0) ratio = 1e5/hGenSpectraCorrPP->GetBinContent(x); mPP_Matrix->SetBinContent(x,y,mPP_Matrix->GetBinContent(x,y)*ratio); mPP_Matrix->SetBinError(x,y,mPP_Matrix->GetBinError(x,y)*ratio); } } // mPbPb_Matrix[i]->Smooth(0); // Ok major differences here between my code and Kurt's in b-jet Tools under Unfold - lines 469 and above. if(printDebug) cout<<"getting the response matrix"<<endl; mPP_Response = (TH2F*)mPP_Matrix->Clone("mPP_Response"); TH1F *hProjPP = (TH1F*)mPP_Response->ProjectionY()->Clone("hProjPP"); for (int y=1;y<=mPP_Response->GetNbinsY();y++) { double sum=0; for (int x=1;x<=mPP_Response->GetNbinsX();x++) { if (mPP_Response->GetBinContent(x,y)<=1*mPP_Response->GetBinError(x,y)) { // in the above if statement, Kurt has 1*error and my old code has 0*error mPP_Response->SetBinContent(x,y,0); mPP_Response->SetBinError(x,y,0); } sum+=mPP_Response->GetBinContent(x,y); } for (int x=1;x<=mPP_Response->GetNbinsX();x++) { if (sum==0) continue; double ratio = 1; //if(dPbPb_TrgComb[i]->GetBinContent(y)==0) ratio = 1e-100/sum; // else ratio = dPbPb_TrgComb[i]->GetBinContent(y)/sum ratio = 1./sum; if (hProjPP->GetBinContent(y)==0) ratio = 1e-100/sum; else ratio = hProjPP->GetBinContent(y)/sum; mPP_Response->SetBinContent(x,y,mPP_Response->GetBinContent(x,y)*ratio); mPP_Response->SetBinError(x,y,mPP_Response->GetBinError(x,y)*ratio); } } if(printDebug) cout<<"getting the normalized response matrix"<<endl; mPP_ResponseNorm = (TH2F*)mPP_Matrix->Clone("mPP_ResponseNorm"); for (int x=1;x<=mPP_ResponseNorm->GetNbinsX();x++) { double sum=0; for (int y=1;y<=mPP_ResponseNorm->GetNbinsY();y++) { if (mPP_ResponseNorm->GetBinContent(x,y)<=1*mPP_ResponseNorm->GetBinError(x,y)) { mPP_ResponseNorm->SetBinContent(x,y,0); mPP_ResponseNorm->SetBinError(x,y,0); } sum+=mPP_ResponseNorm->GetBinContent(x,y); } for (int y=1;y<=mPP_ResponseNorm->GetNbinsY();y++) { if (sum==0) continue; double ratio = 1./sum; mPP_ResponseNorm->SetBinContent(x,y,mPP_ResponseNorm->GetBinContent(x,y)*ratio); mPP_ResponseNorm->SetBinError(x,y,mPP_ResponseNorm->GetBinError(x,y)*ratio); } } // scale the spectra to the respective units // for(int i = 0;i<nbins_cent;++i){ // dPbPb_TrgComb[i] =
(TH1F*)dPbPb_TrgComb[i]->Rebin(nbins_pt,Form("PbPb_measured_spectra_combined_cent%d",i),boundaries_pt); // divideBinWidth(dPbPb_TrgComb[i]); // } // dPP_Comb = (TH1F*)dPP_Comb->Rebin(nbins_pt,"pp_measured_spectra_combined",boundaries_pt); // divideBinWidth(dPP_Comb); // dPP_Comb->Scale(1./ dPP_Comb->GetBinContent(nbins_pt)); // Now that we have all the response matrices for the 6 centralities in PbPb and the one pp spectrum, let's start doing the steps: // we have 39 pt bins, and for each pt bin we throw one Gaussian-smeared value per unfolding trial. Int_t unfoldingTrials = 200; Double_t meanMeasPbPb[nbins_pt][nbins_cent], sigmaMeasPbPb[nbins_pt][nbins_cent]; Double_t meanMeasPP[nbins_pt], sigmaMeasPP[nbins_pt]; Double_t meanUnfoldPbPb[nbins_pt][nbins_cent][unfoldingTrials], sigmaUnfoldPbPb[nbins_pt][nbins_cent][unfoldingTrials]; Double_t meanUnfoldPP[nbins_pt][unfoldingTrials], sigmaUnfoldPP[nbins_pt][unfoldingTrials]; TRandom3 *random = new TRandom3(0); for(int u = 0;u<unfoldingTrials;++u){ cout<<"unfolding trial no = "<<u+1<<endl; for(int j = 0;j<nbins_pt;++j){ for(int i = 0;i<nbins_cent;++i){ meanMeasPbPb[j][i] = dPbPb_TrgComb[i]->GetBinContent(j+1); sigmaMeasPbPb[j][i] = dPbPb_TrgComb[i]->GetBinError(j+1); }// centrality loop meanMeasPP[j] = dPP_Comb->GetBinContent(j+1); sigmaMeasPP[j] = dPP_Comb->GetBinError(j+1); // bin error (not content) for the smearing width, matching the PbPb case }// nbins_pt loop // now proceed to unfolding for each trial. for(int i = 0;i<nbins_cent;++i){ //cout<<"centrality = "<<i<<endl; TH1F * hPreUnfoldingSpectra = new TH1F("hPreUnfoldingSpectra","",nbins_pt,0,nbins_pt); TH1F * hAfterUnfoldingSpectra; for(int j = 0;j<nbins_pt;++j){ hPreUnfoldingSpectra->SetBinContent(j+1, random->Gaus(meanMeasPbPb[j][i], sigmaMeasPbPb[j][i])); hPreUnfoldingSpectra->SetBinError(j+1, sigmaMeasPbPb[j][i]/sqrt(unfoldingTrials)); //if(j==100)cout << " before unfolding bin " << j << " value = " << hPreUnfoldingSpectra->GetBinContent(j+1)<<endl; //if(j==100)cout << " before unfolding bin " << j << " error = " << hPreUnfoldingSpectra->GetBinError(j+1)<<endl; }// nbins_pt loop TH1F* hMCGen = (TH1F*)mPbPb_Response[i]->ProjectionX(); removeZero(hMCGen); //cout << " MC bin " << 100 << " value = " << hMCGen->GetBinContent(100)<<endl; bayesianUnfold myUnfoldingMulti(mPbPb_Matrix[i], hMCGen, 0); myUnfoldingMulti.unfold(hPreUnfoldingSpectra, BayesIter); hAfterUnfoldingSpectra = (TH1F*) myUnfoldingMulti.hPrior->Clone("hAfterUnfoldingSpectra"); for(int j = 0;j<nbins_pt;++j){ //if(j==100)cout << " before unfolding bin " << j << " value = " << hPreUnfoldingSpectra->GetBinContent(j+1)<<endl; //if(j==100)cout << " after unfolding bin " << j << " value = " << hAfterUnfoldingSpectra->GetBinContent(j+1)<<endl; meanUnfoldPbPb[j][i][u] = hAfterUnfoldingSpectra->GetBinContent(j+1); sigmaUnfoldPbPb[j][i][u] = hAfterUnfoldingSpectra->GetBinError(j+1); // cout << "after unfolding meanUnfoldPbPb[" << j << "][" << i << "][" << u<< "] = " <<meanUnfoldPbPb[j][i][u]<<" "; // cout << "after unfolding meanUnfoldPbPb[" << j << "][" << i << "][" << u<< "] = " <<sigmaUnfoldPbPb[j][i][u]<<endl; }// nbins_pt loop //hPreUnfoldingSpectra->Print("base"); //hAfterUnfoldingSpectra->Print("base"); delete hPreUnfoldingSpectra; delete hAfterUnfoldingSpectra; delete hMCGen; }// centrality loop cout<<"pp "<<endl; // now do it for the pp: TH1F * hPreUnfoldingSpectraPP = new TH1F("hPreUnfoldingSpectraPP","",nbins_pt,0,nbins_pt); TH1F * hAfterUnfoldingSpectraPP; for(int j = 0;j<nbins_pt;++j){ hPreUnfoldingSpectraPP->SetBinContent(j+1, random->Gaus(meanMeasPP[j], sigmaMeasPP[j])); hPreUnfoldingSpectraPP->SetBinError(j+1,
sigmaMeasPP[j]/sqrt(unfoldingTrials)); }// nbins_pt loop TH1F* hMCGenPP = (TH1F*)mPP_Response->ProjectionX(); removeZero(hMCGenPP); bayesianUnfold myUnfoldingMultiPP(mPP_Matrix, hMCGenPP, 0); myUnfoldingMultiPP.unfold(hPreUnfoldingSpectraPP, BayesIter); hAfterUnfoldingSpectraPP = (TH1F*) myUnfoldingMultiPP.hPrior->Clone("hAfterUnfoldingSpectraPP"); for(int j = 0;j<nbins_pt;++j){ meanUnfoldPP[j][u] = hAfterUnfoldingSpectraPP->GetBinContent(j+1); sigmaUnfoldPP[j][u] = hAfterUnfoldingSpectraPP->GetBinError(j+1); }// nbins_pt loop delete hPreUnfoldingSpectraPP; delete hAfterUnfoldingSpectraPP; delete hMCGenPP; }// unfolding trials loop // Now that we have all the necessary values, let's proceed to fill a histogram with the mean values for each pt bin and get the corrected values. TH1F * hAfterUnfoldingptBinDistribution[nbins_pt]; TH1F * hCorrUnfoldingPbPb[nbins_cent]; for(int i = 0;i<nbins_cent;++i){ hCorrUnfoldingPbPb[i] = new TH1F(Form("PbPb_BayesianUnfolded_cent%d",i),"Spectra after correction", nbins_pt, 0, nbins_pt); for(int j = 0;j<nbins_pt;++j){ //hAfterUnfoldingptBinDistribution[j] = new TH1F(Form("hAfterUnfoldingptBinDistribution_ptBin%d",j),"",100, (meanMeasPbPb[j][i]-10) * sigmaMeasPbPb[j][i], (meanMeasPbPb[j][i]+10) * sigmaMeasPbPb[j][i]); hAfterUnfoldingptBinDistribution[j] = new TH1F(Form("hAfterUnfoldingptBinDistribution_ptBin%d",j),"",100, 0, 1); for(int u = 0;u<unfoldingTrials;++u){ hAfterUnfoldingptBinDistribution[j]->Fill(meanUnfoldPbPb[j][i][u]); //if(j==100) cout<< "unfolding_trial = " << u+1 << " mean unfold value = "<< meanUnfoldPbPb[j][i][u] <<endl; }// unfolding trials loop //if(j==100) cout<<"Mean of that value for pt=100 = "<< (Float_t)hAfterUnfoldingptBinDistribution[j]->GetMean() <<endl; hCorrUnfoldingPbPb[i]->SetBinContent(j+1, hAfterUnfoldingptBinDistribution[j]->GetMean()); //cout<<"centrality bin "<<i<<", pT bin "<<j<<" bin Content = "<<hCorrUnfoldingPbPb[i]->GetBinContent(j+1)<<endl; hCorrUnfoldingPbPb[i]->SetBinError(j+1, hAfterUnfoldingptBinDistribution[j]->GetRMS()); //cout<<"centrality bin "<<i<<", pT bin "<<j<<" bin Error = "<<hCorrUnfoldingPbPb[i]->GetBinError(j+1)<<endl; delete hAfterUnfoldingptBinDistribution[j]; }// nbins_pt loop }// centrality loop // similar for the pp: TH1F * hAfterUnfoldingptBinDistributionPP[nbins_pt]; TH1F * hCorrUnfoldingPP; hCorrUnfoldingPP = new TH1F("PP_BayesianUnfolded","Spectra after unfolding error correction",nbins_pt, 0, nbins_pt); for(int j = 0;j<nbins_pt;++j){ //hAfterUnfoldingptBinDistributionPP[j] = new TH1F(Form("hAfterUnfoldingptBinDistributionPP_ptBin%d",j),"",1000,(meanMeasPP[j]-10) * sigmaMeasPP[j], (meanMeasPP[j]+10) * sigmaMeasPP[j]); hAfterUnfoldingptBinDistributionPP[j] = new TH1F(Form("hAfterUnfoldingptBinDistributionPP_ptBin%d",j),"",100, 0, 1); for(int u = 0;u<unfoldingTrials;++u){ hAfterUnfoldingptBinDistributionPP[j]->Fill(meanUnfoldPP[j][u]); }// unfolding trials loop hCorrUnfoldingPP->SetBinContent(j+1, hAfterUnfoldingptBinDistributionPP[j]->GetMean()); //cout<<"PP pT bin "<<j<<" bin Content = "<<hCorrUnfoldingPP->GetBinContent(j+1)<<endl; hCorrUnfoldingPP->SetBinError(j+1, hAfterUnfoldingptBinDistributionPP[j]->GetRMS()); //cout<<"PP pT bin "<<j<<" bin Error = "<<hCorrUnfoldingPP->GetBinError(j+1)<<endl; delete hAfterUnfoldingptBinDistributionPP[j]; }// nbins_pt loop TFile f(Form("../../Output/Pawan_ntuple_PbPb_R%d_pp_R%d_%s_unfoldingCut_%d_data_driven_correction_ak%s%s_%d.root",radius, radiusPP, etaWidth ,unfoldingCut,algo,jet_type,date.GetDate()),"RECREATE"); f.cd(); for(int i =
0;i<nbins_cent;i++) { hCorrUnfoldingPbPb[i]->Scale(145.156 * 1e9); //hCorrUnfoldingPbPb[i] = (TH1F*)hCorrUnfoldingPbPb[i]->Rebin(nbins_pt_coarse, Form("PbPb_BayesianUnfolded_cent%d",i), boundaries_pt_coarse); hCorrUnfoldingPbPb[i]->Write(); hCorrUnfoldingPbPb[i]->Print("base"); dPbPb_TrgComb[i]->Scale(145.156 * 1e9); //dPbPb_TrgComb[i] = (TH1F*)dPbPb_TrgComb[i]->Rebin(nbins_pt_coarse, Form("PbPb_measured_cent%d",i), boundaries_pt_coarse); dPbPb_TrgComb[i]->Write(); dPbPb_TrgComb[i]->Print("base"); } hCorrUnfoldingPP->Scale(5.3 * 1e9); //hCorrUnfoldingPP = (TH1F*)hCorrUnfoldingPP->Rebin(nbins_pt_coarse, "PP_BayesianUnfolded", boundaries_pt_coarse); hCorrUnfoldingPP->Write(); hCorrUnfoldingPP->Print("base"); dPP_Comb->Scale(5.3 * 1e9); //dPP_Comb = (TH1F*)dPP_Comb->Rebin(nbins_pt_coarse, "PP_measured", boundaries_pt_coarse); dPP_Comb->Write(); dPP_Comb->Print("base"); f.Write(); f.Close(); timer.Stop(); if(printDebug) cout<<"CPU time (mins) = "<<(Float_t)timer.CpuTime()/60<<endl; if(printDebug) cout<<"Real time (mins) = "<<(Float_t)timer.RealTime()/60<<endl; }
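The error propagation in the macro above amounts to: throw Gaussian-smeared copies of the measured spectrum (one throw per bin, with the bin error as the width), unfold each copy, and take the per-bin mean and RMS over the trials as the corrected yield and its uncertainty. Below is a minimal, self-contained sketch of that procedure. It is written against the public RooUnfold API (RooUnfoldResponse/RooUnfoldBayes, which the commented-out SVD block above already references) instead of the private bayesianUnfold class; the function name toyUnfoldErrors and its default arguments are illustrative assumptions, not part of the original macro.

#include <vector>
#include <cmath>
#include <algorithm>
#include "TH1F.h"
#include "TH2F.h"
#include "TString.h"
#include "TRandom3.h"
#include "RooUnfoldResponse.h"
#include "RooUnfoldBayes.h"

TH1F* toyUnfoldErrors(TH1F* hMeas, TH2F* hResponse, int nIter = 4, int nTrials = 200)
{
  // Build the RooUnfold response the same way as the commented-out block above:
  // reco projection, gen projection, then the 2D matrix itself.
  RooUnfoldResponse response(hResponse->ProjectionY(), hResponse->ProjectionX(), hResponse);

  const int nBins = hMeas->GetNbinsX();
  std::vector<double> sum(nBins, 0.0), sum2(nBins, 0.0);

  TRandom3 rnd(0);
  for (int t = 0; t < nTrials; ++t) {
    // Smear every measured bin with a Gaussian whose width is that bin's statistical error.
    TH1F* hToy = (TH1F*)hMeas->Clone(Form("hToy_%d", t));
    for (int b = 1; b <= nBins; ++b)
      hToy->SetBinContent(b, rnd.Gaus(hMeas->GetBinContent(b), hMeas->GetBinError(b)));

    // Bayesian unfolding of the smeared toy.
    RooUnfoldBayes unfold(&response, hToy, nIter);
    TH1* hReco = unfold.Hreco();

    // Accumulate per-bin sums so the mean and RMS over trials can be formed at the end.
    for (int b = 1; b <= nBins; ++b) {
      double v = hReco->GetBinContent(b);
      sum[b - 1]  += v;
      sum2[b - 1] += v * v;
    }
    delete hToy;
  }

  // Corrected spectrum: per-bin mean over trials; uncertainty: per-bin RMS over trials.
  TH1F* hOut = (TH1F*)hMeas->Clone("hUnfolded_toyErrors");
  hOut->Reset();
  for (int b = 1; b <= nBins; ++b) {
    double mean = sum[b - 1] / nTrials;
    double rms  = std::sqrt(std::max(0.0, sum2[b - 1] / nTrials - mean * mean));
    hOut->SetBinContent(b, mean);
    hOut->SetBinError(b, rms);
  }
  return hOut;
}

Under those assumptions it would be invoked once per centrality bin, e.g. hCorrUnfoldingPbPb[i] = toyUnfoldErrors(dPbPb_TrgComb[i], mPbPb_Matrix[i], BayesIter, unfoldingTrials);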
void ana_Main_MC(TString ds="relval", TString physics="ttbar") { gSystem->Load("libSusyEvent.so"); // Look ../jec/JetMETObjects/README gSystem->Load("../jec/lib/libJetMETObjects.so"); // Printing utility for ntuple variables gROOT->LoadMacro("SusyEventPrinter.cc+"); // Main analysis code gROOT->LoadMacro("SusyMainAna_MC.cc+"); // chain of inputs TChain* chain = new TChain("susyTree"); //////////////// MC files ///////////////// cout<<"I survive this long1 "<< which_MC_to_use<< endl; MCpoint* thisMCpoint = setupMCpoint(which_MC_to_use); cout<<"I survive this long2"<<endl; chain->Add(thisMCpoint->filepath.c_str()); cout<<"I survive this long"<<endl; //chain->Add("../susyEvents_AB_1M_ho200_v2.root"); //chain->Add("../susyEvents_newNatural.root"); //last used!! //chain->Add("/eos/uscms/store/user/abarker/MC/newNat350_225/MC_AB_2500k_NEWnaturalHiggsinoNLSPout_mst_350_M3_5025_mu_225.root");//same thing as ../susyEvents_newNatural.root //chain->Add("/eos/uscms/store/user/abarker/MC/st_250_ho_150/MC_AB_2500k_st_250_ho_150.root"); //chain->Add("/eos/uscms/store/user/abarker/MC/st_250_ho_200/MC_AB_2500k_st_250_ho_200.root"); //chain->Add("/eos/uscms/store/user/abarker/MC/st_350_ho_200/MC_AB_2500k_mst_350_mu_200.root"); //chain->Add("/eos/uscms/store/user/abarker/MC/ho_140/MC_AB_2500k_ho_140.root"); //chain->Add("/eos/uscms/store/user/abarker/MC/ho_200/MC_AB_2500k_ho_200.root"); //chain->Add("../susyEvents_newNatural.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_NEWnaturalHiggsinoNLSPout_mst_350_M3_5025_mu_225.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_st_250_ho_150.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_st_250_ho_200.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_mst_350_mu_200.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_ho_140.root"); //chain->Add("dcache:/pnfs/cms/WAX/resilient/abarker/MC/MC_AB_2500k_ho_200.root"); SusyMainAna_MC* sea = new SusyMainAna_MC(chain); // configuration parameters // any values given here will replace the default values sea->SetDataset(physics+"_"+ds); // dataset name sea->SetPrintInterval(1e4); // print frequency sea->SetPrintLevel(0); // print level for event contents sea->SetUseTrigger(false); /* sea->AddHltName("HLT_Photon36_CaloIdL_Photon22_CaloIdL"); // add HLT trigger path name sea->AddHltName("HLT_Photon32_CaloIdL_Photon26_CaloIdL"); // add HLT trigger path name sea->AddHltName("HLT_Photon26_R9Id85_Photon18_R9Id85_Mass60"); sea->AddHltName("HLT_Photon26_R9Id85_Photon18_CaloId10_Iso50_Mass60"); sea->AddHltName("HLT_Photon26_CaloId10_Iso50_Photon18_R9Id85_Mass60"); sea->AddHltName("HLT_Photon26_CaloId10_Iso50_Photon18_CaloId10_Iso50_Mass60"); sea->AddHltName("HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_R9Id85_OR_CaloId10_Iso50_Mass60"); sea->AddHltName("HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_R9Id85_OR_CaloId10_Iso50_Mass70"); sea->AddHltName("HLT_Photon36_R9Id85_Photon22_R9Id85"); sea->AddHltName("HLT_Photon36_R9Id85_Photon22_CaloId10_Iso50"); sea->AddHltName("HLT_Photon36_CaloId10_Iso50_Photon22_R9Id85"); sea->AddHltName("HLT_Photon36_CaloId10_Iso50_Photon22_CaloId10_Iso50"); sea->AddHltName("HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_R9Id85_OR_CaloId10_Iso50"); */ sea->SetFilter(false); // filter events passing final cuts sea->SetProcessNEvents(-1); // number of events to be processed // as an example -- add your favorite Json here. 
More than one can be included via IncludeAJson. // sea->IncludeAJson("Cert_161079-161352_7TeV_PromptReco_Collisions11_JSON_noESpbl_v2.txt"); //sea->IncludeAJson("anotherJSON.txt"); TStopwatch ts; ts.Start(); sea->Loop(); ts.Stop(); std::cout << "RealTime : " << ts.RealTime()/60.0 << " minutes" << std::endl; std::cout << "CPUTime : " << ts.CpuTime()/60.0 << " minutes" << std::endl; }
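ana_Main_MC() relies on an MCpoint type and a setupMCpoint() helper that are defined elsewhere (loaded together with the analysis code, so they are not shown in this macro). Purely as an illustration of what that lookup might look like, here is a minimal sketch: only the filepath member is implied by the macro itself; the name field, the key strings, and the table layout are hypothetical, and the two entries simply reuse file paths from the commented-out chain->Add() lines above.

#include <map>
#include <string>

// Hypothetical stand-in for the MCpoint type used by ana_Main_MC(); only 'filepath' is implied by the macro.
struct MCpoint {
  std::string name;      // short label for the signal point (assumed field)
  std::string filepath;  // ntuple location handed to TChain::Add()
};

// Hypothetical stand-in for setupMCpoint(): map a key to a signal point, returning a pointer that stays valid.
MCpoint* setupMCpoint(const std::string& key)
{
  static std::map<std::string, MCpoint> points;
  if (points.empty()) {
    MCpoint p1;
    p1.name = "st_250_ho_200";
    p1.filepath = "/eos/uscms/store/user/abarker/MC/st_250_ho_200/MC_AB_2500k_st_250_ho_200.root";
    points[p1.name] = p1;
    MCpoint p2;
    p2.name = "ho_200";
    p2.filepath = "/eos/uscms/store/user/abarker/MC/ho_200/MC_AB_2500k_ho_200.root";
    points[p2.name] = p2;
  }
  std::map<std::string, MCpoint>::iterator it = points.find(key);
  return (it != points.end()) ? &it->second : 0;
}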