void makesimpleplot(void)
{
  // set_plot_style();

  TChain *chain = new TChain("OSTwoLepAna/summaryTree");

  // Skim files from the ttH125 acceptance-study sample on EOS,
  // kept in the original (alphabetical-listing) order
  const char *basePath = "root://eoscms.cern.ch//eos/cms/store/user/muell149/ttH-leptons_Skims/acceptance_study_v5/ttHJetToNonbb_M125_13TeV_amcatnloFXFX_madspin_pythia8/crab_ttH125/150916_225227/0000/";
  const int fileIndex[20] = {1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9};
  for (int f = 0; f < 20; f++)
    chain->Add(Form("%smultilep_michaeltest_deleteme_%d.root", basePath, fileIndex[f]));

  int chainentries = chain->GetEntries();
  cout << "tree entries: " << chainentries << endl;

  Int_t cachesize = 100000000; // 100 MBytes
  chain->SetCacheSize(cachesize); //<<<
  chain->SetCacheLearnEntries(20);

  double mcwgt_intree = -999.;
  double wgt_intree = -999.;
  int hDecay_intree = -999;
  int eventNum_intree = -999;

  vector<ttH::GenParticle> *pruned_genParticles_intree = 0;
  vector<ttH::Electron> *raw_electrons_intree = 0;
  vector<ttH::Electron> *preselected_electrons_intree = 0;
  vector<ttH::Electron> *tight_electrons_intree = 0;
  vector<ttH::Muon> *raw_muons_intree = 0;
  vector<ttH::Muon> *preselected_muons_intree = 0;
  vector<ttH::Muon> *tight_muons_intree = 0;
  vector<ttH::Lepton> *tight_leptons_intree = 0;
  vector<ttH::Lepton> *preselected_leptons_intree = 0;
  vector<ttH::Lepton> raw_leptons;

  chain->SetBranchAddress("mcwgt", &mcwgt_intree);
  chain->SetBranchAddress("wgt", &wgt_intree);
  chain->SetBranchAddress("eventnum", &eventNum_intree);
  chain->SetBranchAddress("higgs_decay", &hDecay_intree);
  chain->SetBranchAddress("pruned_genParticles", &pruned_genParticles_intree);
  chain->SetBranchAddress("raw_electrons", &raw_electrons_intree);
  chain->SetBranchAddress("preselected_electrons", &preselected_electrons_intree);
  chain->SetBranchAddress("tightMvaBased_electrons", &tight_electrons_intree);
  chain->SetBranchAddress("raw_muons", &raw_muons_intree);
  chain->SetBranchAddress("preselected_muons", &preselected_muons_intree);
  chain->SetBranchAddress("tightMvaBased_muons", &tight_muons_intree);
  chain->SetBranchAddress("tightMvaBased_leptons", &tight_leptons_intree);
  chain->SetBranchAddress("preselected_leptons", &preselected_leptons_intree);

  int positiveCharge;
  int negativeCharge;
  double leadPt;
  double trailPt;

  int duplicate = 0;
  int total_count = 0;
  int ss2l_reco_count = 0;
  int ss2l_reco_agree_count = 0;
  int ss2l_gen_count = 0;
  int ss2l_ee_gen_count = 0;
  int ss2l_mm_gen_count = 0;
  int ss2l_em_gen_count = 0;
  int ss2l_me_gen_count = 0;
  int l3_reco_count = 0;
  int l3_reco_agree_count = 0;
  int l3_gen_count = 0;
  int l4_reco_count = 0;
  int l4_reco_agree_count = 0;
  int l4_gen_count = 0;
  int ss2l_PS_count = 0;
  int ss2l_raw_count = 0;
  int l3_PS_count = 0;
  int l3_raw_count = 0;
  int l4_PS_count = 0;
  int l4_raw_count = 0;
  int wgt; // per-event weight sign (+1/-1)

  // pure rate study
  vector<int> cut_vec_ele_int(7, 0);
  vector<int> cut_vec_mu_int(7, 0);
  int raw_ele_size = 0;
  int raw_mu_size = 0;

  // 2D plot vars
  vector<ttH::GenParticle> genMuons;
  vector<ttH::GenParticle> genElectrons;
  vector<ttH::GenParticle> genLeptonsMatched;
  vector<ttH::GenParticle> genLeptonsUnmatched;

  int xRange = 40;
  int yRange = 40;
  int xBins = 20;
  int yBins = 20;
  int xBinSize = xRange/xBins;
  int yBinSize = yRange/yBins;

  // Note: histogram names must be unique within a ROOT directory
  TH2D *pT_hist_2lss    = new TH2D("pT_2lss",    "2lss",     xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_2lss_ee = new TH2D("pT_2lss_ee", "2lss ee",  xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_2lss_mm = new TH2D("pT_2lss_mm", "2lss mumu",xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_2lss_em = new TH2D("pT_2lss_em", "2lss e mu",xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_2lss_me = new TH2D("pT_2lss_me", "2lss mu e",xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_3l      = new TH2D("pT_3l",      "3l",       xBins, 0, xRange, yBins, 0, yRange);
  TH2D *pT_hist_4l      = new TH2D("pT_4l",      "4l",       xBins, 0, xRange, yBins, 0, yRange);

  TH1D *pT_hist_matched   = new TH1D("pT_matched",   "pT matched",   100, 0, 200);
  TH1D *pT_hist_unmatched = new TH1D("pT_unmatched", "pT unmatched", 100, 0, 200);
  TH1D *eta_hist_matched  = new TH1D("delta R", "dR between matched gen-reco", 50, 0, 0.075);
  TH1D *id_hist_matched        = new TH1D("pdgID_unmatchedPS",             "pdgID of unmatched PS lep",             20, 0, 20);
  TH1D *id_hist_matched_parent = new TH1D("pdgID_unmatchedPS_grandparent", "pdgID of unmatched PS lep grandparent", 600, 0, 600);
  TH1D *charge_hist_2lss = new TH1D("charge", "extra lep charge symmetry between 2lss", 5, -2, 3);
  TH1D *pt_hist_2lss     = new TH1D("pt",     "extra lep pt ordering between 2lss",     7, -2, 5);
  TH1D *hist_numPsLeps_2lss = new TH1D("PS lepton multiplicity 2lss", "PS lepton multiplicity 2lss", 8, 0, 8);
  TH1D *hist_numPsLeps_3l   = new TH1D("PS lepton multiplicity 3l",   "PS lepton multiplicity 3l",   8, 0, 8);
  TH1D *hist_numPsLeps_4l   = new TH1D("PS lepton multiplicity 4l",   "PS lepton multiplicity 4l",   8, 0, 8);

  for (int i = 0; i < chainentries; i++)
  {
    // report progress every 10k events
    //if (i == 1000) break; // testing feature
    if (i % 10000 == 0) std::cout << int(100.*i/chainentries) << "% of events processed" << std::endl;

    chain->GetEntry(i);

    if (mcwgt_intree > 0) wgt = 1;
    else wgt = -1;
    total_count += wgt;

    positiveCharge = 0;
    negativeCharge = 0;

    vector<ttH::GenParticle> signalLeps;
    for (const auto & genParticle : *pruned_genParticles_intree)
    {
      if ((abs(genParticle.pdgID) == 11 && genParticle.obj.pt() > 0) || (abs(genParticle.pdgID) == 13 && genParticle.obj.pt() > 0))
      {
        if (genParticle.isPromptFinalState || genParticle.isDirectPromptTauDecayProductFinalState)
        {
          if (genParticle.pdgID > 0) positiveCharge += 1;
          if (genParticle.pdgID < 0) negativeCharge += 1;
          if (abs(genParticle.pdgID) == 11) genElectrons.push_back(genParticle);
          if (abs(genParticle.pdgID) == 13) genMuons.push_back(genParticle);
          signalLeps.push_back(genParticle);
        }
      }
    }

    bool found_duplicate = false;
    vector<ttH::Lepton> raw_matches;
    for (auto & promptGenLep : signalLeps)
    {
      ttH::Lepton match = findRecoMatch(promptGenLep, *raw_electrons_intree, *raw_muons_intree);
      for (auto & raw_match : raw_matches)
      {
        if (match.obj.pt() == raw_match.obj.pt())
        {
          duplicate += wgt;
          found_duplicate = true;
          break;
        }
      }
      if (found_duplicate) break;
      else raw_matches.push_back(match);
    }
    if (found_duplicate) continue;

    raw_leptons = GetCollection(*raw_muons_intree, *raw_electrons_intree);

    auto matchedLepTuple = findMatchedCollections(raw_matches, *raw_electrons_intree, *preselected_electrons_intree, *raw_muons_intree, *preselected_muons_intree);
    vector<ttH::Electron> matched_raw_electrons = std::get<0>(matchedLepTuple);
    vector<ttH::Electron> matched_ps_electrons = std::get<1>(matchedLepTuple);
    vector<ttH::Muon> matched_raw_muons = std::get<2>(matchedLepTuple);
    vector<ttH::Muon> matched_ps_muons = std::get<3>(matchedLepTuple);
    vector<ttH::Lepton> ps_matches = GetCollection(matched_ps_muons, matched_ps_electrons);

    // 2lss
    if ((positiveCharge == 2 && negativeCharge == 0) || (negativeCharge == 2 && positiveCharge == 0))
    {
      ttH::Lepton leadMatch = findRecoMatch(signalLeps[0], *raw_electrons_intree, *raw_muons_intree);
      ttH::Lepton trailMatch = findRecoMatch(signalLeps[1], *raw_electrons_intree, *raw_muons_intree);
      leadPt = leadMatch.obj.pt();
      trailPt = trailMatch.obj.pt();

      vector<ttH::Lepton> raw_matches;
      raw_matches.push_back(leadMatch);
      raw_matches.push_back(trailMatch);

      if (leadPt == trailPt)
      {
        duplicate += wgt;
        continue;
      }

      hist_numPsLeps_2lss->Fill(preselected_leptons_intree->size(), wgt);

      auto matchedLepTuple = findMatchedCollections(raw_matches, *raw_electrons_intree, *preselected_electrons_intree, *raw_muons_intree, *preselected_muons_intree);
      vector<ttH::Electron> matched_raw_electrons = std::get<0>(matchedLepTuple);
      vector<ttH::Electron> matched_ps_electrons = std::get<1>(matchedLepTuple);
      vector<ttH::Muon> matched_raw_muons = std::get<2>(matchedLepTuple);
      vector<ttH::Muon> matched_ps_muons = std::get<3>(matchedLepTuple);
      vector<ttH::Lepton> ps_matches = GetCollection(matched_ps_muons, matched_ps_electrons);

      ss2l_PS_count += ps_matches.size()*wgt;
      ss2l_raw_count += raw_matches.size()*wgt;

      vector<bool> promptVector;
      vector<bool> promptTauVector;
      vector<ttH::Lepton> unmatchedPSLeptons;
      vector<int> motherIdVector;
      vector<int> grandMotherIdVector;
      vector<int> unMatchedGrandMotherIdVector;

      bool lead_ps_raw_match = false;
      bool trail_ps_raw_match = false;

      for (const auto & lep : *preselected_leptons_intree)
      {
        if (leadMatch.obj.pt() == lep.obj.pt())
        {
          lead_ps_raw_match = true;
          motherIdVector.push_back(abs(lep.genMotherPdgID));
          grandMotherIdVector.push_back(abs(lep.genGrandMotherPdgID));
          pT_hist_matched->Fill(lep.obj.pt());
        }
        else if (trailMatch.obj.pt() == lep.obj.pt())
        {
          trail_ps_raw_match = true;
          motherIdVector.push_back(abs(lep.genMotherPdgID));
          grandMotherIdVector.push_back(abs(lep.genGrandMotherPdgID));
          pT_hist_matched->Fill(lep.obj.pt());
        }
        else
        {
          if (preselected_leptons_intree->size() == 3)
          {
            if (lep.charge == leadMatch.charge) charge_hist_2lss->Fill(1);
            else charge_hist_2lss->Fill(-1);

            if (lep.obj.pt() < trailMatch.obj.pt() && lep.obj.pt() < leadMatch.obj.pt()) pt_hist_2lss->Fill(-1);
            else if (lep.obj.pt() > trailMatch.obj.pt() && lep.obj.pt() < leadMatch.obj.pt()) pt_hist_2lss->Fill(1);
            else if (lep.obj.pt() > leadMatch.obj.pt()) pt_hist_2lss->Fill(3);

            id_hist_matched->Fill(abs(lep.pdgID));
          }
          unmatchedPSLeptons.push_back(lep);
        }
      }

      // START: find the missing PS lepton
      vector<ttH::Electron> ps_unmatched_raw_electrons;
      vector<ttH::Muon> ps_unmatched_raw_muons;

      if (!lead_ps_raw_match)
      {
        if (abs(leadMatch.pdgID) == 13)
        {
          for (auto & mu : matched_raw_muons)
            if (leadMatch.obj.pt() == mu.obj.pt()) ps_unmatched_raw_muons.push_back(mu);
        }
        else
        {
          for (auto & ele : matched_raw_electrons)
            if (leadMatch.obj.pt() == ele.obj.pt()) ps_unmatched_raw_electrons.push_back(ele);
        }
      }

      if (!trail_ps_raw_match)
      {
        if (abs(trailMatch.pdgID) == 13)
        {
          for (auto & mu : matched_raw_muons)
            if (trailMatch.obj.pt() == mu.obj.pt()) ps_unmatched_raw_muons.push_back(mu);
        }
        else
        {
          for (auto & ele : matched_raw_electrons)
            if (trailMatch.obj.pt() == ele.obj.pt()) ps_unmatched_raw_electrons.push_back(ele);
        }
      }

      preselectionEff(ps_unmatched_raw_muons, &cut_vec_mu_int, &raw_mu_size, ps_unmatched_raw_electrons, &cut_vec_ele_int, &raw_ele_size);
      // END: find the missing PS lepton
      motherIdVector.clear();
      grandMotherIdVector.clear();

      eta_hist_matched->Fill(reco::deltaR(signalLeps[0].obj.eta(), signalLeps[0].obj.phi(), leadMatch.obj.eta(), leadMatch.obj.phi()));
      eta_hist_matched->Fill(reco::deltaR(signalLeps[1].obj.eta(), signalLeps[1].obj.phi(), trailMatch.obj.eta(), trailMatch.obj.phi()));

      fill_2D(pT_hist_2lss, leadPt, trailPt, wgt, xBinSize, yBinSize);
      ss2l_gen_count += wgt;

      if (abs(leadMatch.pdgID) == 11)
      {
        if (abs(trailMatch.pdgID) == 11)
        {
          fill_2D(pT_hist_2lss_ee, leadPt, trailPt, wgt, xBinSize, yBinSize);
          ss2l_ee_gen_count += wgt;
        }
        else if (abs(trailMatch.pdgID) == 13)
        {
          fill_2D(pT_hist_2lss_em, leadPt, trailPt, wgt, xBinSize, yBinSize);
          ss2l_em_gen_count += wgt;
        }
      }
      else if (abs(leadMatch.pdgID) == 13)
      {
        if (abs(trailMatch.pdgID) == 11)
        {
          fill_2D(pT_hist_2lss_me, leadPt, trailPt, wgt, xBinSize, yBinSize);
          ss2l_me_gen_count += wgt;
        }
        else if (abs(trailMatch.pdgID) == 13)
        {
          fill_2D(pT_hist_2lss_mm, leadPt, trailPt, wgt, xBinSize, yBinSize);
          ss2l_mm_gen_count += wgt;
        }
      }

      if (tight_leptons_intree->size() == 2 && abs((*tight_leptons_intree)[0].charge + (*tight_leptons_intree)[1].charge) == 2)
      {
        // leadPt = (*tight_leptons_intree)[0].obj.pt();
        // trailPt = (*tight_leptons_intree)[1].obj.pt();
        // fill_2D(pT_hist_2lss,leadPt,trailPt,wgt, xBinSize, yBinSize);
        ss2l_reco_agree_count += wgt;
      }
    }
    else if (positiveCharge + negativeCharge == 3) // 3l
    {
      l3_PS_count += preselected_leptons_intree->size()*wgt;
      l3_raw_count += raw_leptons.size()*wgt;
      hist_numPsLeps_3l->Fill(preselected_leptons_intree->size(), wgt);

      ttH::Lepton leadMatch = findRecoMatch(signalLeps[0], *raw_electrons_intree, *raw_muons_intree);
      ttH::Lepton subLeadMatch = findRecoMatch(signalLeps[1], *raw_electrons_intree, *raw_muons_intree);
      ttH::Lepton trailMatch = findRecoMatch(signalLeps[2], *raw_electrons_intree, *raw_muons_intree);

      vector<ttH::Lepton> raw_matches;
      raw_matches.push_back(leadMatch);
      raw_matches.push_back(subLeadMatch);
      raw_matches.push_back(trailMatch);

      if (leadMatch.obj.pt() == subLeadMatch.obj.pt() || subLeadMatch.obj.pt() == trailMatch.obj.pt() || leadMatch.obj.pt() == trailMatch.obj.pt())
      {
        duplicate += wgt;
        continue;
      }

      // leadPt = leadMatch.obj.pt();
      // trailPt = trailMatch.obj.pt();
      // leadPt = signalLeps[0].obj.pt();
      // trailPt = signalLeps[2].obj.pt();
      // fill_2D(pT_hist_3l,leadPt,trailPt,wgt, xBinSize, yBinSize);
      // pT_hist_matched->Fill(abs(signalLeps[0].obj.pt()-leadPt));
      // pT_hist_matched->Fill(abs(signalLeps[2].obj.pt()-trailPt));
      // eta_hist_matched->Fill(deltaR(signalLeps[0],leadMatch));
      // eta_hist_matched->Fill(deltaR(signalLeps[2],trailMatch));

      l3_gen_count += wgt;

      if (tight_leptons_intree->size() == 3)
      {
        // leadPt = (*tight_leptons_intree)[0].obj.pt();
        // trailPt = (*tight_leptons_intree)[2].obj.pt();
        // fill_2D(pT_hist_3l,leadPt,trailPt,wgt, xBinSize, yBinSize);
        l3_reco_agree_count += wgt;
      }
    }
    else if (positiveCharge + negativeCharge >= 4) // 4l
    {
      l4_PS_count += preselected_leptons_intree->size()*wgt;
      l4_raw_count += raw_leptons.size()*wgt;
      hist_numPsLeps_4l->Fill(preselected_leptons_intree->size(), wgt);

      // ttH::Lepton leadMatch = findRecoMatch(signalLeps[0], *raw_electrons_intree, *raw_muons_intree);
      // ttH::Lepton trailMatch = findRecoMatch(signalLeps[3], *raw_electrons_intree, *raw_muons_intree);
      // leadPt = leadMatch.obj.pt();
      // trailPt = trailMatch.obj.pt();
      // leadPt = signalLeps[0].obj.pt();
      // trailPt = signalLeps[3].obj.pt();
      // fill_2D(pT_hist_4l,leadPt,trailPt,wgt, xBinSize, yBinSize);
      // pT_hist_matched->Fill(abs(signalLeps[0].obj.pt()-leadPt));
      // pT_hist_matched->Fill(abs(signalLeps[3].obj.pt()-trailPt));
      // eta_hist_matched->Fill(deltaR(signalLeps[0],leadMatch));
      // eta_hist_matched->Fill(deltaR(signalLeps[3],trailMatch));

      l4_gen_count += wgt;

      if (preselected_leptons_intree->size() >= 4)
      {
        // leadPt = (*preselected_leptons_intree)[0].obj.pt();
        // trailPt = (*preselected_leptons_intree)[preselected_leptons_intree->size()-1].obj.pt();
        // fill_2D(pT_hist_4l,leadPt,trailPt,wgt, xBinSize, yBinSize);
        l4_reco_agree_count += wgt;
      }
    }
  } // end event loop

  std::cout << "event loop complete. drawing hists." << std::endl;

  gStyle->SetOptStat(0);

  // TCanvas* pT_can = new TCanvas("pt can", "pt can");
  // TLegend *pT_leg = new TLegend(0.83,0.66,0.99,0.77,NULL,"brNDC");
  // TLegend *pT_leg = new TLegend(0.83,0.66,0.99,0.77);
  // pT_hist_matched->SetLineColor(1);
  // pT_hist_unmatched->SetLineColor(2);
  // pT_hist_matched->Scale(1/pT_hist_matched->Integral());
  // pT_hist_unmatched->Scale(1/pT_hist_unmatched->Integral());
  // pT_hist_matched->Draw();
  // pT_hist_matched->Draw("same");
  // pT_leg->AddEntry(pT_hist_matched,"gen-matched","l");
  // pT_leg->AddEntry(pT_hist_unmatched,"not gen-matched","l");
  // pT_leg->Draw("same");

  TCanvas* id_can = new TCanvas("id can", "id can");
  id_hist_matched->Scale(1./id_hist_matched->Integral());
  id_hist_matched->Draw();
  // TLegend *id_leg = new TLegend(0.83,0.66,0.99,0.77,NULL,"brNDC");
  // id_hist_unnmatched->SetLineColor(1);
  // id_hist_unmatched->SetLineColor(2);
  // id_hist_matched->SetLineColor(kBlue);
  // id_hist_unmatched->Draw("same");
  // id_hist_matched->Draw("same");
  // id_leg->AddEntry(id_hist_matched,"matched W grandmothers","l");
  // id_leg->AddEntry(id_hist_unmatched,"un matched W grandmother","l");
  // id_leg->AddEntry(id_hist_unnmatched,"both matched","l");
  // id_leg->Draw("same");

  TCanvas* charge_can1 = new TCanvas("charge can1", "charge can1");
  charge_hist_2lss->Scale(1./charge_hist_2lss->Integral());
  charge_hist_2lss->Draw();

  TCanvas* pt_can1 = new TCanvas("pt can1", "pt can1");
  pt_hist_2lss->Scale(1./pt_hist_2lss->Integral());
  pt_hist_2lss->Draw();

  TCanvas* id_can1 = new TCanvas("id can1", "id can1");
  id_hist_matched_parent->Scale(1./id_hist_matched_parent->Integral());
  id_hist_matched_parent->Draw();

  TCanvas* pT_can = new TCanvas("pt can", "pt can");
  TLegend *pT_leg = new TLegend(0.83, 0.66, 0.99, 0.77, NULL, "brNDC");
  pT_hist_matched->SetLineColor(1);
  pT_hist_unmatched->SetLineColor(2);
  //pT_hist_matched->Scale(1/pT_hist_matched->Integral());
  // pT_hist_unmatched->Scale(1/pT_hist_unmatched->Integral());
  pT_hist_matched->Draw();
  pT_hist_unmatched->Draw("same");
  pT_leg->AddEntry(pT_hist_matched, "matched", "l");
  pT_leg->AddEntry(pT_hist_unmatched, "not matched", "l");
  pT_leg->Draw("same");

  TCanvas* can_numPS2 = new TCanvas("num can2", "num can2");
  hist_numPsLeps_2lss->Scale(1./hist_numPsLeps_2lss->Integral());
  lepMultiplicity(hist_numPsLeps_2lss, "2lss");
  hist_numPsLeps_2lss->Draw();

  TCanvas* can_numPS3 = new TCanvas("num can3", "num can3");
  hist_numPsLeps_3l->Scale(1./hist_numPsLeps_3l->Integral());
  lepMultiplicity(hist_numPsLeps_3l, "3l");
  hist_numPsLeps_3l->Draw();

  TCanvas* can_numPS4 = new TCanvas("num can4", "num can4");
  hist_numPsLeps_4l->Scale(1./hist_numPsLeps_4l->Integral());
  lepMultiplicity(hist_numPsLeps_4l, "4l");
  hist_numPsLeps_4l->Draw();

  std::cout << "double matches " << duplicate << std::endl;
  std::cout << "total_count = " << total_count << std::endl;
  std::cout << "ss2l_reco_count = " << ss2l_reco_count << std::endl;
"ss2l_reco_agree_count = " << ss2l_reco_agree_count << std::endl; std::cout << "ss2l_gen_count = " << ss2l_gen_count << std::endl; std::cout << "l3_reco_count = " << l3_reco_count << std::endl; std::cout << "l3_reco_agree_count = " << l3_reco_agree_count << std::endl; std::cout << "l3_gen_count = " << l3_gen_count << std::endl; std::cout << " > l4_reco_count = " << l4_reco_count << std::endl; std::cout << " > l4_reco_agree_count = " << l4_reco_agree_count << std::endl; std::cout << "> l4_gen_count = " << l4_gen_count << std::endl; std::cout << "> ss2l_PS_count = " << ss2l_PS_count << std::endl; std::cout << "> ss2l_raw_count = " << ss2l_raw_count << std::endl; std::cout << "> ss2l_eff = " << ss2l_PS_count/ss2l_raw_count << std::endl; std::cout << "> l3_PS_count = " << l3_PS_count << std::endl; std::cout << "> l3_raw_count = " << l3_raw_count << std::endl; std::cout << "> l3_eff = " << l3_PS_count/l3_raw_count << std::endl; std::cout << "> l4_PS_count = " << l4_PS_count << std::endl; std::cout << "> l4_raw_count = " << l4_raw_count << std::endl; std::cout << "> l4_eff = " << l4_PS_count/l4_raw_count << std::endl; int i = 0; for (int & frac : cut_vec_mu_int) { std::cout << frac << " " << raw_mu_size << std::endl; std::cout << "mu cut # " << i << " is removing: " << 100. - 100.*double(frac)/double(raw_mu_size) << "%" << std::endl; //std::cout << "mu cut # " << i << " failed uniquely: " << double(frac)/double(raw_mu_size) << "%" << std::endl; i +=1; } int j = 0; for (int & frac : cut_vec_ele_int) { std::cout << frac << " " << raw_mu_size << std::endl; std::cout << "ele cut # " << j << " is removing: " << 100. - 100.*double(frac)/double(raw_ele_size) << "%" << std::endl; // std::cout << "ele cut # " << j << " failed uniquely: " << double(frac)/double(raw_ele_size) << "%" << std::endl; j +=1; } }
/**
 * 1. Data sample: pp200 W -> e nu with pile-up corresponding to 1 MHz min. bias
 *    events, 50k events y2011, 10k events y2012.
 *
 * 2. Proof of principle: no pile-up for both PPV and KFV
 *
 *    a. Reconstructed primary track multiplicity versus corresponding MC
 *       "reconstructable" (i.e. in STAR acceptance, no. of TPC MC hits >= 15)
 *       track multiplicity.
 *
 *    b. Corrected reconstructed primary track multiplicity (i.e. multiplied by
 *       QA/100.) versus corresponding MC "reconstructable" (i.e. in STAR
 *       acceptance, no. of TPC MC hits >= 15) track multiplicity.
 *
 *    c. Efficiency of primary vertex reconstruction versus MC "reconstructable"
 *       track multiplicity.
 *
 * 3. With pile-up: repeat (a-c) above with the old ranking scheme for
 *
 *    I.   Any reconstructed primary vertex which is matched with the MC trigger
 *         vertex (MC = 1)
 *
 *    II.  The best (in the sense of ranking) reconstructed primary vertex which is
 *         matched with the MC trigger vertex (MC = 1)
 *
 *    III. The best (in the sense of ranking) reconstructed primary vertex which is
 *         not matched with the MC trigger vertex (MC != 1)
 *
 * 4. With pile-up: repeat (a-c) above with the new ranking scheme for cases I-III.
 */
void MuMcPrVKFV2012(Long64_t nevent, const char *file, std::string outFile, bool fillNtuple)
{
#ifdef __TMVA__
  boost::replace_last(outFile, ".root", "");
  outFile += ".TMVArank.root";
  // create a set of variables and declare them to the reader
  // - the variable names must correspond in name and type to
  //   those given in the weight file(s) that you use
  TString separator(":");
  TString Vnames(vnames);
  TObjArray *array = Vnames.Tokenize(separator);

  std::vector<std::string> inputVars;
  TIter next(array);
  TObjString *objs;
  while ((objs = (TObjString *) next())) {
    std::cout << objs->GetString() << std::endl;
  }
  inputVars.push_back("beam");
  inputVars.push_back("postx");
  inputVars.push_back("prompt");
  inputVars.push_back("cross");
  inputVars.push_back("tof");
  inputVars.push_back("notof");
  inputVars.push_back("EEMC");
  inputVars.push_back("noEEMC");
  inputVars.push_back("chi2");

  std::vector<double> *inputVec = new std::vector<double>(inputVars.size());
  IClassifierReader *classReader = new ReadBDT(inputVars);
#endif /* __TMVA__ */

  TFile *fOut = TFile::Open(outFile.c_str(), "recreate");
  data_t data;

  // Book histograms
  const int nMcRecMult = 75;
  TArrayD xMult(nMcRecMult + 1);
  xMult[0] = -0.5;
  for (int i = 1; i <= nMcRecMult; i++) {
    if      (xMult[i - 1] <  50) xMult[i] = xMult[i - 1] +   1; // 1 - 50
    else if (xMult[i - 1] < 100) xMult[i] = xMult[i - 1] +   2; // 51 - 75
    else if (xMult[i - 1] < 200) xMult[i] = xMult[i - 1] +  10; // 76 - 85
    else                         xMult[i] = xMult[i - 1] + 100; // 86 - 100
  }

  TH1D *McRecMulT = new TH1D("McRecMulT", "Reconstructable multiplicity for trigger Mc Vertex", nMcRecMult, xMult.GetArray());

  struct Name_t {
    const Char_t *Name;
    const Char_t *Title;
  };
  const Name_t HCases[3] = {
    {"Any",  "Any vertex matched with MC == 1"},
    {"Good", "The best rank vertex with MC == 1"},
    {"Bad",  "The best rank vertex with MC != 1"}
  };
  const Name_t Plots[4] = {
    {"Mult",     "the reconstructed (uncorrected) track multiplicity versus Reconstructable multiplicity"},
    {"MultQA",   "the reconstructed (corrected for QA) track multiplicity versus Reconstructable multiplicity"},
    {"McRecMul", "Reconstructable multiplicity"},
    {"YvsX",     "Bad versus Good value"}
  };
  /* [h][p] */
  TH1 *hists[3][4];
  for (int h = 0; h < 3; h++) {
    for (int p = 0; p < 4; p++) {
      TString Name(Plots[p].Name);
      Name += HCases[h].Name;
      TString Title(Plots[p].Title);
      Title += " for ";
      Title += HCases[h].Title;
      Title += " vertex";
      if (p < 2)       hists[h][p] = new TH2D(Name, Title, nMcRecMult, xMult.GetArray(), nMcRecMult, xMult.GetArray());
      else if (p == 2) hists[h][p] = new TH1D(Name, Title, nMcRecMult, xMult.GetArray());
    }
  }

  TNtuple *VertexG = new TNtuple("VertexG", "good vertex & global params info", vnames);
  TNtuple *VertexB = new TNtuple("VertexB", "bad vertex & global params info", vnames);

  // ----------------------------------------------
  StMuDstMaker *maker = new StMuDstMaker(0, 0, "", file, "st:MuDst.root", 1e9); // set up maker in read mode
  //   0, 0    this means read mode
  //   dir     read all files in this directory
  //   file    bla.lis: read all files in this list; if (file != "") dir is ignored
  //   filter  apply filter to filenames, multiple filters are separated by ':'
  //   10      maximum number of files to read
  maker->SetStatus("*", 0);

  std::vector<std::string> activeBranchNames = {
    "MuEvent",
    "PrimaryVertices",
    "StStMuMcVertex",
    "StStMuMcTrack"
  };

  // Set active branches
  for (const auto & branchName : activeBranchNames)
    maker->SetStatus(branchName.c_str(), 1);

  TChain *tree = maker->chain();
  Long64_t nentries = tree->GetEntries();
  nevent = TMath::Min(nevent, nentries);
  std::cout << nentries << " events in chain, " << nevent << " will be read." << std::endl;

  tree->SetCacheSize(-1);        // setting the read cache to -1 uses the AutoFlush value chosen when writing
  tree->SetCacheLearnEntries(1); // one entry is sufficient to learn
  tree->SetCacheEntryRange(0, nevent);

  for (Long64_t ev = 0; ev < nevent; ev++) {
    if (maker->Make()) break;

    StMuDst *muDst = maker->muDst();     // pointer to the StMuDst class, the class that points to all the data
    StMuEvent *muEvent = muDst->event(); // pointer to the class holding event-wise information
    int referenceMultiplicity = muEvent->refMult(); // the reference multiplicity

    TClonesArray *PrimaryVertices = muDst->primaryVertices();
    int nPrimaryVertices = PrimaryVertices->GetEntriesFast();
    TClonesArray *MuMcVertices = muDst->mcArray(0);
    int nMuMcVertices = MuMcVertices->GetEntriesFast();
    TClonesArray *MuMcTracks = muDst->mcArray(1);
    int nMuMcTracks = MuMcTracks->GetEntriesFast();

    if (nevent >= 10 && ev % int(nevent*0.1) == 0) {
      std::cout << "Event #" << ev << "\tRun\t" << muEvent->runId()
                << "\tId: " << muEvent->eventId()
                << " refMult= " << referenceMultiplicity
                << "\tPrimaryVertices " << nPrimaryVertices
                << "\t" << " " << nMuMcVertices
                << "\t" << " " << nMuMcTracks << std::endl;
    }

    // const Double_t field = muEvent->magneticField()*kilogauss;

    if (! nMuMcVertices || ! nMuMcTracks || nPrimaryVertices <= 0) {
      std::cout << "Ev. " << ev << " has no MC information ==> skip it" << std::endl;
      std::cout << "OR no reconstructed vertices found" << std::endl;
      continue;
    }
" << ev << " has no MC information ==> skip it" << std::endl; std::cout << "OR no reconstructed verticies found" << std::endl; continue; } // Count number of MC tracks at a vertex with TPC reconstructable tracks std::multimap<int, int> Mc2McHitTracks; for (int m = 0; m < nMuMcTracks; m++) { StMuMcTrack *McTrack = (StMuMcTrack *) MuMcTracks->UncheckedAt(m); if (McTrack->No_tpc_hit() < 15) continue; Mc2McHitTracks.insert(std::pair<int, int>(McTrack->IdVx(), McTrack->Id())); } // This is the "reconstructable" track multiplicity int nMcTracksWithHits = Mc2McHitTracks.count(1); // Let's skip events in which we do not expect to reconstruct any tracks // (and thus vertex) from the primary vertex if (nMcTracksWithHits <= 0) continue; // This is our denominator histogram for efficiencies McRecMulT->Fill(nMcTracksWithHits); // ============= Build map between Rc and Mc vertices std::map<StMuPrimaryVertex *, StMuMcVertex *> reco2McVertices; TArrayF vertexRanks(nPrimaryVertices); int mcMatchedVertexIndex = -1; // any vertex with MC==1 and highest reconstrated multiplicity. int vertexMaxMultiplicity = -1; // First loop over all verticies in this event. There is at least one // must be available for (int recoVertexIndex = 0; recoVertexIndex < nPrimaryVertices; recoVertexIndex++) { vertexRanks[recoVertexIndex] = -1e10; StMuPrimaryVertex *recoVertex = (StMuPrimaryVertex *) PrimaryVertices->UncheckedAt(recoVertexIndex); if ( !AcceptVX(recoVertex) ) continue; // Check Mc if (recoVertex->idTruth() < 0 || recoVertex->idTruth() > nMuMcVertices) { std::cout << "ERROR: Illegal idTruth " << recoVertex->idTruth() << " The track is ignored" << std::endl; continue; } StMuMcVertex *mcVertex = (StMuMcVertex *) MuMcVertices->UncheckedAt(recoVertex->idTruth() - 1); if (mcVertex->Id() != recoVertex->idTruth()) { std::cout << "ERROR: Mismatched idTruth " << recoVertex->idTruth() << " and mcVertex Id " << mcVertex->Id() << " The vertex is ignored" << std::endl; continue; } reco2McVertices[recoVertex] = mcVertex; vertexRanks[recoVertexIndex] = recoVertex->ranking(); if (recoVertex->idTruth() == 1 && recoVertex->noTracks() > vertexMaxMultiplicity) { mcMatchedVertexIndex = recoVertexIndex; vertexMaxMultiplicity = recoVertex->noTracks(); } FillData(data, recoVertex); #ifdef __TMVA__ Float_t *dataArray = &data.beam; for (size_t j = 0; j < inputVec->size(); j++) (*inputVec)[j] = dataArray[j]; vertexRanks[recoVertexIndex] = classReader->GetMvaValue( *inputVec ); #endif } // If we reconstructed a vertex which matches the MC one we fill the // numerator of the "Any" efficiency histogram if (mcMatchedVertexIndex != -1) { StMuPrimaryVertex *recoVertexMatchedMc = (StMuPrimaryVertex*) PrimaryVertices->UncheckedAt(mcMatchedVertexIndex); double nTracks = recoVertexMatchedMc->noTracks(); double nTracksQA = nTracks * recoVertexMatchedMc->qaTruth() / 100.; hists[0][0]->Fill(nMcTracksWithHits, nTracks); hists[0][1]->Fill(nMcTracksWithHits, nTracksQA); hists[0][2]->Fill(nMcTracksWithHits); } // Now deal with the highest rank vertex int maxRankVertexIndex = TMath::LocMax(nPrimaryVertices, vertexRanks.GetArray()); StMuPrimaryVertex *recoVertexMaxRank = (StMuPrimaryVertex*) PrimaryVertices->UncheckedAt(maxRankVertexIndex); StMuMcVertex *mcVertex = reco2McVertices[recoVertexMaxRank]; double nTracks = recoVertexMaxRank->noTracks(); double nTracksQA = nTracks * recoVertexMaxRank->qaTruth() / 100.; // Fill numerator for "good" and "bad" efficiencies int h = ( mcVertex && mcVertex->Id() == 1) ? 
1 : 2; hists[h][0]->Fill(nMcTracksWithHits, nTracks); hists[h][1]->Fill(nMcTracksWithHits, nTracksQA); hists[h][2]->Fill(nMcTracksWithHits); // Proceed with filling ntuple only if requested by the user if ( !fillNtuple ) continue; // Second loop over all verticies in this event for (int recoVertexIndex = 0; recoVertexIndex < nPrimaryVertices; recoVertexIndex++) { StMuPrimaryVertex *recoVertex = (StMuPrimaryVertex *) PrimaryVertices->UncheckedAt(recoVertexIndex); if ( !AcceptVX(recoVertex) ) continue; StMuMcVertex *mcVertex = reco2McVertices[recoVertex]; if ( !mcVertex ) { std::cout << "No Match from RC to MC" << std::endl; continue; } if (vtxeval::gDebugFlag) { std::cout << Form("Vx[%3i]", recoVertexIndex) << *recoVertex << " " << *mcVertex; int nMcTracksWithHitsatL = Mc2McHitTracks.count(recoVertex->idTruth()); std::cout << Form("Number of McTkHit %4i rank %8.3f", nMcTracksWithHitsatL, vertexRanks[recoVertexIndex]); } int IdPar = mcVertex->IdParTrk(); if (IdPar > 0 && IdPar <= nMuMcTracks) { StMuMcTrack *mcTrack = (StMuMcTrack *) MuMcTracks->UncheckedAt(IdPar - 1); if (mcTrack && vtxeval::gDebugFlag) std::cout << " " << mcTrack->GeName(); } FillData(data, recoVertex); double nTracks = recoVertex->noTracks(); if (mcVertex->Id() == 1 && nTracks == vertexMaxMultiplicity) {// good VertexG->Fill(&data.beam); } else { // bad VertexB->Fill(&data.beam); } } if ( !gROOT->IsBatch() ) { if (vtxeval::ask_user()) return; } else { vtxeval::gDebugFlag = false; } } fOut->Write(); }
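// The efficiencies described in items 2c, 3 and 4 of the header comment can be formed
// from the histograms written above: McRecMulT is the denominator ("reconstructable"
// multiplicity of the MC trigger vertex) and McRecMulAny / McRecMulGood / McRecMulBad
// are the numerators for cases I-III. The drawing is done elsewhere; the sketch below
// is only a minimal illustration, and the default file name is a placeholder for
// whatever outFile was passed to MuMcPrVKFV2012.
void drawPrVertexEff(const char *fileName = "MuMcPrVKFV2012.root")
{
  TFile *f = TFile::Open(fileName);
  if (!f || f->IsZombie()) return;
  TH1D *den = (TH1D *) f->Get("McRecMulT");
  if (!den) return;
  const char *cases[3] = {"Any", "Good", "Bad"};
  TCanvas *c = new TCanvas("prVertexEff", "Primary vertex efficiency");
  for (int h = 0; h < 3; h++) {
    TH1D *num = (TH1D *) f->Get(Form("McRecMul%s", cases[h]));
    if (!num) continue;
    // TEfficiency computes the binomial uncertainty bin by bin,
    // assuming each numerator is a subset of the denominator
    TEfficiency *eff = new TEfficiency(*num, *den);
    eff->SetLineColor(h + 1);
    eff->Draw(h == 0 ? "" : "same");
  }
  c->Update();
}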
void trainElectronEnergyRegression_ECAL(char* trainingFile, char* outWeightFile, char* optionChar, int nTrees)
{
  // Setting up the training option
  std::string optionStr(optionChar);

  // ******** If option is V00, V01, V02, etc. ******** //
  if (optionStr == "V00" || optionStr == "V01") {

    GBRTrainer *traineb = new GBRTrainer;
    GBRTrainer *trainebvar = new GBRTrainer;
    GBRTrainer *trainee = new GBRTrainer;
    GBRTrainer *traineevar = new GBRTrainer;

    TTree *intree = 0;

    cout << "Training on file " << trainingFile << " with version " << optionChar << endl;
    TChain *chainele = new TChain("eleIDdir/T1");
    chainele->Add(trainingFile);
    chainele->LoadTree(0);
    chainele->SetCacheSize(64*1024*1024);
    chainele->SetCacheLearnEntries();
    intree = chainele;

    traineb->AddTree(chainele);
    trainebvar->AddTree(chainele);
    trainee->AddTree(chainele);
    traineevar->AddTree(chainele);

    TCut traincut = "pt>0";
    TCut evtcut;
    TCut evtcutvar;
    TCut statusenergycut;

    // if you want to train also the energy variance
    evtcut = "event%2==0 ";
    evtcutvar = "event%2==1 ";
    statusenergycut = "(GeneratedEnergyStatus3-GeneratedEnergyStatus1)/GeneratedEnergyStatus3<0.01 && GeneratedEnergyStatus3>=GeneratedEnergyStatus1";

    traineb->SetTrainingCut(std::string(traincut && evtcut && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    trainee->SetTrainingCut(std::string(traincut && evtcut && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));
    // turn this off for now
    trainebvar->SetTrainingCut(std::string(traincut && evtcutvar && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    traineevar->SetTrainingCut(std::string(traincut && evtcutvar && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));

    const double maxsig = 3.0;
    const double shrinkage = 0.1;

    traineb->SetMinEvents(200);     traineb->SetShrinkage(shrinkage);     traineb->SetMinCutSignificance(maxsig);
    trainebvar->SetMinEvents(200);  trainebvar->SetShrinkage(shrinkage);  trainebvar->SetMinCutSignificance(maxsig);
    trainee->SetMinEvents(200);     trainee->SetShrinkage(shrinkage);     trainee->SetMinCutSignificance(maxsig);
    traineevar->SetMinEvents(200);  traineevar->SetShrinkage(shrinkage);  traineevar->SetMinCutSignificance(maxsig);

    traineb->SetTargetVar("GeneratedEnergyStatus3/SCRawEnergy");
    trainebvar->SetTargetVar("abs( targeteb - GeneratedEnergyStatus3/SCRawEnergy) ");
    trainee->SetTargetVar("GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))");
    traineevar->SetTargetVar("abs( targetee - GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))) ");

    std::vector<std::string> *varsf = new std::vector<std::string>;
    varsf->push_back("SCRawEnergy");
    varsf->push_back("scEta");
    varsf->push_back("scPhi");
    varsf->push_back("R9");
    varsf->push_back("E5x5Seed/SCRawEnergy");
    varsf->push_back("etawidth");
    varsf->push_back("phiwidth");
    varsf->push_back("NClusters");
    varsf->push_back("HoE");
    varsf->push_back("rho");
    varsf->push_back("vertices");
    varsf->push_back("EtaSeed-scEta");
    varsf->push_back("atan2(sin(PhiSeed-scPhi),cos(PhiSeed-scPhi))");
    varsf->push_back("ESeed/SCRawEnergy");
    varsf->push_back("E3x3Seed/ESeed");
    varsf->push_back("E5x5Seed/ESeed");
    varsf->push_back("see");
    varsf->push_back("spp");
    // varsf->push_back("sep");
    varsf->push_back("EMaxSeed/ESeed");
    varsf->push_back("E2ndSeed/ESeed");
    varsf->push_back("ETopSeed/ESeed");
    varsf->push_back("EBottomSeed/ESeed");
    varsf->push_back("ELeftSeed/ESeed");
    varsf->push_back("ERightSeed/ESeed");
    varsf->push_back("E2x5MaxSeed/ESeed");
    varsf->push_back("E2x5TopSeed/ESeed");
    varsf->push_back("E2x5BottomSeed/ESeed");
    varsf->push_back("E2x5LeftSeed/ESeed");
    varsf->push_back("E2x5RightSeed/ESeed");

    std::vector<std::string> *varseb = new std::vector<std::string>(*varsf);
    std::vector<std::string> *varsee = new std::vector<std::string>(*varsf);

    varseb->push_back("IEtaSeed");
    varseb->push_back("IPhiSeed");
    varseb->push_back("IEtaSeed%5");
    varseb->push_back("IPhiSeed%2");
    varseb->push_back("(abs(IEtaSeed)<=25)*(IEtaSeed%25) + (abs(IEtaSeed)>25)*((IEtaSeed-25*abs(IEtaSeed)/IEtaSeed)%20)");
    varseb->push_back("IPhiSeed%20");
    varseb->push_back("EtaCrySeed");
    varseb->push_back("PhiCrySeed");

    varsee->push_back("PreShowerOverRaw");

    for (int i = 0; i < varseb->size(); ++i) {
      cout << "var " << i << " = " << varseb->at(i) << endl;
      traineb->AddInputVar(varseb->at(i));
      trainebvar->AddInputVar(varseb->at(i));
    }
    for (int i = 0; i < varsee->size(); ++i) {
      cout << "var " << i << " = " << varsee->at(i) << endl;
      trainee->AddInputVar(varsee->at(i));
      traineevar->AddInputVar(varsee->at(i));
    }

    ROOT::Cintex::Cintex::Enable();

    // TFile *ftmp = new TFile("tmpfile.root","RECREATE");
    GBRApply gbrapply;

    // Train Barrel Energy Regression
    intree->LoadTree(0);
    const GBRForest *foresteb = traineb->TrainForest(nTrees);
    delete traineb;

    // Apply Barrel Energy Regression
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, foresteb, *varseb, "targeteb");

    // Train Barrel Variance Regression
    intree->LoadTree(0);
    const GBRForest *forestebvar = trainebvar->TrainForest(nTrees);
    delete trainebvar;

    // Train Endcap Energy Regression
    intree->LoadTree(0);
    const GBRForest *forestee = trainee->TrainForest(nTrees);
    delete trainee;

    // Apply Endcap Energy Regression
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, forestee, *varsee, "targetee");

    // Train Endcap Variance Regression
    intree->LoadTree(0);
    const GBRForest *foresteevar = traineevar->TrainForest(nTrees);
    delete traineevar;

    TString fname;
    fname = outWeightFile;
    TFile *fout = new TFile(fname, "RECREATE");
    cout << "Saving weights to file " << fname << endl;
    fout->WriteObject(foresteb, "EBCorrection");
    fout->WriteObject(forestebvar, "EBUncertainty");
    fout->WriteObject(forestee, "EECorrection");
    fout->WriteObject(foresteevar, "EEUncertainty");
    fout->WriteObject(varseb, "varlisteb");
    fout->WriteObject(varsee, "varlistee");
    // ftmp->Close();
    // fout->Close();
  }

  // ******** If option is V10, V11, V12, etc. ******** //
  // *** Do the training separately for low and high pT *** //
  if (optionStr == "V10" || optionStr == "V11") {

    GBRTrainer *traineb_lowPt = new GBRTrainer;
    GBRTrainer *traineb_highPt = new GBRTrainer;
    GBRTrainer *trainebvar_lowPt = new GBRTrainer;
    GBRTrainer *trainebvar_highPt = new GBRTrainer;
    GBRTrainer *trainee_lowPt = new GBRTrainer;
    GBRTrainer *trainee_highPt = new GBRTrainer;
    GBRTrainer *traineevar_lowPt = new GBRTrainer;
    GBRTrainer *traineevar_highPt = new GBRTrainer;

    TTree *intree = 0;

    cout << "Training on file " << trainingFile << " with version " << optionChar << endl;
    TChain *chainele = new TChain("eleIDdir/T1");
    chainele->Add(trainingFile);
    chainele->LoadTree(0);
    chainele->SetCacheSize(64*1024*1024);
    chainele->SetCacheLearnEntries();
    intree = chainele;

    traineb_lowPt->AddTree(chainele);
    trainebvar_lowPt->AddTree(chainele);
    trainee_lowPt->AddTree(chainele);
    traineevar_lowPt->AddTree(chainele);
    traineb_highPt->AddTree(chainele);
    trainebvar_highPt->AddTree(chainele);
    trainee_highPt->AddTree(chainele);
    traineevar_highPt->AddTree(chainele);

    TCut traincut_lowPt = "pt>7 && pt<15";
    TCut traincut_highPt = "pt>=15";
    TCut statusenergycut;
    TCut evtcut;
    TCut evtcutvar;

    // if you want to train also the energy variance
    evtcut = "event%2==0 ";
    evtcutvar = "event%2==1 ";
    statusenergycut = "(GeneratedEnergyStatus3-GeneratedEnergyStatus1)/GeneratedEnergyStatus3<0.01 && GeneratedEnergyStatus3>=GeneratedEnergyStatus1";

    traineb_lowPt->SetTrainingCut(std::string(traincut_lowPt && evtcut && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    trainee_lowPt->SetTrainingCut(std::string(traincut_lowPt && evtcut && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));
    traineb_highPt->SetTrainingCut(std::string(traincut_highPt && evtcut && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    trainee_highPt->SetTrainingCut(std::string(traincut_highPt && evtcut && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));
    // turn this off for now
    trainebvar_lowPt->SetTrainingCut(std::string(traincut_lowPt && evtcutvar && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    traineevar_lowPt->SetTrainingCut(std::string(traincut_lowPt && evtcutvar && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));
    trainebvar_highPt->SetTrainingCut(std::string(traincut_highPt && evtcutvar && statusenergycut && "abs(eta)<1.479 && mcmatch==1"));
    traineevar_highPt->SetTrainingCut(std::string(traincut_highPt && evtcutvar && statusenergycut && "abs(eta)>1.479 && abs(eta)<2.5 && mcmatch==1"));

    const double maxsig = 3.0;
    const double shrinkage = 0.1;

    traineb_lowPt->SetMinEvents(200);      traineb_lowPt->SetShrinkage(shrinkage);      traineb_lowPt->SetMinCutSignificance(maxsig);
    traineb_highPt->SetMinEvents(200);     traineb_highPt->SetShrinkage(shrinkage);     traineb_highPt->SetMinCutSignificance(maxsig);
    trainebvar_lowPt->SetMinEvents(200);   trainebvar_lowPt->SetShrinkage(shrinkage);   trainebvar_lowPt->SetMinCutSignificance(maxsig);
    trainebvar_highPt->SetMinEvents(200);  trainebvar_highPt->SetShrinkage(shrinkage);  trainebvar_highPt->SetMinCutSignificance(maxsig);
    trainee_lowPt->SetMinEvents(200);      trainee_lowPt->SetShrinkage(shrinkage);      trainee_lowPt->SetMinCutSignificance(maxsig);
    trainee_highPt->SetMinEvents(200);     trainee_highPt->SetShrinkage(shrinkage);     trainee_highPt->SetMinCutSignificance(maxsig);
    traineevar_lowPt->SetMinEvents(200);   traineevar_lowPt->SetShrinkage(shrinkage);   traineevar_lowPt->SetMinCutSignificance(maxsig);
    traineevar_highPt->SetMinEvents(200);  traineevar_highPt->SetShrinkage(shrinkage);  traineevar_highPt->SetMinCutSignificance(maxsig);

    traineb_lowPt->SetTargetVar("GeneratedEnergyStatus3/SCRawEnergy");
    traineb_highPt->SetTargetVar("GeneratedEnergyStatus3/SCRawEnergy");
    trainebvar_lowPt->SetTargetVar("abs( targeteb_lowPt - GeneratedEnergyStatus3/SCRawEnergy) ");
    trainebvar_highPt->SetTargetVar("abs( targeteb_highPt - GeneratedEnergyStatus3/SCRawEnergy) ");
    trainee_lowPt->SetTargetVar("GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))");
    trainee_highPt->SetTargetVar("GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))");
    traineevar_lowPt->SetTargetVar("abs( targetee_lowPt - GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))) ");
    traineevar_highPt->SetTargetVar("abs( targetee_highPt - GeneratedEnergyStatus3/(SCRawEnergy*(1+PreShowerOverRaw))) ");

    std::vector<std::string> *varsf = new std::vector<std::string>;
    varsf->push_back("SCRawEnergy");
    varsf->push_back("scEta");
    varsf->push_back("scPhi");
    varsf->push_back("R9");
    varsf->push_back("E5x5Seed/SCRawEnergy");
    varsf->push_back("etawidth");
    varsf->push_back("phiwidth");
    varsf->push_back("NClusters");
    varsf->push_back("HoE");
    varsf->push_back("rho");
    varsf->push_back("vertices");
    varsf->push_back("EtaSeed-scEta");
    varsf->push_back("atan2(sin(PhiSeed-scPhi),cos(PhiSeed-scPhi))");
    varsf->push_back("ESeed/SCRawEnergy");
    varsf->push_back("E3x3Seed/ESeed");
    varsf->push_back("E5x5Seed/ESeed");
    varsf->push_back("see");
    varsf->push_back("spp");
    // varsf->push_back("sep");
    varsf->push_back("EMaxSeed/ESeed");
    varsf->push_back("E2ndSeed/ESeed");
    varsf->push_back("ETopSeed/ESeed");
    varsf->push_back("EBottomSeed/ESeed");
    varsf->push_back("ELeftSeed/ESeed");
    varsf->push_back("ERightSeed/ESeed");
    varsf->push_back("E2x5MaxSeed/ESeed");
    varsf->push_back("E2x5TopSeed/ESeed");
    varsf->push_back("E2x5BottomSeed/ESeed");
    varsf->push_back("E2x5LeftSeed/ESeed");
    varsf->push_back("E2x5RightSeed/ESeed");

    std::vector<std::string> *varseb = new std::vector<std::string>(*varsf);
    std::vector<std::string> *varsee = new std::vector<std::string>(*varsf);

    varseb->push_back("IEtaSeed");
    varseb->push_back("IPhiSeed");
    varseb->push_back("IEtaSeed%5");
    varseb->push_back("IPhiSeed%2");
    varseb->push_back("(abs(IEtaSeed)<=25)*(IEtaSeed%25) + (abs(IEtaSeed)>25)*((IEtaSeed-25*abs(IEtaSeed)/IEtaSeed)%20)");
    varseb->push_back("IPhiSeed%20");
    varseb->push_back("EtaCrySeed");
    varseb->push_back("PhiCrySeed");

    varsee->push_back("PreShowerOverRaw");

    for (int i = 0; i < varseb->size(); ++i) {
      cout << "var " << i << " = " << varseb->at(i) << endl;
      traineb_lowPt->AddInputVar(varseb->at(i));
      trainebvar_lowPt->AddInputVar(varseb->at(i));
    }
    for (int i = 0; i < varseb->size(); ++i) {
      cout << "var " << i << " = " << varseb->at(i) << endl;
      traineb_highPt->AddInputVar(varseb->at(i));
      trainebvar_highPt->AddInputVar(varseb->at(i));
    }
    for (int i = 0; i < varsee->size(); ++i) {
      cout << "var " << i << " = " << varsee->at(i) << endl;
      trainee_lowPt->AddInputVar(varsee->at(i));
      traineevar_lowPt->AddInputVar(varsee->at(i));
    }
    for (int i = 0; i < varsee->size(); ++i) {
      cout << "var " << i << " = " << varsee->at(i) << endl;
      trainee_highPt->AddInputVar(varsee->at(i));
      traineevar_highPt->AddInputVar(varsee->at(i));
    }

    ROOT::Cintex::Cintex::Enable();

    // TFile *ftmp = new TFile("tmpfile.root","RECREATE");
    GBRApply gbrapply;

    // Train Barrel Energy Regression (low pT)
    intree->LoadTree(0);
    const GBRForest *foresteb_lowPt = traineb_lowPt->TrainForest(nTrees);
    delete traineb_lowPt;

    // Apply Barrel Energy Regression (low pT)
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, foresteb_lowPt, *varseb, "targeteb_lowPt");

    // Train Barrel Energy Regression (high pT)
    intree->LoadTree(0);
    const GBRForest *foresteb_highPt = traineb_highPt->TrainForest(nTrees);
    delete traineb_highPt;

    // Apply Barrel Energy Regression (high pT)
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, foresteb_highPt, *varseb, "targeteb_highPt");

    // Train Barrel Variance Regression (low pT)
    intree->LoadTree(0);
    const GBRForest *forestebvar_lowPt = trainebvar_lowPt->TrainForest(nTrees);
    delete trainebvar_lowPt;

    // Train Barrel Variance Regression (high pT)
    intree->LoadTree(0);
    const GBRForest *forestebvar_highPt = trainebvar_highPt->TrainForest(nTrees);
    delete trainebvar_highPt;

    // Train Endcap Energy Regression (low pT)
    intree->LoadTree(0);
    const GBRForest *forestee_lowPt = trainee_lowPt->TrainForest(nTrees);
    delete trainee_lowPt;

    // Apply Endcap Energy Regression
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, forestee_lowPt, *varsee, "targetee_lowPt");

    // Train Endcap Energy Regression (high pT)
    intree->LoadTree(0);
    const GBRForest *forestee_highPt = trainee_highPt->TrainForest(nTrees);
    delete trainee_highPt;

    // Apply Endcap Energy Regression
    intree->LoadTree(0);
    gbrapply.ApplyAsFriend(intree, forestee_highPt, *varsee, "targetee_highPt");

    // Train Endcap Variance Regression (low pT)
    intree->LoadTree(0);
    const GBRForest *foresteevar_lowPt = traineevar_lowPt->TrainForest(nTrees);
    delete traineevar_lowPt;

    // Train Endcap Variance Regression (high pT)
    intree->LoadTree(0);
    const GBRForest *foresteevar_highPt = traineevar_highPt->TrainForest(nTrees);
    delete traineevar_highPt;

    TString fname;
    fname = outWeightFile;
    TFile *fout = new TFile(fname, "RECREATE");
    cout << "Saving weights to file " << fname << endl;
    fout->WriteObject(foresteb_lowPt, "EBCorrection_lowPt");
    fout->WriteObject(foresteb_highPt, "EBCorrection_highPt");
    fout->WriteObject(forestebvar_lowPt, "EBUncertainty_lowPt");
    fout->WriteObject(forestebvar_highPt, "EBUncertainty_highPt");
    fout->WriteObject(forestee_lowPt, "EECorrection_lowPt");
    fout->WriteObject(forestee_highPt, "EECorrection_highPt");
    fout->WriteObject(foresteevar_lowPt, "EEUncertainty_lowPt");
    fout->WriteObject(foresteevar_highPt, "EEUncertainty_highPt");
    fout->WriteObject(varseb, "varlisteb");
    fout->WriteObject(varsee, "varlistee");
    // ftmp->Close();
    // fout->Close();
  }
}