// Draw a random x in [X_MIN, X_MAX] distributed according to `func`
// using simple accept-reject sampling.
//
//   func  : TF1 to sample from (assumed non-negative on the window)
//   X_MIN : lower edge of the sampling window
//   X_MAX : upper edge of the sampling window
//   seed  : seed for the internal TRandom3 generator
//
// Returns the accepted x value.
Double_t GetRand(TF1 *func, Double_t X_MIN, Double_t X_MAX, int seed) {
   // Envelope height: maximum over the requested window.
   // BUGFIX: the original used func->GetMaximum() (full TF1 range); if the
   // TF1 range is narrower than [X_MIN, X_MAX] the envelope under-covers
   // and the sampled distribution is wrong.
   Double_t Y_MAX = func->GetMaximum(X_MIN, X_MAX);
   // BUGFIX: was `TRandom2* r = new TRandom3(seed);` — a type mismatch
   // (TRandom3 is not a TRandom2) and a memory leak (never deleted).
   // A stack object preserves the exact random stream for a given seed.
   TRandom3 r(seed);
   Double_t x = 0;
   Double_t y = r.Uniform(); // kept to preserve the original draw sequence
   Double_t f_y = 3e+6;
   // Accept-reject: throw (x, y) uniformly in the envelope box and accept
   // x whenever y falls under the curve.
   while (1) {
      x = r.Uniform(X_MIN, X_MAX);
      y = r.Uniform(0, Y_MAX);
      f_y = func->Eval(x);
      if (f_y > y) break;
   }
   return x;
}
void ThreeDFit() { const int n = 1000; double x[n], y[n], z[n], v[n]; double ev = 0.1; // generate the data TRandom2 r; for (int i = 0; i < n; ++i) { x[i] = r.Uniform(0,10); y[i] = r.Uniform(0,10); z[i] = r.Uniform(0,10); v[i] = sin(x[i] ) + cos(y[i]) + z[i] + r.Gaus(0,ev); } // create a 3d binned data structure ROOT::Fit::BinData data(n,3); double xx[3]; for(int i = 0; i < n; ++i) { xx[0] = x[i]; xx[1] = y[i]; xx[2] = z[i]; // add the 3d-data coordinate, the predictor value (v[i]) and its errors data.Add(xx, v[i], ev); } TF3 * f3 = new TF3("f3","[0] * sin(x) + [1] * cos(y) + [2] * z",0,10,0,10,0,10); f3->SetParameters(2,2,2); ROOT::Fit::Fitter fitter; // wrapped the TF1 in a IParamMultiFunction interface for teh Fitter class ROOT::Math::WrappedMultiTF1 wf(*f3,3); fitter.SetFunction(wf); // bool ret = fitter.Fit(data); if (ret) { const ROOT::Fit::FitResult & res = fitter.Result(); // print result (should be around 1) res.Print(std::cout); // copy all fit result info (values, chi2, etc..) in TF3 f3->SetFitResult(res); // test fit p-value (chi2 probability) double prob = res.Prob(); if (prob < 1.E-2) Error("exampleFit3D","Bad data fit - fit p-value is %f",prob); else std::cout << "Good fit : p-value = " << prob << std::endl; } else Error("exampleFit3D","3D fit failed"); }
// Build a smeared Ge-detector response histogram for a line at `keV0`.
// Draws N samples from the analytic response (GenGeResponseA), smears each
// with an energy-dependent Gaussian resolution and fills `hist` (a fresh
// 8000-bin, 0-2000 keV TH1D is created when hist is null).
//
//   keV0     : line energy in keV
//   hist     : histogram to fill, or null to allocate one
//   N        : number of sampled events
//   reskeV   : resolution scale in keV
//   frakpeak, fraccomp, contshap, cont : passed through to GenGeResponseA
//
// Returns the filled histogram (caller owns it when hist was null).
TH1* GenGeResponse(double keV0,TH1* hist,int N,double reskeV,double frakpeak,double fraccomp,double contshap,double cont){
   TF1* GeResponse=GenGeResponseA(keV0,frakpeak,fraccomp,contshap,cont);
   TH1* response=hist;
   if(!response)response=new TH1D("GeResponse","GeResponse",8000,0,2000);
   // BUGFIX: removed dead local `RES = reskeV*sqrt(keV0)/sqrt(500)`
   // ("res at 500keV") — it was computed but never used.
   // SetSeed() with no argument: non-reproducible seeding (ROOT default).
   TRandom2 rng;
   rng.SetSeed();
   for(int i=0;i<N;i++){
      double E=GeResponse->GetRandom();
      // Resolution at the peak is `reskeV`; everything well below the
      // photopeak gets a broader (up to 3x) smearing via the (E/keV0)^4 ramp.
      double res=pow(E/keV0,4);
      if(res>1)res=1;
      res=(3-(res*2))*reskeV;
      E+=rng.Gaus(0,res);
      response->Fill(E);
   }
   delete GeResponse;
   return response;
}
int main( int argc, char * argv[]) { if( argc < 3) { std::cout << "usage : tupleMaker input-filenames output-filename <Unweight>" << std::endl; return 1; } std::string lastarg = std::string( argv[argc-1] ); int lastargisfilename = 0; if (lastarg.length() > 1 ){ lastargisfilename =1; } if (!lastargisfilename){ if( argc < 5) { std::cout << "usage : tupleMaker input-filenames output-filename <Unweight> <PDF reweight>" << std::endl; return 1; } } // // read in some control parameters // bool Unweight = 0; bool PDF_reweight = 0; if (!lastargisfilename){ Unweight = atoi( argv[argc-2] ); PDF_reweight = atoi( argv[argc-1] ); std::cout<<"Unweight = "<<Unweight<<" PDF reweight = "<<PDF_reweight<<std::endl; if(PDF_reweight==1) { std::cout<<"========================================================================================================"<<std::endl; std::cout<<"Need to make sure you have right PDF text files resbos_P/resbos_weights_PDF_*.dat"<<std::endl; std::cout<<"========================================================================================================"<<std::endl; } } // get input and output file names std::vector<std::string> in_files; int nInputs = argc-(2-lastargisfilename); if(PDF_reweight) nInputs = argc-(3-lastargisfilename); for( int i = 1; i < nInputs;++i) { in_files.push_back( std::string( argv[i] ) ); std::cout<<"Input file: "<<std::string(argv[i])<<std::endl; } int nOutput = argc-(2-lastargisfilename); if(PDF_reweight) nOutput = argc-(3-lastargisfilename); std::string out_filename = std::string( argv[nOutput] ); std::cout<<"Output file: "<<out_filename<<std::endl; // setup outputfile TFile * of = new TFile(out_filename.c_str(),"RECREATE"); if( of == 0) { std::cout << "Couldn't open output file : " << out_filename << std::endl; return 1; } Output * output = new Output(); output->Reset(); // initialise random number generator TRandom2 random; random.SetSeed(19742005); // loop over files Double_t StandardWeight =0; int nfiles = 
in_files.size(); if (Unweight){ std::cout << "All events will have weight one" << std::endl; std::cout << "Multiple files are merged using pass/fail based on the weight of events in the first file -- Standard Weight as shown below" << std::endl; } if (!Unweight) std::cout << "Unweighted events, Weights from generator are maintained"<< std::endl; // // read event weights for different PDF files // std::ifstream f_weights[45]; vector<float> pdfwgts; pdfwgts.resize(45); if(PDF_reweight) { char name[50]; for(int i=0; i<44; i++) { sprintf(name, "%s%d%s", "resbos_P/resbos_weights_PDF_", i+1, ".dat"); f_weights[i].open(name, std::ios::in); if( !f_weights[i] ) std::cout<<"Could not find the weight file "<<name<<std::endl; } } #ifdef __USE_PDFS_RESBOS__ Unweight = true; /// { std::ifstream tmp_file; tmp_file.open( "weights_00.hep" , std::ios::in ); if( tmp_file ) { int num ; double wgt ; while( tmp_file >> num >> wgt ) { if( wgt > StandardWeight ) StandardWeight = wgt; } tmp_file.close(); } } /// for( int i = 0 ; i < 45 ; i++ ) { TString name; name.Form( "weights_%02i.hep" , i ); f_weights[i].open( name.Data() , std::ios::in ); if( !f_weights[i] ) std::cout<<"Could not find the weight file "<<name<<std::endl; } #endif for(int i =0; i < nfiles; ++i) { // open file std::cout << "Processing file: " << in_files[i].c_str() << std::endl; std::ifstream f((in_files[i]).c_str()); if( !f ) { std::cout << "couldn't open file : " << in_files[i] << std::endl; return 1; } bool finished_file = false; // this loops over events while( ! 
finished_file ) { int evn; double evt_wt; double Q2,that,uhat,x1,x2,flav1,flav2 ; #ifdef __USE_PDFS__ f >> evn >> evt_wt >> Q2 >> that >> uhat >> x1 >> x2 >> flav1 >> flav2; #else f >> evn >> evt_wt; #endif if(evn % 100000==0) std::cout<<"Processing event: "<<evn<<std::endl; if( evn == 0 ) { finished_file = true; continue; } #ifdef __USE_PDFS_RESBOS__ for( int j = 0 ; j<45 ; j++ ) { double evn_tmp , wgt_tmp; f_weights[j] >> evn_tmp >> wgt_tmp; if( evn_tmp != evn ) { cout << " WRONG EVENT NUMBER!!!! " << j << " " << evn_tmp << " " << evn << endl; return 1; } if( j == 0 ) evt_wt = wgt_tmp; if( evt_wt > 0 && pdfwgts[j] > 0 ) pdfwgts[j] = wgt_tmp / evt_wt; else pdfwgts[j] = 1.0; if( pdfwgts[j] < 0 ) pdfwgts[j] = 0.0; } #endif float vx,vy,vz; f>> vx >> vy >>vz; bool finished_particles = false; if( !f.eof()) { if (StandardWeight == 0. && Unweight){ StandardWeight = evt_wt; std::cout << "Standard Weight = " << StandardWeight << std::endl; } } else { finished_file = true; } Bool_t keeper = kTRUE; if (Unweight){ Double_t weight_ratio= TMath::Abs(evt_wt/StandardWeight); if (weight_ratio > 1.){ std::cout << "This event has a weight = " << weight_ratio <<" times the value of Standard Weight" << std::endl; } if (random.Rndm() > weight_ratio) keeper = kFALSE; } if (Unweight) evt_wt = TMath::Abs(evt_wt) / evt_wt ; // // read event weight for each PDF set // double weight_PDF[44]; if(PDF_reweight) { for(int j=0; j<44; j++) (f_weights[j]) >> weight_PDF[i]; } // save PDF information into the root file // output->setPDFWeights(weight_PDF); if (keeper) { if(PDF_reweight) output->NewEvent( evn, evt_wt, 0 , vx,vy,vz, weight_PDF, 44); else { output->NewEvent( evn, evt_wt, 0 , vx,vy,vz , Q2 , x1 , x2 , flav1 , flav2 , pdfwgts ); } } //bool doFlip = false; //doFlip = ( random.Rndm() > 0.5); // this loops over particles in an event while( ! 
finished_particles && !f.eof()) { int id; f >> id; if( id == 0 ) finished_particles = true; else { float px,py,pz,E; f>> px >> py >> pz >> E; int origin, udk; f >> origin >> udk; //std::cout << id << " " << px << " " << py << " " << pz << " " << E << std::endl; // do the occaisional CP inversion // flip W+ -> W-, e+->e-, nu -> nu-bar and invert all momenta //if( doFlip ) { // id is isajet ids // if( id != 10 ) id = -id; // photon == anti-photon // px = -px; // py = -py; // pz = -pz; //} if(keeper) output->AddParticle( id, px,py,pz,E,origin); } } if (keeper){ output->Fill(); } if( f.eof()) finished_file = true; } f.close(); } output->Write(); of = output->Tree()->GetCurrentFile(); of->Close(); return 0; };
void plotFeedDown(int ntest=1, int centL=0,int centH=100) { // B cross-section TFile *inf = new TFile("output_pp_Bmeson_5TeV_y1.root"); // TFile *inf = new TFile("outputBplus_D_pp_rap24.root"); // TFile *inf = new TFile("outputBplus_pp.root"); TH1D *hBPtMax = (TH1D*)inf->Get("hmaxall"); TH1D *hBPtMin = (TH1D*)inf->Get("hminall"); TH1D *hBPt = (TH1D*)inf->Get("hpt"); hBPt->SetName("hBPt"); hBPtMax->SetName("hBPtMax"); hBPtMin->SetName("hBPtMin"); TH1D *hBMaxRatio = (TH1D*)hBPt->Clone("hBMaxRatio"); hBMaxRatio->Divide(hBPtMax); TH1D *hBMinRatio = (TH1D*)hBPt->Clone("hBMinRatio"); hBMinRatio->Divide(hBPtMin); hBPt->Rebin(1); // D cross-section // TFile *infD = new TFile("outputD0_D_pp.root"); TFile *infD = new TFile("output_pp_d0meson_5TeV_y1.root"); TH1D *hDPtMax = (TH1D*)infD->Get("hmaxall"); TH1D *hDPtMin = (TH1D*)infD->Get("hminall"); TH1D *hDPt = (TH1D*)infD->Get("hpt"); hDPt->SetName("hDPt"); hDPtMax->SetName("hDPtMax"); hDPtMin->SetName("hDPtMin"); hDPt->Rebin(1); // ratio of B->D0: not correct85% from PYTHIA //hBPt->Scale(0.85); hBPt->Scale(0.598); // c->D (55.7%) hDPt->Scale(0.557); TFile *inf2 = new TFile("/data/HeavyFlavourRun2/BtoDPythia/treefile_merged.root"); // TFile *inf2 = new TFile("test.root"); TTree *hi = (TTree*) inf2->Get("ana/hi"); hi->SetAlias("yD","log((sqrt(1.86484*1.86484+pt*pt*cosh(eta)*cosh(eta))+pt*sinh(eta))/sqrt(1.86484*1.86484+pt*pt))"); hi->SetAlias("yB","log((sqrt(5.3*5.3+pt*pt*cosh(eta)*cosh(eta))+pt*sinh(eta))/sqrt(5.3*5.3+pt*pt))"); hi->SetAlias("yJ","log((sqrt(3.09692*3.09692+pt*pt*cosh(eta)*cosh(eta))+pt*sinh(eta))/sqrt(3.09692*3.09692+pt*pt))"); // 6.5, 8, 10, 13, 30 /* TH1D *hBNoCut = (TH1D*)hBPt->Clone("hBNoCut"); TH1D *hBHasD = (TH1D*)hBPt->Clone("hBHasD"); hi->Draw("pt>>hBHasD","(abs(pdg)>500&&abs(pdg)<600&&abs(yB)<2.4)&&Sum$(abs(pdg)==421&&abs(yD)<2)>0"); hi->Draw("pt>>hBNoCut","(abs(pdg)>500&&abs(pdg)<600&&abs(yB)<2.4)"); ; hBNoCut->Divide(hBHasD); hBPt->Divide(hBNoCut); */ // 0-100% int npoint = 7; double 
ptBins_npjpsi[8] = {1,3,6.5,8,10,13,30,300}; double raa_npjpsi[7];// = {1,0.6, 0.52,0.43,0.43,0.34,0.5}; double raaStat_npjpsi[7];// = {1,0.4,0.12,0.08,0.09,0.07,0.5}; double raaSyst_npjpsi[7];// = {0,0,0.06,0.05,0.05,0.04,0}; /* 0-10, 10-20, 20-30, 30-40, 40-50, 50-100 double nonPromptJpsiRAA_2012[] = {0.,0.38,0.43,0.48,0.52,0.65,0.69}; double nonPromptJpsiRAAError_2012[] = {0.,0.02,0.03,0.03,0.04,0.06,0.07}; double nonPromptJpsiRAAErrorSyst_2012[] = {0.,0.04,0.05,0.05,0.06,0.07,0.07}; */ if (centL==0&¢H==100) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=0.6; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=0.4; // prelim raa_npjpsi[2]=0.52; raaStat_npjpsi[2]=0.12; raaSyst_npjpsi[2]=0.06; // np jpsi pas raa_npjpsi[3]=0.43; raaStat_npjpsi[3]=0.08; raaSyst_npjpsi[3]=0.05; // np jpsi pas raa_npjpsi[4]=0.43; raaStat_npjpsi[4]=0.09; raaSyst_npjpsi[4]=0.05; // np jpsi pas raa_npjpsi[5]=0.34; raaStat_npjpsi[5]=0.07; raaSyst_npjpsi[5]=0.04; // np jpsi pas raa_npjpsi[6]=0.5; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.25; // b-jet } if (centL==0&¢H==10) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.38; raaStat_npjpsi[2]=0.02; raaSyst_npjpsi[2]=0.04; // np jpsi pas raa_npjpsi[3]=0.38; raaStat_npjpsi[3]=0.02; raaSyst_npjpsi[3]=0.04; // np jpsi pas raa_npjpsi[4]=0.38; raaStat_npjpsi[4]=0.02; raaSyst_npjpsi[4]=0.04; // np jpsi pas raa_npjpsi[5]=0.38; raaStat_npjpsi[5]=0.02; raaSyst_npjpsi[5]=0.04; // np jpsi pas raa_npjpsi[6]=0.39; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.20; // b-jet } if (centL==10&¢H==20) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.43; raaStat_npjpsi[2]=0.03; raaSyst_npjpsi[2]=0.05; // np jpsi pas raa_npjpsi[3]=0.43; 
raaStat_npjpsi[3]=0.03; raaSyst_npjpsi[3]=0.05; // np jpsi pas raa_npjpsi[4]=0.43; raaStat_npjpsi[4]=0.03; raaSyst_npjpsi[4]=0.05; // np jpsi pas raa_npjpsi[5]=0.43; raaStat_npjpsi[5]=0.03; raaSyst_npjpsi[5]=0.05; // np jpsi pas raa_npjpsi[6]=0.47; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.24; // b-jet } if (centL==20&¢H==30) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.48; raaStat_npjpsi[2]=0.03; raaSyst_npjpsi[2]=0.05; // np jpsi pas raa_npjpsi[3]=0.48; raaStat_npjpsi[3]=0.03; raaSyst_npjpsi[3]=0.05; // np jpsi pas raa_npjpsi[4]=0.48; raaStat_npjpsi[4]=0.03; raaSyst_npjpsi[4]=0.05; // np jpsi pas raa_npjpsi[5]=0.48; raaStat_npjpsi[5]=0.03; raaSyst_npjpsi[5]=0.05; // np jpsi pas raa_npjpsi[6]=0.47; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.24; // b-jet } if (centL==30&¢H==40) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.52; raaStat_npjpsi[2]=0.04; raaSyst_npjpsi[2]=0.06; // np jpsi pas raa_npjpsi[3]=0.52; raaStat_npjpsi[3]=0.04; raaSyst_npjpsi[3]=0.06; // np jpsi pas raa_npjpsi[4]=0.52; raaStat_npjpsi[4]=0.04; raaSyst_npjpsi[4]=0.06; // np jpsi pas raa_npjpsi[5]=0.52; raaStat_npjpsi[5]=0.04; raaSyst_npjpsi[5]=0.06; // np jpsi pas raa_npjpsi[6]=0.61; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.30; // b-jet } if (centL==40&¢H==50) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.65; raaStat_npjpsi[2]=0.06; raaSyst_npjpsi[2]=0.07; // np jpsi pas raa_npjpsi[3]=0.65; raaStat_npjpsi[3]=0.06; raaSyst_npjpsi[3]=0.07; // np jpsi pas raa_npjpsi[4]=0.65; raaStat_npjpsi[4]=0.06; raaSyst_npjpsi[4]=0.07; // np jpsi pas raa_npjpsi[5]=0.65; raaStat_npjpsi[5]=0.06; 
raaSyst_npjpsi[5]=0.07; // np jpsi pas raa_npjpsi[6]=0.61; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.30; // b-jet } if (centL==50&¢H==100) { raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.69; raaStat_npjpsi[2]=0.07; raaSyst_npjpsi[2]=0.07; // np jpsi pas raa_npjpsi[3]=0.69; raaStat_npjpsi[3]=0.07; raaSyst_npjpsi[3]=0.07; // np jpsi pas raa_npjpsi[4]=0.69; raaStat_npjpsi[4]=0.07; raaSyst_npjpsi[4]=0.07; // np jpsi pas raa_npjpsi[5]=0.69; raaStat_npjpsi[5]=0.07; raaSyst_npjpsi[5]=0.07; // np jpsi pas raa_npjpsi[6]=0.70; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.35; // b-jet } if (centL==0&¢H==20) { //averaged by ncoll raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.4; raaStat_npjpsi[2]=0.03; raaSyst_npjpsi[2]=0.05; // np jpsi pas raa_npjpsi[3]=0.4; raaStat_npjpsi[3]=0.03; raaSyst_npjpsi[3]=0.05; // np jpsi pas raa_npjpsi[4]=0.4; raaStat_npjpsi[4]=0.03; raaSyst_npjpsi[4]=0.05; // np jpsi pas raa_npjpsi[5]=0.4; raaStat_npjpsi[5]=0.03; raaSyst_npjpsi[5]=0.05; // np jpsi pas raa_npjpsi[6]=0.42; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.21; // b-jet } if (centL==10&¢H==30) { //averaged by ncoll raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.45; raaStat_npjpsi[2]=0.03; raaSyst_npjpsi[2]=0.05; // np jpsi pas raa_npjpsi[3]=0.45; raaStat_npjpsi[3]=0.03; raaSyst_npjpsi[3]=0.05; // np jpsi pas raa_npjpsi[4]=0.45; raaStat_npjpsi[4]=0.03; raaSyst_npjpsi[4]=0.05; // np jpsi pas raa_npjpsi[5]=0.45; raaStat_npjpsi[5]=0.03; raaSyst_npjpsi[5]=0.05; // np jpsi pas raa_npjpsi[6]=0.47; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.24; // b-jet } if (centL==30&¢H==50) { //averaged by ncoll 
raa_npjpsi[0]=1.0; raaStat_npjpsi[0]=0.0; raaSyst_npjpsi[0]=1.0; // no measurement raa_npjpsi[1]=1.0; raaStat_npjpsi[1]=0.0; raaSyst_npjpsi[1]=1.0; // no measurement raa_npjpsi[2]=0.57; raaStat_npjpsi[2]=0.06; raaSyst_npjpsi[2]=0.07; // np jpsi pas raa_npjpsi[3]=0.57; raaStat_npjpsi[3]=0.06; raaSyst_npjpsi[3]=0.07; // np jpsi pas raa_npjpsi[4]=0.57; raaStat_npjpsi[4]=0.06; raaSyst_npjpsi[4]=0.07; // np jpsi pas raa_npjpsi[5]=0.57; raaStat_npjpsi[5]=0.06; raaSyst_npjpsi[5]=0.07; // np jpsi pas raa_npjpsi[6]=0.61; raaStat_npjpsi[6]=0.0; raaSyst_npjpsi[6]=0.30; // b-jet } TH1D *hNPJpsiRAA = new TH1D("hNPJpsiRAA","",npoint,ptBins_npjpsi); for (int i=1;i<=npoint;i++) { hNPJpsiRAA->SetBinContent(i,raa_npjpsi[i-1]); hNPJpsiRAA->SetBinError(i,sqrt(raaSyst_npjpsi[i-1]*raaSyst_npjpsi[i-1]+raaStat_npjpsi[i-1]*raaStat_npjpsi[i-1])); } TCanvas *cJpsiRAA = new TCanvas("cJpsiRAA","",600,600); cJpsiRAA->SetLogx(); TExec *setex2 = new TExec("setex2","gStyle->SetErrorX(0.5)"); setex2->Draw(); hNPJpsiRAA->SetXTitle("Non-prompt J/psi R_{AA} (GeV/c)"); hNPJpsiRAA->SetXTitle("Non-prompt J/psi p_{T} (GeV/c)"); hNPJpsiRAA->SetYTitle("R_{AA}"); hNPJpsiRAA->Draw("e1"); TCanvas *c = new TCanvas("c","",600,600); TH2D *hJpsi= new TH2D("hJpsi","",hBPt->GetNbinsX(),hBPt->GetBinLowEdge(1),hBPt->GetBinLowEdge(hBPt->GetNbinsX()+1), 299*4,1,300); TH2D *hD= new TH2D("hD","",hBPt->GetNbinsX(),hBPt->GetBinLowEdge(1),hBPt->GetBinLowEdge(hBPt->GetNbinsX()+1), 299*4,1,300); hi->Draw("pt:BPt>>hJpsi","pdg==443&&BPt>0&&abs(yJ)<1"); hi->Draw("pt:BPt>>hD","abs(pdg)==421&&BPt>0&&abs(yD)<1"); hJpsi->Sumw2(); hD->Sumw2(); reweighthisto(hBPt,hD); reweighthisto(hBPt,hJpsi); hJpsi->ProjectionY()->Draw("hist"); hD->SetLineColor(4); hD->SetMarkerColor(4); hD->ProjectionY()->Draw("hist same"); hBPt->Draw("hist same"); hJpsi->SetXTitle("B p_{T} (GeV/c)"); hJpsi->SetYTitle("J/#psi p_{T} (GeV/c)"); hD->SetXTitle("B p_{T} (GeV/c)"); hD->SetYTitle("D^{0} p_{T} (GeV/c)"); TCanvas *c2= new TCanvas("c2","B RAA band",600,600); 
TRandom2 rnd; // hJpsi ->ProjectionX()->Draw("hist"); TH2D *hRAATmp = new TH2D("hRAATmp","",97,3,100,100,0,2); hRAATmp->SetXTitle("B p_{T} (GeV/c)"); hRAATmp->SetYTitle("R_{AA}"); hRAATmp->Draw(); TCanvas *c3= new TCanvas("c3","D RAA band",600,600); TH2D *hDRAATmp = new TH2D("hDRAATmp","",47,3,50,100,0,2); hDRAATmp->SetXTitle("D^{0} p_{T} (GeV/c)"); hDRAATmp->SetYTitle("R_{AA}"); hDRAATmp->Draw(); TCanvas *c4= new TCanvas("c4","B->D fraction band",600,600); TH2D *hBtoDTmp = new TH2D("hBtoDTmp","",47,3,50,100,0,2); hBtoDTmp->SetXTitle("D^{0} p_{T} (GeV/c)"); hBtoDTmp->SetYTitle("Non-prompt D fraction"); hBtoDTmp->Draw(); TH1D *hDFromBPt= (TH1D*)hD->ProjectionY()->Clone("hDFromBPt"); TH1D *hDFromBPtFraction= (TH1D*)hD->ProjectionY()->Clone("hDFromBPtFraction"); hDFromBPtFraction->Divide(hDPt); TH1D *hDFromBPtMax= (TH1D*)hD->ProjectionY()->Clone("hDFromBPtMax"); TH1D *hDFromBPtMin= (TH1D*)hD->ProjectionY()->Clone("hDFromBPtMin"); setHist(hDFromBPtMax,-1e10); setHist(hDFromBPtMin,1e10); for (int i=0;i<ntest;i++) { if (i%10==0) cout <<i<<endl; TH1D *hRAASample = (TH1D*)hNPJpsiRAA->Clone(Form( "hRAASample_%d",i)); for (int j=1;j<=hRAASample->GetNbinsX();j++) { double RAA = (rnd.Rndm()*2-1)*hNPJpsiRAA->GetBinError(j)+hNPJpsiRAA->GetBinContent(j); hRAASample->SetBinContent(j,RAA); } TH2D *hJpsiClone = (TH2D*)hJpsi->Clone(Form("hJpsiClone_%d",i)); reweighthisto(hBPt,hJpsiClone,hRAASample,1); TH1D *hBRAA = hJpsiClone->ProjectionX(Form("hBRAA_%d",i)); c2->cd(); hBRAA->Divide(hBPt); hBRAA->SetLineWidth(3); hBRAA->SetLineColor(kGray); hBRAA->Rebin(4); hBRAA->Scale(1./4.); hBRAA->Draw("hist c same"); delete hJpsiClone; TH2D *hDClone = (TH2D*)hD->Clone(Form("hDClone_%d",i)); reweighthisto(hBPt,hDClone,hBRAA,0,1); TH1D *hDRAA = hDClone->ProjectionY(Form("hDRAA_%d",i)); getMaximum(hDFromBPtMax,hDRAA); getMinimum(hDFromBPtMin,hDRAA); c3->cd(); hDRAA->Divide(hDFromBPt); hDRAA->SetLineWidth(3); hDRAA->SetLineColor(kGray); hDRAA->Draw("hist c same"); c4->cd(); TH1D *hBtoDFrac = 
hDClone->ProjectionY(Form("hBtoDFrac_%d",i)); hBtoDFrac->Divide(hDPt); hBtoDFrac->SetLineWidth(3); hBtoDFrac->SetLineColor(kGray); hBtoDFrac->Draw("hist same"); delete hDClone; // delete hBRAA; // delete hDRAA; } TFile *outf = new TFile(Form("BtoD-%d-%d.root",centL,centH),"recreate"); TH1D *hDFromBPtCentral=(TH1D*)hDFromBPtMax->Clone("hDFromBPtCentral"); hDFromBPtCentral->Add(hDFromBPtMin); hDFromBPtCentral->Scale(1./2); hNPJpsiRAA->Write(); hDFromBPtMax->Write(); hDFromBPtMin->Write(); hDFromBPtCentral->Write(); hDFromBPt->Write(); hJpsi->Write(); hD->Write(); hDFromBPtFraction->Write(); outf->Write(); }
void write(int n) { TRandom2 R; TStopwatch timer; R.SetSeed(1); timer.Start(); double s = 0; for (int i = 0; i < n; ++i) { s += R.Gaus(0,10); s += R.Gaus(0,10); s += R.Gaus(0,10); s += R.Gaus(100,10); } timer.Stop(); std::cout << s/double(n) << std::endl; std::cout << " Time for Random gen " << timer.RealTime() << " " << timer.CpuTime() << std::endl; TFile f1("mathcoreVectorIO_1.root","RECREATE"); // create tree TTree t1("t1","Tree with new LorentzVector"); XYZTVector *v1 = new XYZTVector(); t1.Branch("LV branch","ROOT::Math::XYZTVector",&v1); R.SetSeed(1); timer.Start(); for (int i = 0; i < n; ++i) { double Px = R.Gaus(0,10); double Py = R.Gaus(0,10); double Pz = R.Gaus(0,10); double E = R.Gaus(100,10); //CylindricalEta4D<double> & c = v1->Coordinates(); //c.SetValues(Px,pY,pZ,E); v1->SetCoordinates(Px,Py,Pz,E); t1.Fill(); } f1.Write(); timer.Stop(); std::cout << " Time for new Vector " << timer.RealTime() << " " << timer.CpuTime() << std::endl; t1.Print(); // create tree with old LV TFile f2("mathcoreVectorIO_2.root","RECREATE"); TTree t2("t2","Tree with TLorentzVector"); TLorentzVector * v2 = new TLorentzVector(); TLorentzVector::Class()->IgnoreTObjectStreamer(); TVector3::Class()->IgnoreTObjectStreamer(); t2.Branch("TLV branch","TLorentzVector",&v2,16000,2); R.SetSeed(1); timer.Start(); for (int i = 0; i < n; ++i) { double Px = R.Gaus(0,10); double Py = R.Gaus(0,10); double Pz = R.Gaus(0,10); double E = R.Gaus(100,10); v2->SetPxPyPzE(Px,Py,Pz,E); t2.Fill(); } f2.Write(); timer.Stop(); std::cout << " Time for old Vector " << timer.RealTime() << " " << timer.CpuTime() << endl; t2.Print(); }
void readinhist(){ ifstream infile("sn124p_250.out"); ifstream infile("gampi_sn120_n_fsu30_187.out"); Double_t t1[1800],t2[1800],t3[1800],t4[1800],t5[1800],t6[1800]; char dummy; for(int j = 0; j< 1800; j++){ t1[j] = .0; t2[j] = .0; t3[j] = .0; t4[j] = .0; t6[j] = 0.0; } if(!infile){ cout << "Cannot open file!!!" << endl; } else { for(int i=0; i< 1800; i++){ //while(!infile.eof()){ infile >> dummy >> t1[i] >> t2[i] >> t3[i] >> t4[i]; t5[i] = t1[i] - 0.05; } } for(i=0; i<1800; i++) cout << "t1[" << i << "]: " << t1[i] << ", t2[" << i << "]: " << t2[i] <<endl; TGraph *gr1 = new TGraph(1800,t5,t2); TRandom2 *fRandom = new TRandom2(0); Double_t sigma_res = 2.0; Double_t mean_res = 90.05; // just to have everything inside bin center is bin 901 TH1F *testH1 = new TH1F("testH1","testH1",1800,0.0,180.0); Int_t n_trials = 100000; Double_t val_random; //Creating random distribution with the right sigma for (int i=0 ; i<n_trials; i++) { val_random = fRandom->Gaus(mean_res,sigma_res); testH1->Fill(val_random); } Int_t n_counts = int(3*sigma_res/testH1->GetBinWidth(1)); // I will count from -3sigma to 3sigma for (int i=0; i<1800; i++) { for (int j=-n_counts; j<n_counts ; j++) { if ((i+j)>=0 && (i+j)<1800) { t6[i] = t6[i] + t2[i+j] * double(testH1->GetBinContent(901+j)) / double(n_trials); } } } TGraph *gr2 = new TGraph(1800,t5,t6); TFile *fout = NULL; if( fout )delete fout; fout = new TFile("sn124p_250.root","RECREATE"); gr2->Write(); TCanvas *c1 = NULL; if( c1 )delete c1; c1 = new TCanvas("c1"); gr2->Draw(""); }
// this is the main program, no need to do anything here int main(int argc, char* argv[]) { // sum of squares error info for histograms TH1D::SetDefaultSumw2(); // create analysis // NoiseSystematics analyser(argc, argv); // analyser.run(); TFile* ofile = new TFile("NoiseSystematics.root", "RECREATE"); std::cout << "Output file : " << ofile->GetName() << std::endl; if (ofile->IsZombie()) { std::exit(2); } TH1F hcosmic("hcosmic","",10,0,10); TH1F hobs("hobs",";# expected events;",10,0,10); TH1F hnoise("hnoise","",10,0,10); TRandom2 rndm; for (int i = 0; i < 10000; i++) { // DT by RPC background: 0.96061 +/- 0.559146 double ncos = rndm.Poisson(rndm.Gaus (0.96, 0.56)); double nobs = rndm.PoissonD(2.); hcosmic.Fill(ncos); hobs.Fill(nobs); hnoise.Fill(nobs-ncos); //std::cout << ncos << "\t" << nobs << "\t" << nobs-ncos << std::endl; } double nq = 1; Double_t xq[1] = {0.68}; // position where to compute the quantiles in [0,1] Double_t yq[1] = {0.}; // array to contain the quantiles hnoise.GetQuantiles(nq,yq,xq); std::cout << xq[0] << "\t" << yq[0] << std::endl; ofile->cd(); ofile->Write("",TObject::kOverwrite); TCanvas c("c"); hnoise.SetStats(0); hobs.SetStats(0); hcosmic.SetStats(0); hcosmic.SetLineColor(kRed); //hcosmic.Scale(1.4); hobs.SetLineColor(kBlue); hnoise.SetLineColor(kBlack); hcosmic.SetMaximum(hcosmic.GetMaximum()*1.1); hcosmic.Draw("hist"); hobs.Draw("hist same"); hnoise.Draw("hist same"); c.Update(); TLine line(yq[0],0,yq[0],hcosmic.GetMaximum()); line.SetLineColor(kBlack); line.SetLineStyle(5); line.SetLineWidth(2); line.Draw(); TLegend leg(0.6, 0.70, 0.88, 0.85); leg.SetFillColor(kWhite); leg.AddEntry("hcosmic", "N_{cosmic}", "l"); leg.AddEntry("hobs", "N_{obs} [#lambda = 2]","l"); leg.AddEntry("hnoise", "n_{noise} = N_{obs} - N_{cosmic}","l"); leg.SetBorderSize(0); leg.Draw(); c.SaveAs("NoiseSystematics.pdf"); return 0; }
// Smear `val` with a Gaussian whose width is a *relative* fraction of the
// value itself (width = val * sigma).  Uses the file-global generator `rnd`.
static inline double GausAdd2(double val, double sigma) {
   const double width = val * sigma;
   return rnd.Gaus(val, width);
};
// Smear `val` with a Gaussian whose width scales like sqrt(val)
// (Poisson-style: width = sqrt(val) * sigma).  Uses the file-global
// generator `rnd`.
static inline double GausAdd(double val, double sigma) {
   const double width = sqrt(val) * sigma;
   return rnd.Gaus(val, width);
};
// Convenience wrapper: a Gaussian draw from the file-global generator
// `rnd`, defaulting to the standard normal N(0, 1).
static inline double Gaus(double mean = 0, double sigma = 1) {
   return rnd.Gaus(mean, sigma);
};
void limit() {
   //This program demonstrates the computation of 95 % C.L. limits.
   //It uses a set of randomly created histograms.
   //
   //Author: [email protected] on 21.08.02

   // Create a new canvas.
   TCanvas *c1 = new TCanvas("c1","Dynamic Filling Example",200,10,700,500);
   c1->SetFillColor(42);

   // Create some histograms: expected background, expected signal and a
   // fake "observed data" set, all on the same 30-bin axis.
   TH1D* background = new TH1D("background","The expected background",30,-4,4);
   TH1D* signal = new TH1D("signal","the expected signal",30,-4,4);
   TH1D* data = new TH1D("data","some fake data points",30,-4,4);
   background->SetFillColor(48);
   signal->SetFillColor(41);
   data->SetMarkerStyle(21);
   data->SetMarkerColor(kBlue);
   background->Sumw2(); // needed for stat uncertainty
   signal->Sumw2(); // needed for stat uncertainty

   // Fill histograms randomly: background ~ N(0,1), signal ~ N(1,0.2),
   // with small per-entry weights; data ~ N(0,1) (background-like).
   TRandom2 r;
   Float_t bg,sig,dt;
   for (Int_t i = 0; i < 25000; i++) {
      bg = r.Gaus(0,1);
      sig = r.Gaus(1,.2);
      background->Fill(bg,0.02);
      signal->Fill(sig,0.001);
   }
   for (Int_t i = 0; i < 500; i++) {
      dt = r.Gaus(0,1);
      data->Fill(dt);
   }
   // Stack signal on background and overlay the data points.
   THStack *hs = new THStack("hs","Signal and background compared to data...");
   hs->Add(background);
   hs->Add(signal);
   hs->Draw("hist");
   data->Draw("PE1,Same");
   c1->Modified();
   c1->Update();
   c1->GetFrame()->SetFillColor(21);
   c1->GetFrame()->SetBorderSize(6);
   c1->GetFrame()->SetBorderMode(-1);
   c1->Modified();
   c1->Update();
   gSystem->ProcessEvents();

   // Compute the limits (50000 toy MC experiments, no systematics).
   cout << "Computing limits... " << endl;
   TLimitDataSource* mydatasource = new TLimitDataSource(signal,background,data);
   TConfidenceLevel *myconfidence = TLimit::ComputeLimit(mydatasource,50000);
   cout << "CLs    : "   << myconfidence->CLs()  << endl;
   cout << "CLsb   : "   << myconfidence->CLsb() << endl;
   cout << "CLb    : "   << myconfidence->CLb()  << endl;
   cout << "< CLs >  : " << myconfidence->GetExpectedCLs_b()  << endl;
   cout << "< CLsb > : " << myconfidence->GetExpectedCLsb_b() << endl;
   cout << "< CLb >  : " << myconfidence->GetExpectedCLb_b()  << endl;

   // Add stat uncertainty: same data source, third argument enables the
   // statistical fluctuation of the templates.
   cout << endl << "Computing limits with stat systematics... " << endl;
   TConfidenceLevel *mystatconfidence = TLimit::ComputeLimit(mydatasource,50000,true);
   cout << "CLs    : "   << mystatconfidence->CLs()  << endl;
   cout << "CLsb   : "   << mystatconfidence->CLsb() << endl;
   cout << "CLb    : "   << mystatconfidence->CLb()  << endl;
   cout << "< CLs >  : " << mystatconfidence->GetExpectedCLs_b()  << endl;
   cout << "< CLsb > : " << mystatconfidence->GetExpectedCLsb_b() << endl;
   cout << "< CLb >  : " << mystatconfidence->GetExpectedCLb_b()  << endl;

   // Add some systematics: two named error sources applied to background
   // (5%) and signal (1%) respectively.
   cout << endl << "Computing limits with systematics... " << endl;
   TVectorD errorb(2);
   TVectorD errors(2);
   TObjArray* names = new TObjArray();
   TObjString name1("bg uncertainty");
   TObjString name2("sig uncertainty");
   names->AddLast(&name1);
   names->AddLast(&name2);
   errorb[0]=0.05; // error source 1: 5%
   errorb[1]=0; // error source 2: 0%
   errors[0]=0; // error source 1: 0%
   errors[1]=0.01; // error source 2: 1%
   TLimitDataSource* mynewdatasource = new TLimitDataSource();
   mynewdatasource->AddChannel(signal,background,data,&errors,&errorb,names);
   TConfidenceLevel *mynewconfidence = TLimit::ComputeLimit(mynewdatasource,50000,true);
   cout << "CLs    : "   << mynewconfidence->CLs()  << endl;
   cout << "CLsb   : "   << mynewconfidence->CLsb() << endl;
   cout << "CLb    : "   << mynewconfidence->CLb()  << endl;
   cout << "< CLs >  : " << mynewconfidence->GetExpectedCLs_b()  << endl;
   cout << "< CLsb > : " << mynewconfidence->GetExpectedCLsb_b() << endl;
   cout << "< CLb >  : " << mynewconfidence->GetExpectedCLb_b()  << endl;

   // show canonical -2lnQ plots in a new canvas
   // - The histogram of -2lnQ for background hypothesis (full)
   // - The histogram of -2lnQ for signal and background hypothesis (dashed)
   TCanvas *c2 = new TCanvas("c2");
   myconfidence->Draw();

   // clean up (except histograms and canvas)
   delete myconfidence;
   delete mydatasource;
   delete mystatconfidence;
   delete mynewconfidence;
   delete mynewdatasource;
}