Minimizer QuadraticLineMinimizer(Functional f, const GridDescription &gd, double kT,
                                 VectorXd *data, const VectorXd &direction,
                                 double gradDotDirection, double *step) {
  //if (gradDotDirection > 0) {
  //  printf("The slope is backwards!!!\n");
  //  assert(gradDotDirection < 0);
  //}
  return Minimizer(new QuadraticLineMinimizerType(f, gd, kT, data, direction,
                                                  gradDotDirection, step));
}
int CalclFilmParamsTM(FilmFuncTMParams& in, FilmParams& out) {
  FilmMinimizerTM Minimizer(in, 200);
  // out.status = Minimizer.Run(FilmParams(in.bettaexp[0],1020.), FilmParams(1e-4,1e-1), 1e-6);
  out.status = Minimizer.Run(FilmParams(1.8, 1250.), FilmParams(1e-4, 1e-1), 1e-6);
  out = Minimizer.roots;
  out.dt = Minimizer.dt;
  out.func_call_cntr = FilmMinimizerTM::func_call_cntr;
  out.epsabs = Minimizer.epsabs;
  out.fval = Minimizer.fval;
  out.size = Minimizer.size;
  return out.status;
}
void Scene::TraceImage(Color* image, const int pass)
{
    // realtime->run();  // Remove this (realtime stuff)

    std::vector<Shape*> bboxes;
    for (auto i : shapes) {
        bboxes.push_back(new Box(i->bbox().corner(Box3d::BottomLeftFloor),
                                 i->bbox().corner(Box3d::TopRightCeil) - i->bbox().corner(Box3d::BottomLeftFloor),
                                 currentMat));
    }
    KdBVH<float, 3, Shape*> tree(shapes.begin(), shapes.end());

    // Build unit vectors for camera space.
    float rx = camera->ry * width / height;
    Vector3f camX = rx * camera->orient._transformVector(Vector3f::UnitX());
    Vector3f camY = camera->ry * camera->orient._transformVector(Vector3f::UnitY());
    Vector3f camZ = -1 * camera->orient._transformVector(Vector3f::UnitZ());

    //fprintf(stderr, "Rendering Starts.\n【Render Pass】%d\n【Resolution】%d × %d\n", MAX_PASS, width, height);

    for (int pass = 0; pass < MAX_PASS; pass++) {  // note: shadows the `pass` parameter
        #pragma omp parallel for schedule(dynamic, 1)  // Magic: multi-thread the y loop
        for (int y = 0; y < height - 1; ++y) {
            for (int x = 0; x < width - 1; ++x) {
                //fprintf(stderr, "Progress: %2d%%, current Pass: %4d\r", pass * 100 / MAX_PASS, pass+1);
                //fprintf(stderr, "Rendering Pass: %d, y: %4d, x: %4d\r", pass, y, x);

                // Variable declarations
                Color color;
                Vector3f rayDir, Weight, expWeight, brdf;
                Ray ray, shadowRay;
                float ProbLightSample, ProbBRDFSample, MIS;
                Minimizer minimizer, shadowMinimizer;
                Intersection *pCurrentIt, *pShadowIt, expLight, lastIt;
                float ProbDiffuse;
                float ProbSpecular;
                float ProbTransmission;

                // Define the ray:
                // transform x and y to [-1,1] screen space.
                float dx = (x + myrandom(RNGen)) / width * 2 - 1;
                float dy = (y + myrandom(RNGen)) / height * 2 - 1;
                rayDir = dx*camX + dy*camY + camZ;
                rayDir.normalize();
                ray = Ray(camera->eye, rayDir);

                // Trace the initial ray.
                minimizer = Minimizer(ray);
                pCurrentIt = BVMinimize(tree, minimizer) == FLT_MAX ? NULL : &minimizer.minIt;

                // Compute the color: reset color and weights first.
                color = Vector3f(0.0f, 0.0f, 0.0f);
                Weight = Vector3f(1.0f, 1.0f, 1.0f);

                // Compute the BRDF if an intersection exists and is not a light source.
                if (pCurrentIt) {
                    if (!pCurrentIt->pS->mat->isLight()) {
                        while (myrandom(RNGen) < RUSSIAN_ROULETTE) {
                            Vector3f wo = -ray.D;
                            float KdNorm = pCurrentIt->pS->mat->Kd.norm();
                            float KsNorm = pCurrentIt->pS->mat->Ks.norm();
                            float KtNorm = pCurrentIt->pS->mat->Kt.norm();
                            ProbDiffuse      = KdNorm / (KdNorm + KsNorm + KtNorm);
                            ProbSpecular     = KsNorm / (KdNorm + KsNorm + KtNorm);
                            ProbTransmission = KtNorm / (KdNorm + KsNorm + KtNorm);

                            // Explicit sampling (light sampling) ------------------------------
                            expLight = sampleLight();
                            rayDir = expLight.pos - pCurrentIt->pos;
                            rayDir.normalize();
                            shadowRay = Ray(pCurrentIt->pos, rayDir);
                            shadowMinimizer = Minimizer(shadowRay);
                            pShadowIt = BVMinimize(tree, shadowMinimizer) == FLT_MAX ? NULL : &shadowMinimizer.minIt;
                            //if (pShadowIt && (pShadowIt->pos - expLight.pos).squaredNorm() < epsilon) {
                            if (pShadowIt && pShadowIt->pS == expLight.pS) {
                                ProbLightSample = pdfLight(expLight) / geomertryFactor(*pCurrentIt, expLight);
                                ProbBRDFSample = pdfBrdf(*pCurrentIt, shadowRay.D, wo, ProbDiffuse, ProbSpecular, ProbTransmission) * RUSSIAN_ROULETTE;
                                MIS = ProbLightSample * ProbLightSample / (ProbLightSample * ProbLightSample + ProbBRDFSample * ProbBRDFSample);
                                brdf = fabs(pCurrentIt->normal.dot(shadowRay.D)) * evalBrdf(*pCurrentIt, shadowRay.D, wo, ProbDiffuse, ProbSpecular, ProbTransmission, pShadowIt->t);
                                expWeight = Weight.cwiseProduct(brdf / ProbLightSample);
                                color += MIS * (Color)(expWeight.cwiseProduct(expLight.pS->mat->color));
                            }
                            // -----------------------------------------------------------------

                            // Implicit sampling (BRDF sampling) -------------------------------
                            // Save the current intersection info and extend the path.
                            ray.Q = pCurrentIt->pos;
                            //do { ray.D = sampleBrdf(*pCurrentIt, wo); } while (pCurrentIt->normal.dot(ray.D) < epsilon);
                            ray.D = sampleBrdf(*pCurrentIt, wo, ProbDiffuse, ProbSpecular);
                            //if (pCurrentIt->normal.dot(ray.D) < epsilon) break;
                            lastIt = *pCurrentIt;
                            minimizer = Minimizer(ray);
                            if (BVMinimize(tree, minimizer) == FLT_MAX) break;
                            else {
                                if ((ProbBRDFSample = pdfBrdf(lastIt, ray.D, wo, ProbDiffuse, ProbSpecular, ProbTransmission) * RUSSIAN_ROULETTE) < epsilon) break;
                                brdf = fabs(lastIt.normal.dot(ray.D)) * evalBrdf(lastIt, ray.D, wo, ProbDiffuse, ProbSpecular, ProbTransmission, pCurrentIt->t);
                                Weight = Weight.cwiseProduct(brdf / ProbBRDFSample);
                                if (pCurrentIt->pS->mat->isLight()) {
                                    ProbLightSample = pdfLight(*pCurrentIt) / geomertryFactor(lastIt, *pCurrentIt);
                                    MIS = ProbBRDFSample * ProbBRDFSample / (ProbLightSample * ProbLightSample + ProbBRDFSample * ProbBRDFSample);
                                    color += MIS * (Color)(Weight.cwiseProduct(pCurrentIt->pS->mat->color));
                                    break;
                                }
                            }
                            // -----------------------------------------------------------------
                        }
                    }
                    // Use the pure material color for light sources.
                    else color = pCurrentIt->pS->mat->color;
                }
                image[y*width + x] += color / (float)MAX_PASS;
            }
        }
        //fprintf(stderr, "\n");

        // Write an HDR image after passes 1, 4, 16, 64, 256, 1024, and 4096 (pass + 1 is a power of four).
        if (pass == 0 || pass == 3 || pass == 15 || pass == 63 || pass == 255 || pass == 1023 || pass == 4095) {
            char filename[32];
            sprintf(filename, "Image_Pass_%d.hdr", pass + 1);
            std::string hdrName = filename;
            // Write the image
            WriteHdrImage(hdrName, width, height, image);
        }
    }
}
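// The MIS factors in TraceImage are the standard power heuristic (exponent 2): a sample drawn from
// strategy A with density pA is weighted by pA^2 / (pA^2 + pB^2), and the BRDF-sampling density is
// scaled by the Russian-roulette continuation probability before the weight is formed. A minimal
// standalone sketch of both pieces follows; the helper names are illustrative only and are not part
// of the renderer's API.
static inline float misPowerHeuristic(float pdfA, float pdfB) {
    // Power heuristic with exponent 2; returns the weight applied to the strategy-A estimate.
    float a = pdfA * pdfA;
    float b = pdfB * pdfB;
    return a / (a + b);
}

static inline float pathContinuationPdf(float brdfPdf, float continueProb /* e.g. RUSSIAN_ROULETTE */) {
    // A path extended only with probability continueProb has its effective sampling density scaled
    // by that probability, which is why TraceImage multiplies pdfBrdf(...) by RUSSIAN_ROULETTE
    // before computing the MIS weight.
    return brdfPdf * continueProb;
}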
Minimizer PreconditionedConjugateGradient(Functional f, const GridDescription &gdin, double kT,
                                          VectorXd *data, LineMinimizer lm, double stepsize) {
  return Minimizer(new PreconditionedConjugateGradientType(f, gdin, kT, data, lm, stepsize));
}
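// The factories above (and PreconditionedDownhill below) all wrap a concrete *Type object in the
// Minimizer handle. A minimal usage sketch, assuming a Functional `f`, a GridDescription `gd`, and
// a potential vector `potential` already exist, that LineMinimizer is a function-pointer typedef
// matching QuadraticLineMinimizer's signature (which its use as the `lm` argument suggests), and
// that the Minimizer handle exposes an improve_energy()-style stepping method (an assumption,
// not shown in this excerpt):
//
//   Minimizer min = PreconditionedConjugateGradient(f, gd, kT, &potential,
//                                                   QuadraticLineMinimizer, 1.0);
//   while (min.improve_energy(false)) {
//     // iterate until the preconditioned conjugate-gradient minimizer reports convergence
//   }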
void MuScale() {
  //--------------------------------------------------------------------------------------------------------------
  // Settings
  //==============================================================================================================

  // event category enumeration
  enum { eMuMu2HLT=1, eMuMu1HLT1L1, eMuMu1HLT, eMuMuNoSel, eMuSta, eMuTrk };

  TString outputDir = "MuScaleResults";

  vector<TString> infilenamev;
  infilenamev.push_back("/afs/cern.ch/work/c/cmedlock/public/wz-ntuples/Zmumu/ntuples/data_select.trkCuts.root");    // data
  infilenamev.push_back("/afs/cern.ch/work/c/cmedlock/public/wz-ntuples/Zmumu/ntuples/zmm_select.raw.trkCuts.root"); // MC

  const Double_t MASS_LOW  = 60;
  const Double_t MASS_HIGH = 120;
  const Double_t PT_CUT    = 25;
  const Double_t ETA_CUT   = 2.4;
  const Double_t MU_MASS   = 0.105658369;

  vector<pair<Double_t,Double_t> > scEta_limits;
  scEta_limits.push_back(make_pair(0.0,1.2));
  scEta_limits.push_back(make_pair(1.2,2.1));
  scEta_limits.push_back(make_pair(2.1,2.4));

  CPlot::sOutDir = outputDir;

  const TString format("png");

  //--------------------------------------------------------------------------------------------------------------
  // Main analysis code
  //==============================================================================================================

  enum { eData=0, eMC };

  char hname[100];
  vector<TH1D*> hMCv, hDatav;
  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    for(UInt_t jbin=ibin; jbin<scEta_limits.size(); jbin++) {
      sprintf(hname,"mc_%i_%i",ibin,jbin);
      hMCv.push_back(new TH1D(hname,"",80,MASS_LOW,MASS_HIGH));
      hMCv.back()->Sumw2();
      sprintf(hname,"data_%i_%i",ibin,jbin);
      hDatav.push_back(new TH1D(hname,"",80,MASS_LOW,MASS_HIGH));
      hDatav.back()->Sumw2();
    }
  }

  //
  // Declare output ntuple variables
  //
  UInt_t runNum, lumiSec, evtNum;
  Float_t scale1fb, puWeight;
  UInt_t matchGen;
  UInt_t category;
  UInt_t npv, npu;
  Int_t q1, q2;
  TLorentzVector *dilep=0, *lep1=0, *lep2=0;

  for(UInt_t ifile=0; ifile<infilenamev.size(); ifile++) {
    cout << "Processing " << infilenamev[ifile] << "..." << endl;
    TFile *infile = TFile::Open(infilenamev[ifile]); assert(infile);
    TTree *intree = (TTree*)infile->Get("Events");   assert(intree);

    intree->SetBranchAddress("runNum",   &runNum);    // event run number
    intree->SetBranchAddress("lumiSec",  &lumiSec);   // event lumi section
    intree->SetBranchAddress("evtNum",   &evtNum);    // event number
    intree->SetBranchAddress("scale1fb", &scale1fb);  // event weight
    intree->SetBranchAddress("puWeight", &puWeight);  // pileup reweighting
    intree->SetBranchAddress("matchGen", &matchGen);  // event has both leptons matched to MC Z->ll
    intree->SetBranchAddress("category", &category);  // dilepton category
    intree->SetBranchAddress("npv",      &npv);       // number of primary vertices
    intree->SetBranchAddress("npu",      &npu);       // number of in-time PU events (MC)
    intree->SetBranchAddress("q1",       &q1);        // charge of lead lepton
    intree->SetBranchAddress("q2",       &q2);        // charge of trail lepton
    intree->SetBranchAddress("dilep",    &dilep);     // dilepton 4-vector
    intree->SetBranchAddress("lep1",     &lep1);      // lead lepton 4-vector
    intree->SetBranchAddress("lep2",     &lep2);      // trail lepton 4-vector

    for(UInt_t ientry=0; ientry<intree->GetEntries(); ientry++) {
      intree->GetEntry(ientry);

      Double_t weight = 1;
      if(ifile==eMC) {
        //if(!matchGen) continue;
        weight = scale1fb*puWeight*1.1*TMath::Power(10,7)/5610.0;
      }

      if((category!=eMuMu2HLT) && (category!=eMuMu1HLT) && (category!=eMuMu1HLT1L1)) continue;
      if(q1 == q2) continue;
      if(dilep->M()        < MASS_LOW)  continue;
      if(dilep->M()        > MASS_HIGH) continue;
      if(lep1->Pt()        < PT_CUT)    continue;
      if(lep2->Pt()        < PT_CUT)    continue;
      if(fabs(lep1->Eta()) > ETA_CUT)   continue;
      if(fabs(lep2->Eta()) > ETA_CUT)   continue;

      TLorentzVector vLep1(0,0,0,0);
      TLorentzVector vLep2(0,0,0,0);
      vLep1.SetPtEtaPhiM(lep1->Pt(), lep1->Eta(), lep1->Phi(), MU_MASS);
      vLep2.SetPtEtaPhiM(lep2->Pt(), lep2->Eta(), lep2->Phi(), MU_MASS);
      TLorentzVector vDilep = vLep1 + vLep2;

      Int_t bin1=-1, bin2=-1;
      for(UInt_t i=0; i<scEta_limits.size(); i++) {
        Double_t etalow  = scEta_limits.at(i).first;
        Double_t etahigh = scEta_limits.at(i).second;
        if(fabs(lep1->Eta())>=etalow && fabs(lep1->Eta())<=etahigh) bin1=i;
        if(fabs(lep2->Eta())>=etalow && fabs(lep2->Eta())<=etahigh) bin2=i;
      }
      assert(bin1>=0);
      assert(bin2>=0);
      Int_t ibin = (bin1<=bin2) ? bin1 : bin2;
      Int_t jbin = (bin1<=bin2) ? bin2 : bin1;

      // flat index of the (ibin,jbin) pair in the list of eta-bin combinations
      UInt_t n=jbin-ibin;
      for(Int_t k=0; k<ibin; k++) n+=(scEta_limits.size()-k);

      if(ifile==eData) hDatav[n]->Fill(vDilep.M(),weight);
      if(ifile==eMC)   hMCv[n]->Fill(vDilep.M(),weight);
    }
    delete infile;
    infile=0, intree=0;
  }

  //
  // Fit for energy scale and resolution corrections
  //
  char vname[100];  // buffer for RooFit object names
  char pname[100];
  char str1[100];
  char str2[100];
  TCanvas *c = MakeCanvas("c","c",800,600);

  // Dummy histograms for TLegend (I can't figure out how to properly pass RooFit objects...)
  TH1D *hDummyData = new TH1D("hDummyData","",0,0,10);
  hDummyData->SetMarkerStyle(kFullCircle);
  hDummyData->SetMarkerSize(0.9);
  TH1D *hDummyMC = new TH1D("hDummyMC","",0,0,10);
  hDummyMC->SetLineColor(kBlue);
  hDummyMC->SetFillColor(kBlue);
  hDummyMC->SetFillStyle(3002);
  TH1D *hDummyFit = new TH1D("hDummyFit","",0,0,10);
  hDummyFit->SetLineColor(kGreen+2);

  RooRealVar mass("mass","M_{#mu#mu}",60.0,120.0,"GeV");
  mass.setBins(1600,"cache");
  RooRealVar massmc("massmc","massmc",0.0,150.0,"GeV");  // mass variable for building MC template

  RooCategory zscEta_cat("zscEta_cat","zscEta_cat");
  RooSimultaneous combscalefit("combscalefit","combscalefit",zscEta_cat);

  map<string,TH1*> hmap;    // mapping of category labels to data histograms
  RooArgList scalebins;     // list of RooRealVars storing per-bin energy scale corrections
  RooArgList sigmabins;     // list of RooRealVars storing per-bin energy resolution corrections
  Int_t intOrder = 1;       // interpolation order for the RooHistPdf templates

  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    sprintf(vname,"scale_%i",ibin);
    RooRealVar *scalebinned = new RooRealVar(vname,vname,1.0,0.5,1.5);
    scalebins.add(*scalebinned);
    sprintf(vname,"sigma_%i",ibin);
    RooRealVar *sigmabinned = new RooRealVar(vname,vname,1.0,0.0,2.0);
    sigmabins.add(*sigmabinned);
  }

  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    for(UInt_t jbin=ibin; jbin<scEta_limits.size(); jbin++) {
      UInt_t n=jbin-ibin;
      for(UInt_t k=0; k<ibin; k++) n+=(scEta_limits.size()-k);

      sprintf(vname,"masslinearshifted_%i_%i",ibin,jbin);
      RooFormulaVar *masslinearshifted = new RooFormulaVar(vname,vname,"sqrt(@0*@1)",RooArgList(*scalebins.at(ibin),*scalebins.at(jbin)));
      sprintf(vname,"massshiftedscEta_%i_%i",ibin,jbin);
      RooLinearVar *massshiftedscEta = new RooLinearVar(vname,vname,mass,*masslinearshifted,RooConst(0.0));

      // MC-based template
      sprintf(vname,"zmassmcscEta_%i_%i",ibin,jbin);
      RooDataHist *zmassmcscEta = new RooDataHist(vname,vname,RooArgList(massmc),hMCv[n]);
      sprintf(vname,"masstemplatescEta_%i_%i",ibin,jbin);
      RooHistPdf *masstemplatescEta = new RooHistPdf(vname,vname,RooArgList(*massshiftedscEta),RooArgList(massmc),*zmassmcscEta,intOrder);

      // Gaussian smearing function
      sprintf(vname,"sigmascEta_%i_%i",ibin,jbin);
      RooFormulaVar *sigmascEta = new RooFormulaVar(vname,vname,"sqrt(@0*@0+@1*@1)",RooArgList(*sigmabins.at(ibin),*sigmabins.at(jbin)));
      sprintf(vname,"resscEta_%i_%i",ibin,jbin);
      RooGaussian *resscEta = new RooGaussian(vname,vname,mass,RooConst(0.),*sigmascEta);

      // Fit model: MC template convolved with the Gaussian
      sprintf(vname,"fftscEta_%i_%i",ibin,jbin);
      RooFFTConvPdf *fftscEta = new RooFFTConvPdf(vname,vname,mass,*masstemplatescEta,*resscEta);
      fftscEta->setBufferStrategy(RooFFTConvPdf::Flat);

      // Add this eta-bin pair as a category
      char zscEta_catname[100];
      sprintf(zscEta_catname,"zscEta_cat_%i_%i",ibin,jbin);
      zscEta_cat.defineType(zscEta_catname);
      zscEta_cat.setLabel(zscEta_catname);
      hmap.insert(pair<string,TH1*>(zscEta_catname,hDatav[n]));
      combscalefit.addPdf(*fftscEta,zscEta_catname);
    }
  }

  // perform fit
  RooDataHist zdatascEta_comb("zdatascEta_comb","zdatascEta_comb",RooArgList(mass),zscEta_cat,hmap,1.0);
  combscalefit.fitTo(zdatascEta_comb,PrintEvalErrors(kFALSE),Minos(kFALSE),Strategy(0),Minimizer("Minuit2",""));

  Double_t xval[scEta_limits.size()];
  Double_t xerr[scEta_limits.size()];
  Double_t scaleDatatoMC[scEta_limits.size()];
  Double_t scaleDatatoMCerr[scEta_limits.size()];
  Double_t scaleMCtoData[scEta_limits.size()];
  Double_t scaleMCtoDataerr[scEta_limits.size()];
  Double_t sigmaMCtoData[scEta_limits.size()];
  Double_t sigmaMCtoDataerr[scEta_limits.size()];

  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    Double_t etalow  = scEta_limits.at(ibin).first;
    Double_t etahigh = scEta_limits.at(ibin).second;
    xval[ibin] = 0.5*(etahigh+etalow);
    xerr[ibin] = 0.5*(etahigh-etalow);

    scaleDatatoMC[ibin]    = ((RooRealVar*)scalebins.at(ibin))->getVal();
    scaleDatatoMCerr[ibin] = ((RooRealVar*)scalebins.at(ibin))->getError();

    scaleMCtoData[ibin]    = 1.0/scaleDatatoMC[ibin];
    scaleMCtoDataerr[ibin] = scaleDatatoMCerr[ibin]/scaleDatatoMC[ibin]/scaleDatatoMC[ibin];

    sigmaMCtoData[ibin]    = ((RooRealVar*)sigmabins.at(ibin))->getVal();
    sigmaMCtoDataerr[ibin] = ((RooRealVar*)sigmabins.at(ibin))->getError();
  }

  TGraphErrors *grScaleDatatoMC = new TGraphErrors(scEta_limits.size(),xval,scaleDatatoMC,xerr,scaleDatatoMCerr);
  TGraphErrors *grScaleMCtoData = new TGraphErrors(scEta_limits.size(),xval,scaleMCtoData,xerr,scaleMCtoDataerr);
  TGraphErrors *grSigmaMCtoData = new TGraphErrors(scEta_limits.size(),xval,sigmaMCtoData,xerr,sigmaMCtoDataerr);

  CPlot plotScale1("mu_scale_datatomc","","Muon |#eta|","Data scale correction");
  plotScale1.AddGraph(grScaleDatatoMC,"",kBlue);
  plotScale1.SetYRange(0.98,1.02);
  plotScale1.AddLine(0,1,2.5,1,kBlack,7);
  plotScale1.Draw(c,kTRUE,format);

  CPlot plotScale2("mu_scale_mctodata","","Muon |#eta|","MC#rightarrowData scale correction");
  plotScale2.AddGraph(grScaleMCtoData,"",kBlue);
  plotScale2.SetYRange(0.98,1.02);
  plotScale2.AddLine(0,1,2.5,1,kBlack,7);
  plotScale2.Draw(c,kTRUE,format);

  CPlot plotRes("mu_res_mctodata","","Muon |#eta|","MC#rightarrowData additional smear [GeV]");
  plotRes.AddGraph(grSigmaMCtoData,"",kBlue);
  plotRes.SetYRange(0,1.6);
  plotRes.Draw(c,kTRUE,format);

  double nData=0;

  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    for(UInt_t jbin=ibin; jbin<scEta_limits.size(); jbin++) {
      UInt_t n=jbin-ibin;
      for(UInt_t k=0; k<ibin; k++) n+=(scEta_limits.size()-k);

      // Post-fit plot
      RooPlot *frame = mass.frame();
      char catname[100]; sprintf(catname,"zscEta_cat_%i_%i",ibin,jbin);
      char cutstr[100];  sprintf(cutstr,"zscEta_cat==zscEta_cat::%s",catname);
      RooDataHist zmc(catname,catname,RooArgList(mass),hMCv[n]);
      RooHistPdf mctemplate(catname,catname,RooArgList(mass),zmc,intOrder);
      //mctemplate.plotOn(frame,LineColor(kBlue),LineWidth(1),Normalization(hDatav[n]->GetEntries()));
      mctemplate.plotOn(frame,LineColor(kBlue),LineWidth(1),Normalization(hDatav[n]->Integral()));
      //mctemplate.plotOn(frame,LineColor(kBlue),FillColor(kBlue),FillStyle(3002),DrawOption("F"),Normalization(hDatav[n]->GetEntries()));
      mctemplate.plotOn(frame,LineColor(kBlue),FillColor(kBlue),FillStyle(3002),DrawOption("F"),Normalization(hDatav[n]->Integral()));
      zdatascEta_comb.plotOn(frame,Cut(cutstr),MarkerStyle(kFullCircle),MarkerSize(1.0),DrawOption("ZP"));
      combscalefit.plotOn(frame,Slice(zscEta_cat,catname),ProjWData(RooArgSet(mass,catname),zdatascEta_comb),LineColor(kGreen+2));

      sprintf(pname,"postfit_%i_%i",ibin,jbin);
      sprintf(str1,"[%.1f, %.1f]",scEta_limits.at(ibin).first,scEta_limits.at(ibin).second);
      sprintf(str2,"[%.1f, %.1f]",scEta_limits.at(jbin).first,scEta_limits.at(jbin).second);
      CPlot plot(pname,frame,"","m(#mu^{+}#mu^{-}) [GeV/c^{2}]","Events / 0.6 GeV/c^{2}");
      plot.AddTextBox(str1,0.21,0.80,0.45,0.87,0,kBlack,-1);
      plot.AddTextBox(str2,0.21,0.73,0.45,0.80,0,kBlack,-1);
      plot.SetLegend(0.75,0.64,0.93,0.88);
      plot.GetLegend()->AddEntry(hDummyData,"Data","PL");
      plot.GetLegend()->AddEntry(hDummyMC,"Sim","FL");
      plot.GetLegend()->AddEntry(hDummyFit,"Fit","L");
      plot.Draw(c,kTRUE,format);

      nData += hDatav[n]->Integral();
    }
  }
  cout << "nData = " << nData << endl;

  //--------------------------------------------------------------------------------------------------------------
  // Output
  //==============================================================================================================

  cout << "*" << endl;
  cout << "* SUMMARY" << endl;
  cout << "*--------------------------------------------------" << endl;
  cout << endl;

  ofstream txtfile;
  char txtfname[100];
  sprintf(txtfname,"%s/summary.txt",outputDir.Data());
  txtfile.open(txtfname);
  assert(txtfile.is_open());

  txtfile << " Data->MC scale correction" << endl;
  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    Double_t etalow  = scEta_limits.at(ibin).first;
    Double_t etahigh = scEta_limits.at(ibin).second;
    txtfile << "$" << etalow << " < |\\eta| < " << etahigh << "$ & ";
    txtfile << "$" << ((RooRealVar*)scalebins.at(ibin))->getVal() << "$ \\pm $" << ((RooRealVar*)scalebins.at(ibin))->getError() << "$ \\\\" << endl;
  }
  txtfile << endl;

  txtfile << " MC->Data resolution correction [GeV]" << endl;
  for(UInt_t ibin=0; ibin<scEta_limits.size(); ibin++) {
    Double_t etalow  = scEta_limits.at(ibin).first;
    Double_t etahigh = scEta_limits.at(ibin).second;
    txtfile << etalow << " < |\\eta| < " << etahigh << " & ";
    txtfile << "$" << ((RooRealVar*)sigmabins.at(ibin))->getVal() << "$ \\pm $" << ((RooRealVar*)sigmabins.at(ibin))->getError() << "$ \\\\" << endl;
  }
  txtfile.close();

  cout << endl;
  cout << " <> Output saved in " << outputDir << "/" << endl;
  cout << endl;
}
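// The repeated index computation in MuScale(),
//   n = jbin - ibin;  for(k < ibin) n += (scEta_limits.size() - k);
// packs an ordered pair of eta bins (ibin <= jbin) into a flat index over the upper triangle of the
// bin-pair matrix, in the same order in which the histograms were pushed into hMCv and hDatav. A
// small standalone sketch of the same mapping (the helper name is illustrative, not part of the macro):
static UInt_t pairIndex(UInt_t ibin, UInt_t jbin, UInt_t nBins) {
  // assumes ibin <= jbin < nBins; each earlier row k contributes (nBins - k) entries
  UInt_t n = jbin - ibin;
  for(UInt_t k=0; k<ibin; k++) n += (nBins - k);
  return n;
}
// For nBins = 3 this enumerates (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5.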
FitResult doFit(const FitSetup& setup, string conditions, string fname=string("")) {
  //cerr<<"DO FIT"<<endl;

  string varname = setup.varname;
  RooRealVar var(varname.c_str(),varname.c_str(),setup.varMin, setup.varMax);

  //string region="0btag_MTtail";
  string region = setup.region; // should it be an argument?

  TFile* fin = 0;
  if(fname=="") fin = TFile::Open(setup.filename.c_str());
  else          fin = TFile::Open(fname.c_str());

  //-- normalisation in the MC --//
  float mc_norm_1ltop = 0;
  float mc_norm_tt2l  = 0;
  float mc_norm_Wjets = 0;
  //float mc_norm_rare = 0;

  // C r e a t e   m o d e l   f o r   CR1_peak_lowM3b
  // -------------------------------------------------------------

  // Construct pdfs for 1ltop, tt2l, Wjets and rare
  TH1F* histo_1ltop = 0;
  TH1F* histo_tt2l  = 0;
  TH1F* histo_Wjets = 0;
  RooHistPdf *pdf_1ltop = GetRooHistPdf(fin,region,PROCESS_NAME_TT_1L,varname,&var,mc_norm_1ltop, histo_1ltop, setup.do_mcstat, DO_NORM, setup.Ndata*setup.rel_norm_1ltop);
  RooHistPdf *pdf_tt2l  = GetRooHistPdf(fin,region,PROCESS_NAME_TT_2L,varname,&var,mc_norm_tt2l,  histo_tt2l,  setup.do_mcstat, DO_NORM, setup.Ndata*setup.rel_norm_tt2l);
  RooHistPdf *pdf_Wjets = GetRooHistPdf(fin,region,PROCESS_NAME_WJETS,varname,&var,mc_norm_Wjets, histo_Wjets, setup.do_mcstat, DO_NORM, setup.Ndata*setup.rel_norm_Wjets);
  //RooHistPdf *pdf_rare = GetRooHistPdf(fin,region,PROCESS_NAME_RARE,varname,&var,mc_norm_rare, setup.do_mcstat);

  //cerr<<"TT_1L: "<<mc_norm_1ltop<<endl;
  //cerr<<"TT_2l: "<<mc_norm_tt2l<<endl;
  //cerr<<"WJets: "<<mc_norm_Wjets<<endl;
  //cerr<<"OTHER: "<<mc_norm_rare<<endl;

  // normalization factors (RooRealVar)
  float val_1ltop = mc_norm_1ltop;
  float val_Wjets = mc_norm_Wjets;
  if(setup.do_init_uncert) {
    val_1ltop = setup.init_1ltop*mc_norm_1ltop;
    val_Wjets = setup.init_Wjets*mc_norm_Wjets;
  }
  RooRealVar norm_1ltop("norm_1ltop","norm_1ltop",val_1ltop,0.25*mc_norm_1ltop,10.*mc_norm_1ltop);
  RooRealVar norm_Wjets("norm_Wjets","norm_Wjets",val_Wjets,0.25*mc_norm_Wjets,10.*mc_norm_Wjets);
  RooRealVar norm_tt2l("norm_tt2l","norm_tt2l",mc_norm_tt2l,0.25*mc_norm_tt2l,2*mc_norm_tt2l);
  //RooRealVar norm_rare("norm_rare","norm_rare",mc_norm_rare,0.25*mc_norm_rare,2*mc_norm_rare);

  // possibility to study a systematic on it
  if(setup.do_xs_tt2l_sys) mc_norm_tt2l *= setup.xs_sysfactor;
  //if(setup.do_xs_rare_sys) mc_norm_rare *= setup.xs_sysfactor;
  //RooConstVar norm_rare("norm_rare","norm_rare",mc_norm_rare);

  /*
  RooAddPdf model("model","model",
                  RooArgList(*pdf_1ltop,*pdf_tt2l,*pdf_Wjets,*pdf_rare),
                  RooArgList(norm_1ltop,norm_tt2l,norm_Wjets,norm_rare));
  */
  RooAddPdf model("model","model",
                  RooArgList(*pdf_1ltop,*pdf_tt2l,*pdf_Wjets),
                  RooArgList(norm_1ltop,norm_tt2l,norm_Wjets));

  //RooDataHist *data_CR1_peak_lowM3b = GetRooData(fin,region,varname,&var);
  RooDataHist *data_CR1_peak_lowM3b = GetRooData(histo_1ltop,histo_Wjets,histo_tt2l,&var);
  fin->Close();

  //-- Constraints on single top and rare --//
  float RelUncert = 0.2;
  // Construct a Gaussian constraint p.d.f. on the "rare" bkg
  //RooGaussian constr_rare("constr_rare","constr_rare",norm_rare,RooConst(mc_norm_rare),RooConst(RelUncert*mc_norm_rare));
  // Construct a Gaussian constraint p.d.f. on the "tt2l" bkg
  RooGaussian constr_tt2l("constr_tt2l","constr_tt2l",norm_tt2l,RooConst(mc_norm_tt2l),RooConst(RelUncert*mc_norm_tt2l));

  // P e r f o r m   t e m p l a t e   f i t
  // ---------------------------------------------------
  // Minimizer(type,algo) -- Choose the minimization package and algorithm to use. Default is MINUIT/MIGRAD
  // through the RooMinimizer interface, but others can be specified (through the RooMinimizer interface).
  // Select OldMinuit to use MINUIT through the old RooMinuit interface.
  //
  // Type          Algorithm
  // ------        ---------
  // OldMinuit     migrad, simplex, minimize (=migrad+simplex), migradimproved (=migrad+improve)
  // Minuit        migrad, simplex, minimize (=migrad+simplex), migradimproved (=migrad+improve)
  // Minuit2       migrad, simplex, minimize, scan
  // GSLMultiMin   conjugatefr, conjugatepr, bfgs, bfgs2, steepestdescent
  // GSLSimAn      -

  //--- Perform simultaneous fit of model to data and model_ctl to data_ctl --//
  //RooFitResult* res = model.fitTo(*data_CR1_peak_lowM3b,Save());
  //RooFitResult* res = model.fitTo(*data_CR1_peak_lowM3b,ExternalConstraints(constr_rare),ExternalConstraints(constr_tt2l),PrintLevel(-1),Save(),
  RooFitResult* res = model.fitTo(*data_CR1_peak_lowM3b,ExternalConstraints(constr_tt2l),PrintLevel(-1),Save(),
                                  Minimizer(setup.type.c_str(),setup.algo.c_str()),Verbose(0));

  //--- Writing the results ---//
  FitResult fitRes;
  fitRes.Reset();
  fitRes.norm_1ltop  = mc_norm_1ltop;
  fitRes.SF_1ltop    = GetSF(res,"norm_1ltop");
  fitRes.SF_Wjets    = GetSF(res,"norm_Wjets");
  fitRes.edm         = res->edm();
  fitRes.correlation = res->correlationMatrix()[0][1];
  fitRes.conditions  = conditions;

  return fitRes;
}
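// The Minimizer(type,algo) option in the fitTo call above is how setup.type / setup.algo select a
// package and algorithm from the table in the comments. A minimal sketch of a FitSetup configured
// for Minuit2/migrad; field names other than type and algo are assumptions based on their use in doFit:
//
//   FitSetup setup;
//   setup.type = "Minuit2";   // minimization package, first argument of RooFit::Minimizer(...)
//   setup.algo = "migrad";    // algorithm within that package, second argument
//   // ... filename, region, varname, etc. as required by doFit
//   FitResult r = doFit(setup, "Minuit2+migrad baseline");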
Minimizer PreconditionedDownhill(Functional f, const GridDescription &gdin, double kT,
                                 VectorXd *data, double viscosity) {
  return Minimizer(new PreconditionedDownhillType(f, gdin, kT, data, viscosity));
}