/***********************************************************************//** * @brief Read response table from FITS table HDU * * @param[in] table Response table. * * Reads CTA response table information from a FITS table. The FITS table * is expected to have a single row, and axes and parameter information are * found in vector columns. Axes information is expected to be placed * before parameter information. * * Each axis is defined by two vector columns of equal width, describing the * lower and upper limits for each bin. The column names for this boundary * information terminates by "_LO" and "HI", respectively (upper case). It * is expected that the "_LO" column preceeds the "_HI" column. * * Following the axes columns are parameter columns of equal width. The width * of each parameter column is given by the product of the lengths of all * axes. It is furthermore expected that the first axis is the most rapidely * varying index of the vector. * * In case that the HDU table pointer is not valid (i.e. NULL), this method * clears the objects and does nothing else. ***************************************************************************/ void GCTAResponseTable::read(const GFitsTable& table) { // Clear instance clear(); // Read column names read_colnames(table); // Read axes read_axes(table); // Read parameter cubes read_pars(table); // Return return; }
// Driver for a semileptonic form-factor extraction on the lattice:
// fits 2pt functions for masses/couplings, removes the known time
// dependence from 3pt correlators, and computes FT, FP, FM and F0
// for every (theta_source, theta_sink) combination.
// NOTE(review): relies on many globals set by read_pars/read_ensemble_pars
// (T, TSEP, ntheta, theta, njack, gen_im_S0/gen_im_S1, tmin_MEL, tmax_MEL,
// iopp, ...) — their semantics are assumed from usage; confirm in headers.
int main()
{
  // Read analysis parameters and ensemble information from input files
  read_pars("analysis_pars");
  read_ensemble_pars(base_path,T,TSEP,ibeta,nmass,mass,iml_un,nlight,ntheta,theta,ist_th,njack,data_list_file);
  TH=L=T/2;   // half lattice time extent; L presumably the spatial size — confirm

  //we stick to the light-heavy
  int im_spec=0;   // spectator-quark mass index

  //load the 2 points and fits M and Z
  jack M[nmass];
  jack Z[nmass];
  for(int im_S0=0;im_S0<nmass;im_S0++)
    {
      int ith_S0=ist_th;   // 2pt fits done at the reference theta only
      fit_ZM_2pts(M[im_S0],Z[im_S0],im_spec,im_S0,ith_S0);
    }

  // Output files: one per form factor (Q2, value, momentum sum Pi per row)
  ofstream FT_file("FT.dat");
  ofstream FP_file("FP.dat");
  ofstream FM_file("FM.dat");
  ofstream F0_file("F0.dat");
  FILE *Q2_file=open_file("Q2_combo","w");

  //loop over all the theta combination
  // (ith_S0 starts at ith_S1 so each unordered pair is done once)
  for(int ith_S1=0;ith_S1<ntheta;ith_S1++)
    for(int ith_S0=ith_S1;ith_S0<ntheta;ith_S0++)
      {
	//compute energy
	jack E_source=Eth(M[gen_im_S0],ith_S0);
	jack E_sink=Eth(M[gen_im_S1],ith_S1);

	//construct time dependence of the 3pt function:
	// coef * [forward + backward (around-the-world) propagation],
	// with the source-sink separation fixed at TSEP
	jvec time_dep(T,njack);
	for(int t=0;t<T;t++)
	  {
	    jack coef=Z[gen_im_S0]*Z[gen_im_S1]/sqrt(2*E_source*2*E_sink);
	    jack forw,back;
	    if(t<TSEP)
	      {
		forw=exp(-E_source*t-E_sink*(TSEP-t));
		back=exp(-E_source*(T-t)-E_sink*(T-(TSEP-t)));
	      }
	    else
	      {
		forw=exp(-E_source*(T-t)-E_sink*(t-TSEP));
		back=exp(-E_source*t-E_sink*(T-(t-TSEP)));
	      }
	    time_dep[t]=coef*(forw+back);
	  }

	//loop over matrix element: temporal vector, spatial vector, tensor
	int iV0=0,iVK=1,iTK=2;
	char MEL_name[3][4]={"V0","VK","TK"};
	jack MEL[3];
	jvec MEL_corr[3];
	for(int iMEL=0;iMEL<3;iMEL++)
	  {
	    //load 3pts
	    jvec P5MELP5;
	    switch(iMEL)
	      {
	      case 0:P5MELP5=load_P5V0P5(gen_im_S0,ith_S0,gen_im_S1,ith_S1);break;
	      case 1:P5MELP5=load_P5VKP5(gen_im_S0,ith_S0,gen_im_S1,ith_S1);break;
	      case 2:P5MELP5=load_P5TKP5(gen_im_S0,ith_S0,gen_im_S1,ith_S1);break;
	      }
	    // Diagnostic plots of the raw correlator and of the model time dependence
	    P5MELP5.subset(1,TSEP).print_to_file(combine("P5%sP5/%02d_%d_%02d_%d.xmg",MEL_name[iMEL],gen_im_S0,ith_S0,gen_im_S1,ith_S1).c_str());
	    time_dep.subset(1,TSEP).print_to_file(combine("time_dep/%02d_%d_%02d_%d.xmg",gen_im_S0,ith_S0,gen_im_S1,ith_S1).c_str());

	    //remove the time dependence, leaving a plateau in the matrix element
	    MEL_corr[iMEL]=P5MELP5/time_dep;
	    MEL[iMEL]=constant_fit(MEL_corr[iMEL].subset(1,TSEP),tmin_MEL,tmax_MEL,combine("%s/%02d_%d_%02d_%d.xmg",MEL_name[iMEL],gen_im_S0,ith_S0,gen_im_S1,ith_S1).c_str());
	  }

	//ratios (diagnostics: renormalization-independent combinations)
	jvec VK_fr_TK_corr=MEL_corr[iVK]/MEL_corr[iTK];
	jack VK_fr_TK=constant_fit(VK_fr_TK_corr.subset(1,TSEP),tmin_MEL,tmax_MEL,combine("VK_fr_TK/%02d_%d_%02d_%d.xmg",gen_im_S0,ith_S0,gen_im_S1,ith_S1).c_str());
	jvec V0_fr_TK_corr=MEL_corr[iV0]/MEL_corr[iTK];
	jack V0_fr_TK=constant_fit(V0_fr_TK_corr.subset(1,TSEP),tmin_MEL,tmax_MEL,combine("V0_fr_TK/%02d_%d_%02d_%d.xmg",gen_im_S0,ith_S0,gen_im_S1,ith_S1).c_str());

	//////////////////////////// compute_form_factors ///////////////////////

	//2*Zv: hard-coded vector-current renormalization (1.69) — TODO confirm source
	MEL[iVK]/=-2*1.69;
	MEL[iV0]/=-2*1.69;

	//impulses: lattice momenta p = pi*theta/L for twisted boundary conditions
	jack M_source=M[gen_im_S0],M_sink=M[gen_im_S1];
	double p_source=M_PI*theta[ith_S0]/L,p_sink=M_PI*theta[ith_S1]/L;
	jack P0=E_source+E_sink,Q0=E_source-E_sink;     // temporal sum/difference
	double Pi=p_source+p_sink,Qi=p_source-p_sink;   // spatial sum/difference (per direction)
	jack Q2=Q0*Q0-3*Qi*Qi;   // invariant momentum transfer (3 equal spatial components)
	jack P2=P0*P0-3*Pi*Pi;
	jack DM2=M_source*M_source-M_sink*M_sink;

	//TK=(E_source*p_sink-p_source*E_sink)*2FT/(M_source+M_sink)
	jack FT=MEL[iTK]*0.5*(M_source+M_sink)/(E_source*p_sink-p_source*E_sink);

	//FP,F0 from the V0/VK system; iopp presumably maps theta -> opposite theta,
	//in which case the 2x2 system is singular and the else branch applies
	jack FM,FP,F0;
	if(ith_S0!=iopp[ith_S1])
	  {
	    jack DET=P0*Qi-Q0*Pi;
	    //calculate (f+) and (f-)*Q2/(M_K^2-M_Pi^2)
	    FP=(MEL[iV0]*Qi-Q0*MEL[iVK])/DET;
	    FM=(P0*MEL[iVK]-MEL[iV0]*Pi)/DET;
	    F0=FP+Q2/DM2*FM;
	  }
	else
	  {
	    //calculate (f+) and (f-)*Q2/(M_K^2-M_Pi^2)
	    FM=MEL[iVK]/Qi;
	    FP=(MEL[iV0]-Q0*FM)/P0;
	    F0=FP+Q2/DM2*FM;
	  }

	// Append one point per theta combination to each form-factor file
	FT_file<<Q2<<" "<<FT<<" "<<Pi<<endl;
	FP_file<<Q2<<" "<<FP<<" "<<Pi<<endl;
	FM_file<<Q2<<" "<<FM<<" "<<Pi<<endl;
	F0_file<<Q2<<" "<<F0<<" "<<Pi<<endl;

	fprintf(Q2_file,"th0=%f\t" "th1=%f\t" "Q2=%f\n",theta[ith_S0],theta[ith_S1],Q2.med());
      }

  fclose(Q2_file);

  return 0;
}
// Two-point correlator analysis: for every (source,sink) mass combination
// reads the correlators from a binary file, symmetrizes, fits mass M and
// coupling Z2 jackknife-by-jackknife with MINUIT, writes diagnostic xmgrace
// plots and the fitted M/Z2 to a binary output file.
// NOTE(review): depends on globals filled by read_pars/read_ensemble_pars
// (T, njack, nmass, nlights, tmin/tmax, parity, corr_name, out_file, ...)
// and on the globals corr_fit/corr_err read by the chi2 FCN — confirm in headers.
int main()
{
  read_pars("input");
  read_ensemble_pars(base_path,T,ibeta,nmass,mass,iml_un,nlights,data_list_file);
  TH=L=T/2;   // half time extent (symmetrization point)
  int ncombo=nmass*nmass;

  //load all the corrs: 4 correlators per combination, T slices, njack+1 jackknives
  double *buf=new double[4*ncombo*T*(njack+1)];
  FILE *fin=open_file(combine("%s/%s",base_path,corr_name).c_str(),"r");
  int stat=fread(buf,sizeof(double),4*ncombo*(njack+1)*T,fin);
  if(stat!=4*ncombo*(njack+1)*T)
    {
      cerr<<"Error loading data!"<<endl;
      exit(1);
    }

  jvec M(ncombo,njack);
  jvec Z2(ncombo,njack);

  //define minuit staff: 2 parameters (Z2, M), silent, chi2 reads the
  //corr_fit/corr_err globals filled below
  TMinuit minu(2);
  minu.SetPrintLevel(-1);
  minu.SetFCN(chi2);
  corr_fit=new double[TH+1];
  corr_err=new double[TH+1];

  int ic=0;
  //fit each combo
  for(int ims=0;ims<nmass;ims++)
    for(int imc=0;imc<nmass;imc++)
      {
	// Indices of the two correlators to average (layout set by the producer)
	int ic1=0+2*(imc+nmass*(0+2*ims));
	int ic2=1+2*(imc+nmass*(1+2*ims));
	printf("%d %d\n",ic1,ic2);

	//take into account corr: average the two correlators
	jvec corr(T,njack);
	jvec corr1(T,njack);
	jvec corr2(T,njack);
	corr1.put(buf+ic1*T*(njack+1));
	corr2.put(buf+ic2*T*(njack+1));
	corr=(corr1+corr2)/2;

	//choose the index of the fitting interval:
	//heavier quarks plateau later, so use dedicated windows
	if(ims>=nlights) ifit_int=2;
	else if(imc>=nlights) ifit_int=1;
	else ifit_int=0;

	//simmetrize
	corr=corr.simmetrized(parity);
	int ttmin=tmin[ifit_int];
	int ttmax=tmax[ifit_int];

	// Starting guesses: effective mass fit, and Z2 from the corr/model ratio
	jvec Mcor=effective_mass(corr),Z2cor(TH+1,njack);
	jack Meff=constant_fit(Mcor,ttmin,ttmax);
	for(int t=0;t<=TH;t++)
	  for(int ijack=0;ijack<=njack;ijack++)
	    Z2cor[t].data[ijack]=corr[t].data[ijack]/fun_fit(1,Meff[ijack],t);
	jack Z2eff=constant_fit(Z2cor,ttmin,ttmax);
	// Seed MINUIT only when the guess is sane (NaN guesses would poison the fit)
	if(!isnan(Z2eff[0])) minu.DefineParameter(0,"Z2",Z2eff[0],Z2eff.err(),0,2*Z2eff[0]);
	if(!isnan(Meff[0])) minu.DefineParameter(1,"M",Meff[0],Meff.err(),0,2*Meff[0]);

	// Errors are common to all jackknives
	for(int t=tmin[ifit_int];t<=tmax[ifit_int];t++) corr_err[t]=corr.data[t].err();

	//jacknife analysis: one MIGRAD minimization per jackknife sample
	for(int ijack=0;ijack<njack+1;ijack++)
	  {
	    //copy data so that glob function may access it
	    for(int t=tmin[ifit_int];t<=tmax[ifit_int];t++) corr_fit[t]=corr.data[t].data[ijack];

	    //fit
	    double dum;
	    minu.Migrad();
	    minu.GetParameter(0,Z2.data[ic].data[ijack],dum);
	    minu.GetParameter(1,M.data[ic].data[ijack],dum);
	  }

	//if((ims==iml_un||ims==nlights-1||ims==nlights||ims==nmass-1)&&
	//(imc==iml_un||imc==nlights-1||imc==nlights||imc==nmass-1))
	{
	  //plot eff mass: data plus a rectangle showing the fitted band
	  {
	    ofstream out(combine("eff_mass_plot_%02d_%02d.xmg",ims,imc).c_str());
	    out<<"@type xydy"<<endl;
	    out<<"@s0 line type 0"<<endl;
	    out<<Mcor<<endl;
	    out<<"&"<<endl;
	    out<<"@type xy"<<endl;
	    double av_mass=M[ic].med();
	    double er_mass=M[ic].err();
	    out<<tmin[ifit_int]<<" "<<av_mass-er_mass<<endl;
	    out<<tmax[ifit_int]<<" "<<av_mass-er_mass<<endl;
	    out<<tmax[ifit_int]<<" "<<av_mass+er_mass<<endl;
	    out<<tmin[ifit_int]<<" "<<av_mass+er_mass<<endl;
	    out<<tmin[ifit_int]<<" "<<av_mass-er_mass<<endl;
	  }

	  //plot fun: correlator data overlaid with the fitted model curve
	  {
	    ofstream out(combine("fun_plot_%02d_%02d.xmg",ims,imc).c_str());
	    out<<"@type xydy"<<endl;
	    out<<"@s0 line type 0"<<endl;
	    out<<corr<<endl;
	    out<<"&"<<endl;
	    out<<"@type xy"<<endl;
	    for(int t=tmin[ifit_int];t<tmax[ifit_int];t++)
	      out<<t<<" "<<fun_fit(Z2[ic][njack],M[ic][njack],t)<<endl;
	  }
	}

	// Summary line; last factor presumably the pseudoscalar decay constant — confirm
	cout<<mass[ims]<<" "<<mass[imc]<<" "<<M[ic]<<" "<<Z2[ic]<<" "<<sqrt(Z2[ic])/(sinh(M[ic])*M[ic])*(mass[ims]+mass[imc])<<endl;
	ic++;
      }

  // Fitted masses vs charm mass, one xmgrace set per source mass
  ofstream out("fitted_mass.xmg");
  out<<"@type xydy"<<endl;
  for(int ims=0;ims<nmass;ims++)
    {
      //out<<"s0 line type 0"<<endl;
      for(int imc=0;imc<nmass;imc++)
	{
	  int ic=imc+nmass*ims;
	  out<<mass[imc]<<" "<<M[ic]<<endl;
	}
      out<<"&"<<endl;
    }

  // Persist results: M first, Z2 appended to the same binary file
  M.write_to_binfile(out_file);
  Z2.append_to_binfile(out_file);

  return 0;
}
main (int argc, char *argv[]) { int i, j, **seqs, **nall, ord=1, ns, **pij, lkf=0, npt=0, pnew=0, anc=0; int tcat=1, rcat=0, verb=1, miss=0, *flocs; int sw_flag=0, moment_flag=0, rmin_flag=0, sim_flag=0, test_flag=0; char fname[MAXNAME+1], **seqnames; long seed=-setseed(); extern int sizeofpset; double *locs; double **lkmat, *lkres; FILE *ifp=NULL, *ifp2=NULL, *ifp3=NULL, *tfp; struct site_type **pset; struct data_sum *data; int ask_questions = 1; char *in_str; print_help(argc, argv); idum = &seed; data = malloc((size_t) sizeof(struct data_sum)); data->exact = 0; strcpy(data->prefix, ""); for(i = 0; i < argc; i++) { if(*argv[i] == '-') { in_str = argv[i]; ask_questions = 0; if(strcmp(in_str, "-seq") == 0) ifp = fopen(argv[i+1], "r"); if(strcmp(in_str, "-loc") == 0) ifp2 = fopen(argv[i+1], "r"); if(strcmp(in_str, "-lk") == 0) { lkf = 1; ifp3 = fopen(argv[i+1], "r"); } if(strcmp(in_str, "-exact") == 0) data->exact = 1; if(strcmp(in_str, "-concise") == 0) verb=0; if(strcmp(in_str, "-window") == 0) sw_flag=1; if(strcmp(in_str, "-moment") == 0) moment_flag=1; if(strcmp(in_str, "-simulate") == 0) sim_flag=1; if(strcmp(in_str, "-rmin_flag") == 0) rmin_flag=2; if(strcmp(in_str, "-test") == 0) test_flag=1; if(strcmp(in_str, "-prefix") == 0) strcpy(data->prefix, argv[i+1]); } } if (ifp == NULL) { printf("\nCould not find seqs file in command line.\n"); printf("\nInput filename for seqs:\n"); scanf("%s", &fname); ifp = fopen(fname, "r"); } if (ifp == NULL) nrerror("Error in opening sequence file"); fscanf(ifp,"%i%i%i", &data->nseq, &data->lseq, &data->hd); if ((data->nseq < 2) || (data->lseq < 2)) {printf("\n\nInsufficient data for analysis (n > 1, L > 1) \n\n"); exit(1);} if (data->nseq > SEQ_MAX) {printf("\n\nMore than max no. 
sequences: Using first %i for analysis\n\n", SEQ_MAX); data->nseq=SEQ_MAX;} printf("\nAnalysing %i (n=%i) sequences of length %i seg sites\n", data->nseq, data->hd, data->lseq); seqs = imatrix(1, data->nseq, 1, data->lseq); seqnames = cmatrix(1, data->nseq+11, 1, MAXNAME+11); if (read_fasta(seqs, ifp, data->nseq, data->lseq, seqnames)) printf("\nSequences read succesfully\n"); fclose(ifp); nall = imatrix(1, data->lseq, 1, 6); allele_count(seqs, data->nseq, data->lseq, nall,1, data->hd, data->prefix); /*Store lnfac values in array for speed of computation*/ lnfac_array = (double *) malloc((size_t) ((int) (data->nseq+2)*(data->hd))*sizeof(double)); lnfac_array[0]=lnfac_array[1]=0; for (j=2;j<=((int) data->nseq*(data->hd));j++) lnfac_array[j]=(double) lnfac_array[j-1]+log(j); /*Open file with location of seg sites and read in data*/ if (ifp2 == NULL) { printf("\nCould not find locs file in command line.\n"); printf("\nInput name of file containing location of seg sites\n\n"); scanf("%s", &fname); ifp2 = fopen(fname, "r"); } if (ifp2 == NULL) nrerror("Cannot open loc file"); fscanf(ifp2, "%i %lf %c", &ns, &data->tlseq, &data->lc); if (ns != data->lseq) nrerror("Lseq and Locs disagree"); if ((data->lc != 'C')&&(data->lc != 'L')) nrerror("Must input linear(L)/conversion(C)"); if (data->lc == 'C') { data->avc=0; while (data->avc <= 0) { printf("\n\nInput average tract length for conversion model: ");scanf("%lf", &(data->avc)); } } locs = dvector(1, data->lseq); flocs = ivector(1, data->lseq); /*Array to use when simulating data*/ for (i=1; i<=data->lseq; i++) { fscanf(ifp2, "%lf", &locs[i]); if ((locs[i]==0)||(locs[i]>data->tlseq)) {printf("\n\nError in Loc file\n\n%lf\n", data->tlseq); exit(1);} if (i>1 && locs[i]<=locs[i-1]) nrerror("Error in locs file: SNPs must be montonically increasing"); } printf("\nLocation of seg sites\n\n"); for (i=1; i<=data->lseq; i++) printf("%3i %4.2lf\n", i, locs[i]); fclose(ifp2); /*Read in likelihood file where needed*/ if (ask_questions) 
{ printf("\n\nUse existing likelihood file? (yes=1, no=0):"); scanf("%i", &lkf); /*lkf is a flag: 1 means use existing likelihood file as starting point*/ if (lkf) { printf("\n\nInput name of likelihood file: "); scanf("%s", &fname); ifp3 = fopen(fname, "r"); } else data->exact=0; if (lkf == 1) { printf("\n\nIs likelihood file an exact match to data?(no=0/yes=1): "); scanf("%i", &data->exact); } } if (lkf && !ifp3) nrerror("Cannot open likelihood file"); if (!lkf && data->hd==2) nrerror("For diploid data need complete lookup table for sequences"); /*Store pair-types in pij matrix - classify in pair_spectrum routine*/ data->w = data->lseq; /*Note for this program use all data - pair_int restricts to a smaller window*/ pij = imatrix((int) 1,(int) data->lseq,(int) 1,(int) data->w); for (i=1;i<=data->lseq;i++) for (j=1;j<=data->w;j++) pij[i][j]=0; pset = init_pset(pset, lkf, ifp3, &npt, data); /*Reads in type configurations from likelihood file*/ printf("\n\n*** Calculating distribution of pair types ***\n\n"); pset = pair_spectrum(seqs, data, nall, pset, &npt, &pnew, &miss, anc, pij); printf("\n\n *** Completed classification of pair types ***\n\n"); if (data->exact && (pnew || miss)) nrerror("Lookup table is not exact for sequences\n(possibly generated by interval)"); printf("\n\nOld = %i: New = %i: Missing = %i\n\n", npt,pnew,miss); data->ptt = (int) npt+pnew+miss; /*npt is number from likelihood file, pnew is number new with no missing data, miss is # new with missing data*/ if (verb) { strcpy(fname, data->prefix); tfp = fopen(strcat(fname, "type_table.txt"), "w"); if (!tfp) nrerror("Cannot open type file"); type_print(pij, data->lseq, data->w,tfp); fclose(tfp); } if (verb) print_pairs(stdout, pset, npt+pnew, data->hd, data->nseq); /*Need a complete set for missing data or diploid data - check this*/ if (!data->exact && (data->hd ==2 || miss)) { printf("\n\nMissing data or diploid: checking that likelihood table is exhaustive\n\n"); 
check_exhaustive(pset,npt,(data->nseq)*((int) data->hd)); } /*Read parameters and likelihoods from likelihood file - where appropriate*/ if (lkf) { read_pars(ifp3, &tcat, &data->th, &data->rcat, &data->rmax); lkmat = dmatrix(1,npt+pnew+miss,1,data->rcat); if (lkf) read_lk(ifp3, lkmat, npt, tcat, data->rcat); } /*If haploid, but novel types, need to calculate new likelihoods and input parameter values*/ if (data->hd ==1 && pnew) { /*Note can have pnew for diploid data, but this has been checked for already*/ if (!lkf) { data->th=data->rmax=-1.0; data->rcat=0; printf("\n\nInput theta per site (suggest Watterson estimate of %.5lf):",(double) data->lseq/(watterson(data->nseq*data->hd)*data->tlseq)); while (data->th<0.0) scanf("%lf", &data->th); printf("\n\nMax 4Ner for grid (suggest 100):"); while(data->rmax<0.0) scanf("%lf", &data->rmax); printf("\n\nNumber of points on grid (suggest 101, min=2):"); while(data->rcat<2) scanf("%i", &data->rcat); lkmat = dmatrix(1,npt+pnew+miss,1,data->rcat); } lk_est(pset,npt,pnew,lkmat,data->th,data->rcat,data->rmax); data->exact=1; } /*Sum over missing data or resolve genotypes and sum over missing data+configurations*/ else if (miss && data->hd==1) { printf("\n\n*** Calculating likelihoods for missing data ***\n\n"); for (i=1;i<=miss;i++) { lk_miss(pset[npt+i],lkmat[npt+i],lkmat,data); printf("\rType %i", i); } printf(" ...Done!\n\n"); } /*Sum over resolutions for diploid data*/ else if (data->hd==2 && !data->exact) { printf("\n\n*** Resolving diploid data: %i ***\n\n",pnew+miss); lkres = dvector(1,data->rcat); for (i=1;i<=pnew+miss;i++) { lk_resolve(lkres,pset[npt+i],lkmat[npt+i],lkmat,data); printf("\rType %i", i); } free_dvector(lkres,1,data->rcat); printf(" ...Done!\n\n"); } /*If new likelihood generated can output likelihood file for future analyses*/ if (verb) print_lks(pset, data, npt+pnew+miss, lkmat); /*Basic analysis - estimation of 4Ner asuming constant rate*/ data->rme=data->rmax; data->rce=data->rcat; if (1) { 
printf("\n\nDo you wish to change grid over which to estimate likelihoods for (default = %i points, 4Ner 0 - %.1lf) (1/0) :",data->rcat,data->rmax); scanf("%i", &lkf); if (lkf) { data->rme=-10; data->rce=0; printf("\n\nMax 4Ner for estimation : "); while (data->rme < 0.0) scanf("%lf", &data->rme); printf("\n\nNumber of classes to estimate for: "); while (data->rce < 1) scanf("%i", &data->rce); } } data->lksurf = dmatrix(1,data->rce,1,2); lk_surf(pset, pij, data, lkmat, data->th, locs, 1); /*Print marginal likelihood ratio test statistics for each pair of sites*/ printf("\n\nCalculating fits\n\n"); fit_pwlk(data,pij,locs,lkmat,verb); /*Sliding windows version*/ if (1) { printf("\n\nDo you wish to carry out a sliding windows analysis? (yes=1/no=0):"); scanf("%i", &sw_flag); } if (sw_flag) lk_win(pset,pij,data,lkmat,locs,nall); /*Nonparametric estimation of recombination rate*/ if (1) { printf("\n\nPrint out table of Rmin values?\n(0=No, 1=Total only, 2=Full table):"); scanf("%i", &rmin_flag); } if (rmin_flag) { rmin(data, pset, pij, locs, lkf-1); printf("\n\nLower bound on Rmin = %i\n\n",data->rmin); } /*Estimate 4Ner by Wakeley 1997 method*/ if (1) { printf("\n\nEstimate 4Ner by moment method? (yes=1, no=0)"); scanf("%i", &moment_flag); } if (moment_flag) wakeley_est(data, seqs, locs); /*Recombination tests - only available for haploid data!*/ if (data->hd==1) { if (1) { printf("\n\nDo you wish to test for recombination? (yes=1, no=0): "); scanf("%i", &test_flag); } if (test_flag) { rec_test(data, pij, locs, lkmat, pset, npt+pnew+miss); } } /*Conditional simulation - only available for haploid data with a complete lk file*/ if (data->hd==1 && !(data->exact)) { if (1) { printf("\n\nDo you wish to test constant-rate model and estimate sampling distribution by simulation? (yes=1/no=0): "); scanf("%i", &test_flag); } if (test_flag) { freq_min(locs, flocs, nall, data); printf("\n\nHow many simulations? 
"); scanf("%i", &lkf); snp_sim(locs, flocs, pset, lkmat, lkf, data); } } free_imatrix(pij,1,data->lseq,1,data->w); free_imatrix(seqs,1,data->nseq,1,data->lseq); free_imatrix(nall,1,data->lseq,1,5); for (i=1;i<sizeofpset;i++) free(pset[i]); free(pset); free(data); free_dvector(locs, 1, data->lseq); free_ivector(flocs, 1, data->lseq); /* system("PAUSE"); */ }
// ROOT macro: builds the QCD-model high-ΔΦ/low-ΔΦ (H/L) ratio prediction per
// search bin and a second histogram of the (QCD MC - model) ratio difference,
// then saves all histograms to outputfiles/model-ratio-hist1.root.
// Relies on globals set by setup_bins()/read_pars(): nb_nj, nb_nb, nb_htmht,
// nb_global_after_exclusion and the par_val_*/par_err_* parameter arrays.
void create_model_ratio_hist1( const char* model_pars_file = "outputfiles/model-pars-qcdmc3.txt",
                               const char* qcd_ratio_file = "outputfiles/qcdmc-ratio-v3.root" ) {

   setup_bins();
   gDirectory -> Delete( "h*" ) ;          // clear stale histograms from a previous run
   loadHist( qcd_ratio_file, "qcdmc" ) ;   // loads the h_*_qcdmc histograms used below
   read_pars( model_pars_file ) ;          // fills the par_val_*/par_err_* arrays

   // Model prediction per (Nj, Nb, HT/MHT) search bin, after bin exclusion
   TH1F* h_ratio_all = new TH1F( "h_ratio_all", "QCD model H/L ratio", nb_global_after_exclusion, 0.5, nb_global_after_exclusion + 0.5 ) ;

   TH1F* h_max_ldp_weight_search_bins = get_hist( "h_max_ldp_weight_search_bins_qcdmc" ) ;
   TH1F* h_ldp_search_bins = get_hist( "h_ldp_search_bins_qcdmc" ) ;
   TH1F* h_hdp_search_bins = get_hist( "h_hdp_search_bins_qcdmc" ) ;
   TH1F* h_ratio_qcdmc = get_hist( "h_ratio_qcdmc" ) ;

   int bi_hist_with_exclusion(0) ;   // running histogram bin index over non-excluded bins
   for ( int bi_nj=1; bi_nj<=nb_nj; bi_nj++ ) {
      for ( int bi_nb=1; bi_nb<=nb_nb; bi_nb++ ) {
         // bi_htmht starts at 4 — presumably skipping the lowest HT/MHT bins; confirm
         for ( int bi_htmht=4; bi_htmht<=nb_htmht; bi_htmht++ ) {

            if ( is_this_bin_excluded(bi_nj-1, bi_nb-1, bi_htmht-1) ) continue;
            bi_hist_with_exclusion++; // these few lines should be changed when we update the code that produces qcdmc-ratio-v3.root
            int bi_ht, bi_mht ;
            htmht_bin_to_ht_and_mht_bins( bi_htmht, bi_ht, bi_mht ) ;
            char label[100] ;
            sprintf( label, " %3d Nj%d-Nb%d-MHT%d-HT%d (%d)", bi_hist_with_exclusion, bi_nj, bi_nb-1, bi_mht-1, bi_ht, bi_htmht-3 ) ;

            // Model: factorized product of the HT, Njet, HT x MHT and Nb parameters;
            // error propagated in quadrature over the fit and syst components
            double model_ratio_val = 0;
            double model_ratio_err = 0;
            model_ratio_val = par_val_ht[bi_ht] * par_val_njet[bi_nj] * par_val_ht_mht[bi_ht][bi_mht] * par_val_nb[bi_nb] ;
            model_ratio_err = model_ratio_val * sqrt(
                 pow( par_err_ht_fit[bi_ht]/par_val_ht[bi_ht], 2. )
               + pow( par_err_ht_syst[bi_ht]/par_val_ht[bi_ht], 2. )
               + pow( par_err_njet_fit[bi_nj]/par_val_njet[bi_nj], 2. )
               + pow( par_err_njet_syst[bi_nj]/par_val_njet[bi_nj], 2. )
               + pow( par_err_ht_mht[bi_ht][bi_mht]/par_val_ht_mht[bi_ht][bi_mht], 2. )
               + pow( par_err_nb[bi_nb]/par_val_nb[bi_nb], 2. ) ) ;

            printf(" %s : Nj %6.4f Nb %6.4f MHT %6.4f HT %6.4f model ratio = %6.4f +/- %6.4f\n", label, par_val_njet[bi_nj], par_val_nb[bi_nb], par_val_ht_mht[bi_ht][bi_mht], par_val_ht[bi_ht], model_ratio_val, model_ratio_err ) ;

            h_ratio_all -> GetXaxis() -> SetBinLabel( bi_hist_with_exclusion, label ) ;
            h_ratio_all -> SetBinContent( bi_hist_with_exclusion, model_ratio_val ) ;
            h_ratio_all -> SetBinError( bi_hist_with_exclusion, model_ratio_err ) ;

         } // bi_htmht
      } // bi_nb
   } // bi_nj

   // Draw the model ratio with vertical bin labels
   gStyle -> SetOptStat(0) ;
   gStyle -> SetPadBottomMargin(0.30) ;
   h_ratio_all -> SetMarkerStyle( 22 ) ;
   h_ratio_all -> SetMarkerColor( 2 ) ;
   h_ratio_all -> GetXaxis() -> LabelsOption("v") ;
   h_ratio_all -> Draw() ;
   gPad -> SetGridy(1) ;

   //---------------

   // Difference histogram: (QCD MC ratio) - (model ratio), same binning
   TH1F* h_ratio_qcdmc_minus_model = new TH1F( "h_ratio_qcdmc_minus_model", "QCD H/L ratio difference (QCD MC - model)", nb_global_after_exclusion, 0.5, nb_global_after_exclusion + 0.5 ) ;

   printf("\n\n") ;
   bi_hist_with_exclusion = 0;
   // Second pass over the identical bin loop so histogram bins line up
   for ( int bi_nj=1; bi_nj<=nb_nj; bi_nj++ ) {
      for ( int bi_nb=1; bi_nb<=nb_nb; bi_nb++ ) {
         for ( int bi_htmht=4; bi_htmht<=nb_htmht; bi_htmht++ ) {

            if ( is_this_bin_excluded(bi_nj-1, bi_nb-1, bi_htmht-1) ) continue;
            bi_hist_with_exclusion++; // these few lines should be changed when we update the code that produces qcdmc-ratio-v3.root

            float model_val = h_ratio_all -> GetBinContent( bi_hist_with_exclusion ) ;
            float qcdmc_val = h_ratio_qcdmc -> GetBinContent( bi_hist_with_exclusion ) ;
            float ldp_val = h_ldp_search_bins -> GetBinContent( bi_hist_with_exclusion ) ;
            float hdp_val = h_hdp_search_bins -> GetBinContent( bi_hist_with_exclusion ) ;
            float max_ldp_weight = h_max_ldp_weight_search_bins -> GetBinContent( bi_hist_with_exclusion ) ;

            char label[100] ;
            sprintf( label, "%s", h_ratio_all -> GetXaxis() -> GetBinLabel( bi_hist_with_exclusion ) ) ;

            float diff_val(0.) ;
            float diff_err(0.) ;

            printf(" debug1 : model bin label = %s , qcdmc bin label = %s\n", h_ratio_all -> GetXaxis() -> GetBinLabel( bi_hist_with_exclusion ), h_ratio_qcdmc -> GetXaxis() -> GetBinLabel( bi_hist_with_exclusion ) ) ;

            if ( hdp_val > 0 ) {
               diff_val = qcdmc_val - model_val ;
               std::cout << qcdmc_val << " " << model_val << " " << diff_val << std::endl;
               // NOTE(review): the error is set equal to the difference itself
               // (100% uncertainty on the deviation) — confirm this is intended
               diff_err = diff_val ;
               printf(" %40s : LDP %7.1f HDP %7.1f max LDP weight %5.3f, diff err = %5.3f\n", label, ldp_val, hdp_val, max_ldp_weight, diff_err ) ;
            } else {
               diff_val = 0. ;
               if ( ldp_val > 0 ) {
                  // No HDP events: take the largest single-event ratio as the error
                  diff_err = max_ldp_weight / ldp_val ;
                  printf(" %40s : LDP %7.1f HDP %7.1f max LDP weight %5.3f, zero HDP H/L err = %5.3f\n", label, ldp_val, hdp_val, max_ldp_weight, diff_err ) ;
               } else {
                  //diff_err = 0.5 ;
                  //diff_err = 0.2;
                  diff_err = 0.0;
                  printf(" %40s : LDP %7.1f HDP %7.1f max LDP weight %5.3f, *** both zero\n", label, ldp_val, hdp_val, max_ldp_weight ) ;
               }
            }

            h_ratio_qcdmc_minus_model -> SetBinContent( bi_hist_with_exclusion, diff_val ) ;
            h_ratio_qcdmc_minus_model -> SetBinError( bi_hist_with_exclusion, diff_err ) ;
            h_ratio_qcdmc_minus_model -> GetXaxis() -> SetBinLabel( bi_hist_with_exclusion, label ) ;

         } // bi_htmht
      }//bi_nb
   }//bi_nj
   printf("\n\n") ;

   h_ratio_qcdmc_minus_model -> GetXaxis() -> LabelsOption( "v" ) ;

   saveHist("outputfiles/model-ratio-hist1.root", "h*" ) ;

} // create_model_ratio_hist1