Example #1
int gmx_analyze(int argc,char *argv[])
{
  static const char *desc[] = {
    "g_analyze reads an ascii file and analyzes data sets.",
    "A line in the input file may start with a time",
    "(see option [TT]-time[tt]) and any number of y values may follow.",
    "Multiple sets can also be",
    "read when they are seperated by & (option [TT]-n[tt]),",
    "in this case only one y value is read from each line.",
    "All lines starting with # and @ are skipped.",
    "All analyses can also be done for the derivative of a set",
    "(option [TT]-d[tt]).[PAR]",

    "All options, except for [TT]-av[tt] and [TT]-power[tt] assume that the",
    "points are equidistant in time.[PAR]",

    "g_analyze always shows the average and standard deviation of each",
    "set. For each set it also shows the relative deviation of the third",
    "and forth cumulant from those of a Gaussian distribution with the same",
    "standard deviation.[PAR]",

    "Option [TT]-ac[tt] produces the autocorrelation function(s).[PAR]",
    
    "Option [TT]-cc[tt] plots the resemblance of set i with a cosine of",
    "i/2 periods. The formula is:[BR]"
    "2 (int0-T y(t) cos(i pi t) dt)^2 / int0-T y(t) y(t) dt[BR]",
    "This is useful for principal components obtained from covariance",
    "analysis, since the principal components of random diffusion are",
    "pure cosines.[PAR]",
    
    "Option [TT]-msd[tt] produces the mean square displacement(s).[PAR]",
    
    "Option [TT]-dist[tt] produces distribution plot(s).[PAR]",
    
    "Option [TT]-av[tt] produces the average over the sets.",
    "Error bars can be added with the option [TT]-errbar[tt].",
    "The errorbars can represent the standard deviation, the error",
    "(assuming the points are independent) or the interval containing",
    "90% of the points, by discarding 5% of the points at the top and",
    "the bottom.[PAR]",
    
    "Option [TT]-ee[tt] produces error estimates using block averaging.",
    "A set is divided in a number of blocks and averages are calculated for",
    "each block. The error for the total average is calculated from",
    "the variance between averages of the m blocks B_i as follows:",
    "error^2 = Sum (B_i - <B>)^2 / (m*(m-1)).",
    "These errors are plotted as a function of the block size.",
    "Also an analytical block average curve is plotted, assuming",
    "that the autocorrelation is a sum of two exponentials.",
    "The analytical curve for the block average is:[BR]",
    "f(t) = sigma sqrt(2/T (  a   (tau1 ((exp(-t/tau1) - 1) tau1/t + 1)) +[BR]",
    "                       (1-a) (tau2 ((exp(-t/tau2) - 1) tau2/t + 1)))),[BR]"
    "where T is the total time.",
    "a, tau1 and tau2 are obtained by fitting f^2(t) to error^2.",
    "When the actual block average is very close to the analytical curve,",
    "the error is sigma*sqrt(2/T (a tau1 + (1-a) tau2)).",
    "The complete derivation is given in",
    "B. Hess, J. Chem. Phys. 116:209-217, 2002.[PAR]",

    "Option [TT]-filter[tt] prints the RMS high-frequency fluctuation",
    "of each set and over all sets with respect to a filtered average.",
    "The filter is proportional to cos(pi t/len) where t goes from -len/2",
    "to len/2. len is supplied with the option [TT]-filter[tt].",
    "This filter reduces oscillations with period len/2 and len by a factor",
    "of 0.79 and 0.33 respectively.[PAR]",

    "Option [TT]-g[tt] fits the data to the function given with option",
    "[TT]-fitfn[tt].[PAR]",
    
    "Option [TT]-power[tt] fits the data to b t^a, which is accomplished",
    "by fitting to a t + b on log-log scale. All points after the first",
    "zero or negative value are ignored.[PAR]"
    
    "Option [TT]-luzar[tt] performs a Luzar & Chandler kinetics analysis",
    "on output from [TT]g_hbond[tt]. The input file can be taken directly",
    "from [TT]g_hbond -ac[tt], and then the same result should be produced."
  };
  static real tb=-1,te=-1,frac=0.5,filtlen=0,binwidth=0.1,aver_start=0;
  static bool bHaveT=TRUE,bDer=FALSE,bSubAv=TRUE,bAverCorr=FALSE,bXYdy=FALSE;
  static bool bEESEF=FALSE,bEENLC=FALSE,bEeFitAc=FALSE,bPower=FALSE;
  static bool bIntegrate=FALSE,bRegression=FALSE,bLuzar=FALSE,bLuzarError=FALSE; 
  static int  nsets_in=1,d=1,nb_min=4,resol=10;
  static real temp=298.15,fit_start=1,smooth_tail_start=-1;
  
  /* must correspond to enum avbar* declared at beginning of file */
  static const char *avbar_opt[avbarNR+1] = { 
    NULL, "none", "stddev", "error", "90", NULL
  };

  t_pargs pa[] = {
    { "-time",    FALSE, etBOOL, {&bHaveT},
      "Expect a time in the input" },
    { "-b",       FALSE, etREAL, {&tb},
      "First time to read from set" },
    { "-e",       FALSE, etREAL, {&te},
      "Last time to read from set" },
    { "-n",       FALSE, etINT, {&nsets_in},
      "Read # sets seperated by &" },
    { "-d",       FALSE, etBOOL, {&bDer},
	"Use the derivative" },
    { "-dp",      FALSE, etINT, {&d}, 
      "HIDDENThe derivative is the difference over # points" },
    { "-bw",      FALSE, etREAL, {&binwidth},
      "Binwidth for the distribution" },
    { "-errbar",  FALSE, etENUM, {avbar_opt},
      "Error bars for -av" },
    { "-integrate",FALSE,etBOOL, {&bIntegrate},
      "Integrate data function(s) numerically using trapezium rule" },
    { "-aver_start",FALSE, etREAL, {&aver_start},
      "Start averaging the integral from here" },
    { "-xydy",    FALSE, etBOOL, {&bXYdy},
      "Interpret second data set as error in the y values for integrating" },
    { "-regression",FALSE,etBOOL,{&bRegression},
      "Perform a linear regression analysis on the data" },
    { "-luzar",   FALSE, etBOOL, {&bLuzar},
      "Do a Luzar and Chandler analysis on a correlation function and related as produced by g_hbond. When in addition the -xydy flag is given the second and fourth column will be interpreted as errors in c(t) and n(t)." },
    { "-temp",    FALSE, etREAL, {&temp},
      "Temperature for the Luzar hydrogen bonding kinetics analysis" },
    { "-fitstart", FALSE, etREAL, {&fit_start},
      "Time (ps) from which to start fitting the correlation functions in order to obtain the forward and backward rate constants for HB breaking and formation" }, 
    { "-smooth",FALSE, etREAL, {&smooth_tail_start},
      "If >= 0, the tail of the ACF will be smoothed by fitting it to an exponential function: y = A exp(-x/tau)" },
    { "-nbmin",   FALSE, etINT, {&nb_min},
      "HIDDENMinimum number of blocks for block averaging" },
    { "-resol", FALSE, etINT, {&resol},
      "HIDDENResolution for the block averaging, block size increases with"
    " a factor 2^(1/#)" },
    { "-eeexpfit", FALSE, etBOOL, {&bEESEF},
      "HIDDENAlways use a single exponential fit for the error estimate" },
    { "-eenlc", FALSE, etBOOL, {&bEENLC},
      "HIDDENAllow a negative long-time correlation" },
    { "-eefitac", FALSE, etBOOL, {&bEeFitAc},
      "HIDDENAlso plot analytical block average using a autocorrelation fit" },
    { "-filter",  FALSE, etREAL, {&filtlen},
      "Print the high-frequency fluctuation after filtering with a cosine filter of length #" },
    { "-power", FALSE, etBOOL, {&bPower},
      "Fit data to: b t^a" },
    { "-subav", FALSE, etBOOL, {&bSubAv},
      "Subtract the average before autocorrelating" },
    { "-oneacf", FALSE, etBOOL, {&bAverCorr},
      "Calculate one ACF over all sets" }
  };
#define NPA asize(pa)

  FILE     *out,*out_fit;
  int      n,nlast,s,nset,i,j=0;
  real     **val,*t,dt,tot,error;
  double   *av,*sig,cum1,cum2,cum3,cum4,db;
  char     *acfile,*msdfile,*ccfile,*distfile,*avfile,*eefile,*fitfile;
  
  t_filenm fnm[] = { 
    { efXVG, "-f",    "graph",    ffREAD   },
    { efXVG, "-ac",   "autocorr", ffOPTWR  },
    { efXVG, "-msd",  "msd",      ffOPTWR  },
    { efXVG, "-cc",   "coscont",  ffOPTWR  },
    { efXVG, "-dist", "distr",    ffOPTWR  },
    { efXVG, "-av",   "average",  ffOPTWR  },
    { efXVG, "-ee",   "errest",   ffOPTWR  },
    { efLOG, "-g",    "fitlog",   ffOPTWR  }
  }; 
#define NFILE asize(fnm) 

  int     npargs;
  t_pargs *ppa;

  npargs = asize(pa); 
  ppa    = add_acf_pargs(&npargs,pa);
  
  CopyRight(stderr,argv[0]); 
  parse_common_args(&argc,argv,PCA_CAN_VIEW,
		    NFILE,fnm,npargs,ppa,asize(desc),desc,0,NULL); 

  acfile   = opt2fn_null("-ac",NFILE,fnm);
  msdfile  = opt2fn_null("-msd",NFILE,fnm);
  ccfile   = opt2fn_null("-cc",NFILE,fnm);
  distfile = opt2fn_null("-dist",NFILE,fnm);
  avfile   = opt2fn_null("-av",NFILE,fnm);
  eefile   = opt2fn_null("-ee",NFILE,fnm);
  if (opt2parg_bSet("-fitfn",npargs,ppa)) 
    fitfile  = opt2fn("-g",NFILE,fnm);
  else
    fitfile  = opt2fn_null("-g",NFILE,fnm);
    
  val=read_xvg_time(opt2fn("-f",NFILE,fnm),bHaveT,
		    opt2parg_bSet("-b",npargs,ppa),tb,
		    opt2parg_bSet("-e",npargs,ppa),te,
		    nsets_in,&nset,&n,&dt,&t);
  printf("Read %d sets of %d points, dt = %g\n\n",nset,n,dt);
  
  if (bDer) {
    printf("Calculating the derivative as (f[i+%d]-f[i])/(%d*dt)\n\n",
	    d,d);
    n -= d;
    for(s=0; s<nset; s++)
      for(i=0; (i<n); i++)
	val[s][i] = (val[s][i+d]-val[s][i])/(d*dt);
  }
  if (bIntegrate) {
    real sum,stddev;
    printf("Calculating the integral using the trapezium rule\n");
    
    if (bXYdy) {
      sum = evaluate_integral(n,t,val[0],val[1],aver_start,&stddev);
      printf("Integral %10.3f +/- %10.5f\n",sum,stddev);
    }
    else {
      for(s=0; s<nset; s++) {
	sum = evaluate_integral(n,t,val[s],NULL,aver_start,&stddev);
	printf("Integral %d  %10.5f  +/- %10.5f\n",s+1,sum,stddev);
      }
    }
  }
  if (fitfile) {
    out_fit = ffopen(fitfile,"w");
    if (bXYdy && nset>=2) {
      do_fit(out_fit,0,TRUE,n,t,val,npargs,ppa);
    } else {
      for(s=0; s<nset; s++)
	do_fit(out_fit,s,FALSE,n,t,val,npargs,ppa);
    }
    fclose(out_fit);
  }

  printf("                                      std. dev.    relative deviation of\n");
  printf("                       standard       ---------   cumulants from those of\n");
  printf("set      average       deviation      sqrt(n-1)   a Gaussian distribition\n");
  printf("                                                      cum. 3   cum. 4\n");
  snew(av,nset);
  snew(sig,nset);
  for(s=0; (s<nset); s++) {
    cum1 = 0;
    cum2 = 0;
    cum3 = 0;
    cum4 = 0;
    for(i=0; (i<n); i++)
      cum1 += val[s][i];
    cum1 /= n;
    for(i=0; (i<n); i++) {
      db = val[s][i]-cum1;
      cum2 += db*db;
      cum3 += db*db*db;
      cum4 += db*db*db*db;
    }
    cum2  /= n;
    cum3  /= n;
    cum4  /= n;
    av[s]  = cum1;
    sig[s] = sqrt(cum2);
    if (n > 1)
      error = sqrt(cum2/(n-1));
    else
      error = 0;
    printf("SS%d  %13.6e   %12.6e   %12.6e      %6.3f   %6.3f\n",
	   s+1,av[s],sig[s],error,
	   sig[s] ? cum3/(sig[s]*sig[s]*sig[s]*sqrt(8/M_PI)) : 0,
	   sig[s] ? cum4/(sig[s]*sig[s]*sig[s]*sig[s]*3)-1 : 0); 
  }
  printf("\n");

  if (filtlen)
    filter(filtlen,n,nset,val,dt);
  
  if (msdfile) {
    out=xvgropen(msdfile,"Mean square displacement",
		 "time","MSD (nm\\S2\\N)");
    nlast = (int)(n*frac);
    for(s=0; s<nset; s++) {
      for(j=0; j<=nlast; j++) {
	if (j % 100 == 0)
	  fprintf(stderr,"\r%d",j);
	tot=0;
	for(i=0; i<n-j; i++)
	  tot += sqr(val[s][i]-val[s][i+j]); 
	tot /= (real)(n-j);
	fprintf(out," %g %8g\n",dt*j,tot);
      }
      if (s<nset-1)
	fprintf(out,"&\n");
    }
    fclose(out);
    fprintf(stderr,"\r%d, time=%g\n",j-1,(j-1)*dt);
  }
  if (ccfile)
    plot_coscont(ccfile,n,nset,val);
  
  if (distfile)
    histogram(distfile,binwidth,n,nset,val);
  if (avfile)
    average(avfile,nenum(avbar_opt),n,nset,val,t);
  if (eefile)
    estimate_error(eefile,nb_min,resol,n,nset,av,sig,val,dt,
		   bEeFitAc,bEESEF,bEENLC);
  if (bPower)
    power_fit(n,nset,val,t);
  if (acfile) {
    if (bSubAv) 
      for(s=0; s<nset; s++)
	for(i=0; i<n; i++)
	  val[s][i] -= av[s];
    do_autocorr(acfile,"Autocorrelation",n,nset,val,dt,
		eacNormal,bAverCorr);
  }
  if (bRegression)
    regression_analysis(n,bXYdy,t,val);

  if (bLuzar) 
    luzar_correl(n,t,nset,val,temp,bXYdy,fit_start,smooth_tail_start);
    
  view_all(NFILE, fnm);
  
  thanx(stderr);

  return 0;
}
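
The [TT]-cc[tt] help text above gives the cosine-content formula only in prose. Below is a minimal standalone sketch of that computation, assuming equidistant points and writing the argument as cos(i pi t/T) so that set i completes i/2 periods over [0,T]; with the dimensionless 2/T normalization, a pure cosine scores close to 1. This is a hypothetical helper, not the plot_coscont() routine the listing actually calls.

#include <math.h>

/* Cosine content of set y[0..n-1] with i/2 cosine periods:
 * c_i = (2/T) (integral_0^T y(t) cos(i pi t/T) dt)^2 / integral_0^T y(t)^2 dt,
 * discretized on an equidistant grid; the grid spacing dt cancels out.
 * Principal components of pure random diffusion give values near 1,
 * which is what the -cc diagnostic checks for. */
static double cosine_content(const double *y, int n, int i)
{
    double num = 0, den = 0;
    int    j;

    for (j = 0; j < n; j++)
    {
        num += y[j]*cos(i*M_PI*j/(double)(n - 1));
        den += y[j]*y[j];
    }
    return 2.0*num*num/((double)(n - 1)*den);
}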
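Likewise, the [TT]-ee[tt] error estimate can be written out directly from the formula in the help text, error^2 = Sum (B_i - <B>)^2 / (m*(m-1)). The sketch below assumes equidistant points, m <= n, and drops the remainder points beyond m whole blocks; the real estimate_error() additionally fits the two-exponential analytical curve described above.

#include <math.h>

/* Error estimate from the m block averages B_i of series y[0..n-1]. */
static double block_average_error(const double *y, int n, int m)
{
    int    len = n/m;            /* points per block; remainder dropped */
    double s = 0, s2 = 0;        /* sum and sum of squares of the B_i   */
    int    b, i;

    for (b = 0; b < m; b++)
    {
        double bav = 0;
        for (i = 0; i < len; i++)
        {
            bav += y[b*len + i];
        }
        bav /= len;
        s   += bav;
        s2  += bav*bav;
    }
    /* Sum (B_i - <B>)^2 equals s2 - s*s/m */
    return sqrt((s2 - s*s/m)/(m*(m - 1)));
}

Plotted against block size (g_analyze grows the block size by factors of 2^(1/resol)), this estimate levels off once blocks are longer than the correlation time, which is the behavior the analytical curve models.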
Example #2
int gmx_analyze(int argc, char *argv[])
{
    static const char *desc[] = {
        "[TT]g_analyze[tt] reads an ASCII file and analyzes data sets.",
        "A line in the input file may start with a time",
        "(see option [TT]-time[tt]) and any number of [IT]y[it]-values may follow.",
        "Multiple sets can also be",
        "read when they are separated by & (option [TT]-n[tt]);",
        "in this case only one [IT]y[it]-value is read from each line.",
        "All lines starting with # and @ are skipped.",
        "All analyses can also be done for the derivative of a set",
        "(option [TT]-d[tt]).[PAR]",

        "All options, except for [TT]-av[tt] and [TT]-power[tt], assume that the",
        "points are equidistant in time.[PAR]",

        "[TT]g_analyze[tt] always shows the average and standard deviation of each",
        "set, as well as the relative deviation of the third",
        "and fourth cumulant from those of a Gaussian distribution with the same",
        "standard deviation.[PAR]",

        "Option [TT]-ac[tt] produces the autocorrelation function(s).",
        "Be sure that the time interval between data points is",
        "much shorter than the time scale of the autocorrelation.[PAR]",

        "Option [TT]-cc[tt] plots the resemblance of set i with a cosine of",
        "i/2 periods. The formula is:[BR]"
        "[MATH]2 ([INT][FROM]0[from][TO]T[to][int] y(t) [COS]i [GRK]pi[grk] t[cos] dt)^2 / [INT][FROM]0[from][TO]T[to][int] y^2(t) dt[math][BR]",
        "This is useful for principal components obtained from covariance",
        "analysis, since the principal components of random diffusion are",
        "pure cosines.[PAR]",

        "Option [TT]-msd[tt] produces the mean square displacement(s).[PAR]",

        "Option [TT]-dist[tt] produces distribution plot(s).[PAR]",

        "Option [TT]-av[tt] produces the average over the sets.",
        "Error bars can be added with the option [TT]-errbar[tt].",
        "The errorbars can represent the standard deviation, the error",
        "(assuming the points are independent) or the interval containing",
        "90% of the points, by discarding 5% of the points at the top and",
        "the bottom.[PAR]",

        "Option [TT]-ee[tt] produces error estimates using block averaging.",
        "A set is divided in a number of blocks and averages are calculated for",
        "each block. The error for the total average is calculated from",
        "the variance between averages of the m blocks B[SUB]i[sub] as follows:",
        "error^2 = [SUM][sum] (B[SUB]i[sub] - [CHEVRON]B[chevron])^2 / (m*(m-1)).",
        "These errors are plotted as a function of the block size.",
        "Also an analytical block average curve is plotted, assuming",
        "that the autocorrelation is a sum of two exponentials.",
        "The analytical curve for the block average is:[BR]",
        "[MATH]f(t) = [GRK]sigma[grk][TT]*[tt][SQRT]2/T (  [GRK]alpha[grk]   ([GRK]tau[grk][SUB]1[sub] (([EXP]-t/[GRK]tau[grk][SUB]1[sub][exp] - 1) [GRK]tau[grk][SUB]1[sub]/t + 1)) +[BR]",
        "                       (1-[GRK]alpha[grk]) ([GRK]tau[grk][SUB]2[sub] (([EXP]-t/[GRK]tau[grk][SUB]2[sub][exp] - 1) [GRK]tau[grk][SUB]2[sub]/t + 1)))[sqrt][math],[BR]"
        "where T is the total time.",
        "[GRK]alpha[grk], [GRK]tau[grk][SUB]1[sub] and [GRK]tau[grk][SUB]2[sub] are obtained by fitting f^2(t) to error^2.",
        "When the actual block average is very close to the analytical curve,",
        "the error is [MATH][GRK]sigma[grk][TT]*[tt][SQRT]2/T (a [GRK]tau[grk][SUB]1[sub] + (1-a) [GRK]tau[grk][SUB]2[sub])[sqrt][math].",
        "The complete derivation is given in",
        "B. Hess, J. Chem. Phys. 116:209-217, 2002.[PAR]",

        "Option [TT]-bal[tt] finds and subtracts the ultrafast \"ballistic\"",
        "component from a hydrogen bond autocorrelation function by the fitting",
        "of a sum of exponentials, as described in e.g.",
        "O. Markovitch, J. Chem. Phys. 129:084505, 2008. The fastest term",
        "is the one with the most negative coefficient in the exponential,",
        "or with [TT]-d[tt], the one with most negative time derivative at time 0.",
        "[TT]-nbalexp[tt] sets the number of exponentials to fit.[PAR]",

        "Option [TT]-gem[tt] fits bimolecular rate constants ka and kb",
        "(and optionally kD) to the hydrogen bond autocorrelation function",
        "according to the reversible geminate recombination model. Removal of",
        "the ballistic component first is strongly advised. The model is presented in",
        "O. Markovitch, J. Chem. Phys. 129:084505, 2008.[PAR]",

        "Option [TT]-filter[tt] prints the RMS high-frequency fluctuation",
        "of each set and over all sets with respect to a filtered average.",
        "The filter is proportional to cos([GRK]pi[grk] t/len) where t goes from -len/2",
        "to len/2. len is supplied with the option [TT]-filter[tt].",
        "This filter reduces oscillations with period len/2 and len by a factor",
        "of 0.79 and 0.33 respectively.[PAR]",

        "Option [TT]-g[tt] fits the data to the function given with option",
        "[TT]-fitfn[tt].[PAR]",

        "Option [TT]-power[tt] fits the data to [MATH]b t^a[math], which is accomplished",
        "by fitting to [MATH]a t + b[math] on log-log scale. All points after the first",
        "zero or with a negative value are ignored.[PAR]"

        "Option [TT]-luzar[tt] performs a Luzar & Chandler kinetics analysis",
        "on output from [TT]g_hbond[tt]. The input file can be taken directly",
        "from [TT]g_hbond -ac[tt], and then the same result should be produced."
    };
    static real        tb         = -1, te = -1, frac = 0.5, filtlen = 0, binwidth = 0.1, aver_start = 0;
    static gmx_bool    bHaveT     = TRUE, bDer = FALSE, bSubAv = TRUE, bAverCorr = FALSE, bXYdy = FALSE;
    static gmx_bool    bEESEF     = FALSE, bEENLC = FALSE, bEeFitAc = FALSE, bPower = FALSE;
    static gmx_bool    bIntegrate = FALSE, bRegression = FALSE, bLuzar = FALSE, bLuzarError = FALSE;
    static int         nsets_in   = 1, d = 1, nb_min = 4, resol = 10, nBalExp = 4, nFitPoints = 100;
    static real        temp       = 298.15, fit_start = 1, fit_end = 60, smooth_tail_start = -1, balTime = 0.2, diffusion = 5e-5, rcut = 0.35;

    /* must correspond to enum avbar* declared at beginning of file */
    static const char *avbar_opt[avbarNR+1] = {
        NULL, "none", "stddev", "error", "90", NULL
    };

    t_pargs            pa[] = {
        { "-time",    FALSE, etBOOL, {&bHaveT},
          "Expect a time in the input" },
        { "-b",       FALSE, etREAL, {&tb},
          "First time to read from set" },
        { "-e",       FALSE, etREAL, {&te},
          "Last time to read from set" },
        { "-n",       FALSE, etINT, {&nsets_in},
          "Read this number of sets separated by &" },
        { "-d",       FALSE, etBOOL, {&bDer},
          "Use the derivative" },
        { "-dp",      FALSE, etINT, {&d},
          "HIDDENThe derivative is the difference over this number of points" },
        { "-bw",      FALSE, etREAL, {&binwidth},
          "Binwidth for the distribution" },
        { "-errbar",  FALSE, etENUM, {avbar_opt},
          "Error bars for [TT]-av[tt]" },
        { "-integrate", FALSE, etBOOL, {&bIntegrate},
          "Integrate data function(s) numerically using trapezium rule" },
        { "-aver_start", FALSE, etREAL, {&aver_start},
          "Start averaging the integral from here" },
        { "-xydy",    FALSE, etBOOL, {&bXYdy},
          "Interpret second data set as error in the y values for integrating" },
        { "-regression", FALSE, etBOOL, {&bRegression},
          "Perform a linear regression analysis on the data. If [TT]-xydy[tt] is set a second set will be interpreted as the error bar in the Y value. Otherwise, if multiple data sets are present a multilinear regression will be performed yielding the constant A that minimize [MATH][GRK]chi[grk]^2 = (y - A[SUB]0[sub] x[SUB]0[sub] - A[SUB]1[sub] x[SUB]1[sub] - ... - A[SUB]N[sub] x[SUB]N[sub])^2[math] where now Y is the first data set in the input file and x[SUB]i[sub] the others. Do read the information at the option [TT]-time[tt]." },
        { "-luzar",   FALSE, etBOOL, {&bLuzar},
          "Do a Luzar and Chandler analysis on a correlation function and related as produced by [TT]g_hbond[tt]. When in addition the [TT]-xydy[tt] flag is given the second and fourth column will be interpreted as errors in c(t) and n(t)." },
        { "-temp",    FALSE, etREAL, {&temp},
          "Temperature for the Luzar hydrogen bonding kinetics analysis (K)" },
        { "-fitstart", FALSE, etREAL, {&fit_start},
          "Time (ps) from which to start fitting the correlation functions in order to obtain the forward and backward rate constants for HB breaking and formation" },
        { "-fitend", FALSE, etREAL, {&fit_end},
          "Time (ps) where to stop fitting the correlation functions in order to obtain the forward and backward rate constants for HB breaking and formation. Only with [TT]-gem[tt]" },
        { "-smooth", FALSE, etREAL, {&smooth_tail_start},
          "If this value is >= 0, the tail of the ACF will be smoothed by fitting it to an exponential function: [MATH]y = A [EXP]-x/[GRK]tau[grk][exp][math]" },
        { "-nbmin",   FALSE, etINT, {&nb_min},
          "HIDDENMinimum number of blocks for block averaging" },
        { "-resol", FALSE, etINT, {&resol},
          "HIDDENResolution for the block averaging, block size increases with"
          " a factor 2^(1/resol)" },
        { "-eeexpfit", FALSE, etBOOL, {&bEESEF},
          "HIDDENAlways use a single exponential fit for the error estimate" },
        { "-eenlc", FALSE, etBOOL, {&bEENLC},
          "HIDDENAllow a negative long-time correlation" },
        { "-eefitac", FALSE, etBOOL, {&bEeFitAc},
          "HIDDENAlso plot analytical block average using a autocorrelation fit" },
        { "-filter",  FALSE, etREAL, {&filtlen},
          "Print the high-frequency fluctuation after filtering with a cosine filter of this length" },
        { "-power", FALSE, etBOOL, {&bPower},
          "Fit data to: b t^a" },
        { "-subav", FALSE, etBOOL, {&bSubAv},
          "Subtract the average before autocorrelating" },
        { "-oneacf", FALSE, etBOOL, {&bAverCorr},
          "Calculate one ACF over all sets" },
        { "-nbalexp", FALSE, etINT, {&nBalExp},
          "HIDDENNumber of exponentials to fit to the ultrafast component" },
        { "-baltime", FALSE, etREAL, {&balTime},
          "HIDDENTime up to which the ballistic component will be fitted" },
/*     { "-gemnp", FALSE, etINT, {&nFitPoints}, */
/*       "HIDDENNumber of data points taken from the ACF to use for fitting to rev. gem. recomb. model."}, */
/*     { "-rcut", FALSE, etREAL, {&rcut}, */
/*       "Cut-off for hydrogen bonds in geminate algorithms" }, */
/*     { "-gemtype", FALSE, etENUM, {gemType}, */
/*       "What type of gminate recombination to use"}, */
/*     { "-D", FALSE, etREAL, {&diffusion}, */
/*       "The self diffusion coefficient which is used for the reversible geminate recombination model."} */
    };
#define NPA asize(pa)

    FILE           *out, *out_fit;
    int             n, nlast, s, nset, i, j = 0;
    real          **val, *t, dt, tot, error;
    double         *av, *sig, cum1, cum2, cum3, cum4, db;
    const char     *acfile, *msdfile, *ccfile, *distfile, *avfile, *eefile, *balfile, *gemfile, *fitfile;
    output_env_t    oenv;

    t_filenm        fnm[] = {
        { efXVG, "-f",    "graph",    ffREAD   },
        { efXVG, "-ac",   "autocorr", ffOPTWR  },
        { efXVG, "-msd",  "msd",      ffOPTWR  },
        { efXVG, "-cc",   "coscont",  ffOPTWR  },
        { efXVG, "-dist", "distr",    ffOPTWR  },
        { efXVG, "-av",   "average",  ffOPTWR  },
        { efXVG, "-ee",   "errest",   ffOPTWR  },
        { efXVG, "-bal",  "ballisitc", ffOPTWR  },
/*     { efXVG, "-gem",  "geminate", ffOPTWR  }, */
        { efLOG, "-g",    "fitlog",   ffOPTWR  }
    };
#define NFILE asize(fnm)

    int      npargs;
    t_pargs *ppa;

    npargs = asize(pa);
    ppa    = add_acf_pargs(&npargs, pa);

    parse_common_args(&argc, argv, PCA_CAN_VIEW,
                      NFILE, fnm, npargs, ppa, asize(desc), desc, 0, NULL, &oenv);

    acfile   = opt2fn_null("-ac", NFILE, fnm);
    msdfile  = opt2fn_null("-msd", NFILE, fnm);
    ccfile   = opt2fn_null("-cc", NFILE, fnm);
    distfile = opt2fn_null("-dist", NFILE, fnm);
    avfile   = opt2fn_null("-av", NFILE, fnm);
    eefile   = opt2fn_null("-ee", NFILE, fnm);
    balfile  = opt2fn_null("-bal", NFILE, fnm);
/*   gemfile  = opt2fn_null("-gem",NFILE,fnm); */
    /* When doing autocorrelation we don't want a fitlog for fitting
     * the function itself (not the acf) when the user did not ask for it.
     */
    if (opt2parg_bSet("-fitfn", npargs, ppa) && acfile == NULL)
    {
        fitfile  = opt2fn("-g", NFILE, fnm);
    }
    else
    {
        fitfile  = opt2fn_null("-g", NFILE, fnm);
    }

    val = read_xvg_time(opt2fn("-f", NFILE, fnm), bHaveT,
                        opt2parg_bSet("-b", npargs, ppa), tb,
                        opt2parg_bSet("-e", npargs, ppa), te,
                        nsets_in, &nset, &n, &dt, &t);
    printf("Read %d sets of %d points, dt = %g\n\n", nset, n, dt);

    if (bDer)
    {
        printf("Calculating the derivative as (f[i+%d]-f[i])/(%d*dt)\n\n",
               d, d);
        n -= d;
        for (s = 0; s < nset; s++)
        {
            for (i = 0; (i < n); i++)
            {
                val[s][i] = (val[s][i+d]-val[s][i])/(d*dt);
            }
        }
    }

    if (bIntegrate)
    {
        real sum, stddev;

        printf("Calculating the integral using the trapezium rule\n");

        if (bXYdy)
        {
            sum = evaluate_integral(n, t, val[0], val[1], aver_start, &stddev);
            printf("Integral %10.3f +/- %10.5f\n", sum, stddev);
        }
        else
        {
            for (s = 0; s < nset; s++)
            {
                sum = evaluate_integral(n, t, val[s], NULL, aver_start, &stddev);
                printf("Integral %d  %10.5f  +/- %10.5f\n", s+1, sum, stddev);
            }
        }
    }

    if (fitfile != NULL)
    {
        out_fit = ffopen(fitfile, "w");
        if (bXYdy && nset >= 2)
        {
            do_fit(out_fit, 0, TRUE, n, t, val, npargs, ppa, oenv);
        }
        else
        {
            for (s = 0; s < nset; s++)
            {
                do_fit(out_fit, s, FALSE, n, t, val, npargs, ppa, oenv);
            }
        }
        ffclose(out_fit);
    }

    printf("                                      std. dev.    relative deviation of\n");
    printf("                       standard       ---------   cumulants from those of\n");
    printf("set      average       deviation      sqrt(n-1)   a Gaussian distribition\n");
    printf("                                                      cum. 3   cum. 4\n");
    snew(av, nset);
    snew(sig, nset);
    for (s = 0; (s < nset); s++)
    {
        cum1 = 0;
        cum2 = 0;
        cum3 = 0;
        cum4 = 0;
        for (i = 0; (i < n); i++)
        {
            cum1 += val[s][i];
        }
        cum1 /= n;
        for (i = 0; (i < n); i++)
        {
            db    = val[s][i]-cum1;
            cum2 += db*db;
            cum3 += db*db*db;
            cum4 += db*db*db*db;
        }
        cum2  /= n;
        cum3  /= n;
        cum4  /= n;
        av[s]  = cum1;
        sig[s] = sqrt(cum2);
        if (n > 1)
        {
            error = sqrt(cum2/(n-1));
        }
        else
        {
            error = 0;
        }
        printf("SS%d  %13.6e   %12.6e   %12.6e      %6.3f   %6.3f\n",
               s+1, av[s], sig[s], error,
               sig[s] ? cum3/(sig[s]*sig[s]*sig[s]*sqrt(8/M_PI)) : 0,
               sig[s] ? cum4/(sig[s]*sig[s]*sig[s]*sig[s]*3)-1 : 0);
    }
    printf("\n");

    if (filtlen)
    {
        filter(filtlen, n, nset, val, dt);
    }

    if (msdfile)
    {
        out = xvgropen(msdfile, "Mean square displacement",
                       "time", "MSD (nm\\S2\\N)", oenv);
        nlast = (int)(n*frac);
        for (s = 0; s < nset; s++)
        {
            for (j = 0; j <= nlast; j++)
            {
                if (j % 100 == 0)
                {
                    fprintf(stderr, "\r%d", j);
                }
                tot = 0;
                for (i = 0; i < n-j; i++)
                {
                    tot += sqr(val[s][i]-val[s][i+j]);
                }
                tot /= (real)(n-j);
                fprintf(out, " %g %8g\n", dt*j, tot);
            }
            if (s < nset-1)
            {
                fprintf(out, "&\n");
            }
        }
        ffclose(out);
        fprintf(stderr, "\r%d, time=%g\n", j-1, (j-1)*dt);
    }
    if (ccfile)
    {
        plot_coscont(ccfile, n, nset, val, oenv);
    }

    if (distfile)
    {
        histogram(distfile, binwidth, n, nset, val, oenv);
    }
    if (avfile)
    {
        average(avfile, nenum(avbar_opt), n, nset, val, t);
    }
    if (eefile)
    {
        estimate_error(eefile, nb_min, resol, n, nset, av, sig, val, dt,
                       bEeFitAc, bEESEF, bEENLC, oenv);
    }
    if (balfile)
    {
        do_ballistic(balfile, n, t, val, nset, balTime, nBalExp, oenv);
    }
/*   if (gemfile) */
/*       do_geminate(gemfile,n,t,val,nset,diffusion,rcut,balTime, */
/*                   nFitPoints, fit_start, fit_end, oenv); */
    if (bPower)
    {
        power_fit(n, nset, val, t);
    }

    if (acfile != NULL)
    {
        if (bSubAv)
        {
            for (s = 0; s < nset; s++)
            {
                for (i = 0; i < n; i++)
                {
                    val[s][i] -= av[s];
                }
            }
        }
        do_autocorr(acfile, oenv, "Autocorrelation", n, nset, val, dt,
                    eacNormal, bAverCorr);
    }

    if (bRegression)
    {
        regression_analysis(n, bXYdy, t, nset, val);
    }

    if (bLuzar)
    {
        luzar_correl(n, t, nset, val, temp, bXYdy, fit_start, smooth_tail_start, oenv);
    }

    view_all(oenv, NFILE, fnm);

    return 0;
}
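
The [TT]-power[tt] description in both examples reduces the fit of b t^a to a straight line on log-log scale. Below is a self-contained sketch of that reduction (a hypothetical helper, not the power_fit() routine the listings call; note that g_analyze ignores everything after the first non-positive value, while this sketch simply skips non-positive points, and it assumes at least two usable points).

#include <math.h>

/* Fit y = b * t^a by least squares on (log t, log y):
 * log y = a log t + log b, so the regression slope is the exponent a
 * and exp(intercept) is the prefactor b. */
static void fit_power_law(const double *t, const double *y, int n,
                          double *a, double *b)
{
    double sx = 0, sy = 0, sxx = 0, sxy = 0;
    int    i, m = 0;

    for (i = 0; i < n; i++)
    {
        if (t[i] > 0 && y[i] > 0)   /* log only defined for positive values */
        {
            double lx = log(t[i]), ly = log(y[i]);
            sx  += lx;
            sy  += ly;
            sxx += lx*lx;
            sxy += lx*ly;
            m++;
        }
    }
    *a = (m*sxy - sx*sy)/(m*sxx - sx*sx);  /* slope     -> exponent a  */
    *b = exp((sy - (*a)*sx)/m);            /* intercept -> prefactor b */
}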
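Finally, the [TT]-filter[tt] kernel, proportional to cos(pi t/len) for t in [-len/2, len/2], can be sketched as a normalized weighted average; the RMS of a set minus its filtered signal is the high-frequency fluctuation the option reports. Again a hypothetical helper under the equidistant-time assumption, not the filter() routine in the listings.

#include <assert.h>
#include <math.h>

/* Cosine-filtered value at point j; valid for half <= j < n - half,
 * so points within half a filter length of either end are excluded. */
static double cosine_filter(const double *y, int n, double dt,
                            double len, int j)
{
    int    half = (int)(0.5*len/dt);   /* half filter width in points */
    double sum = 0, wsum = 0;
    int    k;

    assert(j >= half && j + half < n);
    for (k = -half; k <= half; k++)
    {
        double w = cos(M_PI*k*dt/len); /* weight ~ cos(pi t/len)      */
        sum  += w*y[j + k];
        wsum += w;
    }
    return sum/wsum;
}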